Merge remote-tracking branch 'origin/development' into petsc-64bit-integer

Martin Diehl 2021-12-27 11:58:53 +01:00
commit e6376f011c
11 changed files with 312 additions and 110 deletions

==== GitHub Actions workflow: Python unit tests ====

@@ -43,12 +43,13 @@ jobs:
           pip install pytest
       - name: Install dependencies
+        # https://github.com/actions/virtual-environments/issues/4790
         run: >
           sudo apt-get update &&
-          sudo apt-get install python3-pip python3-pytest python3-pandas python3-scipy
-          python3-h5py python3-vtk7 python3-matplotlib python3-yaml -y
+          sudo apt-get remove mysql* &&
+          sudo apt-get install python3-pandas python3-scipy python3-h5py python3-vtk7 python3-matplotlib python3-yaml -y
       - name: Run unit tests
         run: |
           export PYTHONPATH=${PWD}/python
-          COLUMNS=256 python -m pytest python
+          COLUMNS=256 pytest python

==== GitLab CI configuration ====

@@ -36,14 +36,17 @@ variables:
   # Names of module files to load
   # ===============================================================================================
   # ++++++++++++ Compiler +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-  COMPILER_INTEL: "Compiler/Intel/19.1.2 Libraries/IMKL/2020"
   COMPILER_GNU: "Compiler/GNU/10"
+  COMPILER_INTELLLVM: "Compiler/oneAPI/2022.0.1 Libraries/IMKL/2022.0.1"
+  COMPILER_INTEL: "Compiler/Intel/2022.0.1 Libraries/IMKL/2022.0.1"
   # ++++++++++++ MPI ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-  MPI_INTEL: "MPI/Intel/19.1.2/IntelMPI/2019"
   MPI_GNU: "MPI/GNU/10/OpenMPI/4.1.1"
+  MPI_INTELLLVM: "MPI/oneAPI/2022.0.1/IntelMPI/2021.5.0"
+  MPI_INTEL: "MPI/Intel/2022.0.1/IntelMPI/2021.5.0"
   # ++++++++++++ PETSc ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-  PETSC_INTEL: "Libraries/PETSc/3.16.1/Intel-19.1.2-IntelMPI-2019"
   PETSC_GNU: "Libraries/PETSc/3.16.1/GNU-10-OpenMPI-4.1.1"
+  PETSC_INTELLLVM: "Libraries/PETSc/3.16.2/oneAPI-2022.0.1-IntelMPI-2021.5.0"
+  PETSC_INTEL: "Libraries/PETSc/3.16.2/Intel-2022.0.1-IntelMPI-2021.5.0"
   # ++++++++++++ MSC Marc +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
   MSC: "FEM/MSC/2021.3.1"
   IntelMarc: "Compiler/Intel/19.1.2 Libraries/IMKL/2020"
@@ -76,20 +79,6 @@ mypy:

 ###################################################################################################
-test_grid_Intel:
-  stage: compile
-  script:
-    - module load ${COMPILER_INTEL} ${MPI_INTEL} ${PETSC_INTEL}
-    - cd PRIVATE/testing/pytest
-    - pytest -k 'compile and grid' --basetemp ${TESTROOT}/compile_grid_Intel
-
-test_mesh_Intel:
-  stage: compile
-  script:
-    - module load ${COMPILER_INTEL} ${MPI_INTEL} ${PETSC_INTEL}
-    - cd PRIVATE/testing/pytest
-    - pytest -k 'compile and mesh' --basetemp ${TESTROOT}/compile_mesh_Intel
-
 test_grid_GNU:
   stage: compile
   script:

@@ -104,6 +93,27 @@ test_mesh_GNU:
     - cd PRIVATE/testing/pytest
     - pytest -k 'compile and mesh' --basetemp ${TESTROOT}/compile_mesh_GNU

+test_mesh_IntelLLVM:
+  stage: compile
+  script:
+    - module load ${COMPILER_INTELLLVM} ${MPI_INTELLLVM} ${PETSC_INTELLLVM}
+    - cd PRIVATE/testing/pytest
+    - pytest -k 'compile and mesh' --basetemp ${TESTROOT}/compile_mesh_IntelLLVM
+
+test_grid_Intel:
+  stage: compile
+  script:
+    - module load ${COMPILER_INTEL} ${MPI_INTEL} ${PETSC_INTEL}
+    - cd PRIVATE/testing/pytest
+    - pytest -k 'compile and grid' --basetemp ${TESTROOT}/compile_grid_Intel
+
+test_mesh_Intel:
+  stage: compile
+  script:
+    - module load ${COMPILER_INTEL} ${MPI_INTEL} ${PETSC_INTEL}
+    - cd PRIVATE/testing/pytest
+    - pytest -k 'compile and mesh' --basetemp ${TESTROOT}/compile_mesh_Intel
+
 test_Marc:
   stage: compile
   script:

==== CMake build script: compiler selection ====

@@ -82,6 +82,8 @@ if (CMAKE_Fortran_COMPILER_ID STREQUAL "Intel")
   include(Compiler-Intel)
 elseif(CMAKE_Fortran_COMPILER_ID STREQUAL "GNU")
   include(Compiler-GNU)
+elseif(CMAKE_Fortran_COMPILER_ID STREQUAL "IntelLLVM")
+  include(Compiler-IntelLLVM)
 else()
   message(FATAL_ERROR "Compiler type(CMAKE_Fortran_COMPILER_ID) not recognized")
 endif()

==== CMake Intel compiler flags ====

@@ -106,8 +106,9 @@ set (DEBUG_FLAGS "${DEBUG_FLAGS} -fpe-all=0")
 #set (DEBUG_FLAGS "${DEBUG_FLAGS},stderrors")
 # ... warnings about Fortran standard violations are changed to errors
-set (DEBUG_FLAGS "${DEBUG_FLAGS} -debug-parameters all")
+#set (DEBUG_FLAGS "${DEBUG_FLAGS} -debug-parameters all")
 # generate debug information for parameters
+# Disabled due to ICE when compiling phase_damage.f90 (not understandable, there is no parameter in there)

 # Additional options
 # -heap-arrays: Should not be done for OpenMP, but set "ulimit -s unlimited" on shell. Probably it helps also to unlimit other limits

==== CMake IntelLLVM compiler flags (new file) ====

@@ -0,0 +1,121 @@
###################################################################################################
# Intel Compiler
###################################################################################################
if (CMAKE_Fortran_COMPILER_VERSION VERSION_LESS 18.0)
message (FATAL_ERROR "Intel Compiler version: ${CMAKE_Fortran_COMPILER_VERSION} not supported")
endif ()
if (OPENMP)
set (OPENMP_FLAGS "-qopenmp")
endif ()
if (OPTIMIZATION STREQUAL "OFF")
set (OPTIMIZATION_FLAGS "-O0")
elseif (OPTIMIZATION STREQUAL "DEFENSIVE")
set (OPTIMIZATION_FLAGS "-O2")
elseif (OPTIMIZATION STREQUAL "AGGRESSIVE")
set (OPTIMIZATION_FLAGS "-ipo -O3 -fp-model fast=2 -xHost")
# -fast = -ipo, -O3, -no-prec-div, -static, -fp-model fast=2, and -xHost
endif ()
# -assume std_mod_proc_name (included in -standard-semantics) causes problems if other modules
# (PETSc, HDF5) are not compiled with this option (https://software.intel.com/en-us/forums/intel-fortran-compiler-for-linux-and-mac-os-x/topic/62172)
set (STANDARD_CHECK "-stand f18 -assume nostd_mod_proc_name")
set (LINKER_FLAGS "${LINKER_FLAGS} -shared-intel")
# Link against shared Intel libraries instead of static ones
#------------------------------------------------------------------------------------------------
# Fine tuning compilation options
set (COMPILE_FLAGS "${COMPILE_FLAGS} -fpp")
# preprocessor
set (COMPILE_FLAGS "${COMPILE_FLAGS} -ftz")
# flush underflow to zero, automatically set if -O[1,2,3]
set (COMPILE_FLAGS "${COMPILE_FLAGS} -diag-disable")
# disables warnings ...
set (COMPILE_FLAGS "${COMPILE_FLAGS} 5268")
# ... the text exceeds right hand column allowed on the line (we have only comments there)
set (COMPILE_FLAGS "${COMPILE_FLAGS},7624")
# ... about deprecated forall (has nice syntax and most likely a performance advantage)
set (COMPILE_FLAGS "${COMPILE_FLAGS} -warn")
# enables warnings ...
set (COMPILE_FLAGS "${COMPILE_FLAGS} declarations")
# ... any undeclared names (alternative name: -implicitnone)
set (COMPILE_FLAGS "${COMPILE_FLAGS},general")
# ... warning messages and informational messages are issued by the compiler
set (COMPILE_FLAGS "${COMPILE_FLAGS},usage")
# ... questionable programming practices
set (COMPILE_FLAGS "${COMPILE_FLAGS},interfaces")
# ... checks the interfaces of all SUBROUTINEs called and FUNCTIONs invoked in your compilation against an external set of interface blocks
set (COMPILE_FLAGS "${COMPILE_FLAGS},ignore_loc")
# ... %LOC is stripped from an actual argument
set (COMPILE_FLAGS "${COMPILE_FLAGS},alignments")
# ... data that is not naturally aligned
set (COMPILE_FLAGS "${COMPILE_FLAGS},unused")
# ... declared variables that are never used
# Additional options
# -warn: enables warnings, where
# truncated_source: Determines whether warnings occur when source exceeds the maximum column width in fixed-format files.
# (too many warnings because we have comments beyond character 132)
# uncalled: Determines whether warnings occur when a statement function is never called
# all:
# -name as_is: case sensitive Fortran!
#------------------------------------------------------------------------------------------------
# Runtime debugging
set (DEBUG_FLAGS "${DEBUG_FLAGS} -g")
# Generate symbolic debugging information in the object file
set (DEBUG_FLAGS "${DEBUG_FLAGS} -traceback")
# Generate extra information in the object file to provide source file traceback information when a severe error occurs at run time
set (DEBUG_FLAGS "${DEBUG_FLAGS} -gen-interfaces")
# Generate an interface block for each routine. http://software.intel.com/en-us/blogs/2012/01/05/doctor-fortran-gets-explicit-again/
set (DEBUG_FLAGS "${DEBUG_FLAGS} -fp-stack-check")
# Generate extra code after every function call to ensure that the floating-point (FP) stack is in the expected state
set (DEBUG_FLAGS "${DEBUG_FLAGS} -fp-model strict")
# Trap uninitialized variables
set (DEBUG_FLAGS "${DEBUG_FLAGS} -check" )
# Checks at runtime ...
set (DEBUG_FLAGS "${DEBUG_FLAGS} bounds")
# ... if an array index is too small (<1) or too large!
set (DEBUG_FLAGS "${DEBUG_FLAGS},format")
# ... for the data type of an item being formatted for output.
set (DEBUG_FLAGS "${DEBUG_FLAGS},output_conversion")
# ... for the fit of data items within a designated format descriptor field.
set (DEBUG_FLAGS "${DEBUG_FLAGS},pointers")
# ... for certain disassociated or uninitialized pointers or unallocated allocatable objects.
set (DEBUG_FLAGS "${DEBUG_FLAGS},uninit")
# ... for uninitialized variables.
set (DEBUG_FLAGS "${DEBUG_FLAGS} -ftrapuv")
# ... initializes stack local variables to an unusual value to aid error detection
set (DEBUG_FLAGS "${DEBUG_FLAGS} -fpe-all=0")
# ... capture all floating-point exceptions, sets -ftz automatically
# disable due to compiler bug https://community.intel.com/t5/Intel-Fortran-Compiler/false-positive-stand-f18-and-IEEE-SELECTED-REAL-KIND/m-p/1227336
#set (DEBUG_FLAGS "${DEBUG_FLAGS} -warn")
# enables warnings ...
#set (DEBUG_FLAGS "${DEBUG_FLAGS} errors")
# ... warnings are changed to errors
#set (DEBUG_FLAGS "${DEBUG_FLAGS},stderrors")
# ... warnings about Fortran standard violations are changed to errors
set (DEBUG_FLAGS "${DEBUG_FLAGS} -debug-parameters all")
# generate debug information for parameters
# Additional options
# -heap-arrays: Should not be done for OpenMP, but set "ulimit -s unlimited" on shell. Probably it helps also to unlimit other limits
# -check: Checks at runtime, where
# arg_temp_created: will cause a lot of warnings because we create a bunch of temporary arrays (performance?)
# stack:
#------------------------------------------------------------------------------------------------
# precision settings
set (PRECISION_FLAGS "${PRECISION_FLAGS} -real-size 64")
# set precision for standard real to 32 | 64 | 128 (= 4 | 8 | 16 bytes, type pReal is always 8 bytes)

==== Hooke elasticity configuration (YAML) ====

@@ -1,9 +1,19 @@
 type: Hooke

 references:
-  - J.P. Hirth and J. Lothe,
-    Theory of Dislocations, 1982,
-    John Wiley & Sons,
-    page 837
+  - D.J. Dever,
+    Journal of Applied Physics 43(8):3293-3301, 1972,
+    https://doi.org/10.1063/1.1661710

-C_11: 242.e9
-C_12: 146.5e+9
-C_44: 112.e9
+T_ref: 300
+
+C_11: 231.7e+9
+C_11,T: -47.6e+6
+C_11,T^2: -54.4e+3
+
+C_12: 135.8e+9
+C_12,T: -12.9e+6
+C_12,T^2: -7.3e+3
+
+C_44: 116.8e+9
+C_44,T: -19.4e+6
+C_44,T^2: -2.5e+3
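
The referenced Dever (1972) data supply a quadratic temperature dependence for each elastic constant around T_ref. A minimal sketch of how such a (C_ij, C_ij,T, C_ij,T^2) triple is typically evaluated, assuming the conventional polynomial form C(T) = C + C_T (T - T_ref) + C_T^2 (T - T_ref)^2; the exact handling inside DAMASK may differ:

# Sketch (assumed polynomial form, not necessarily DAMASK's verbatim implementation):
# C(T) = C + C_T*(T - T_ref) + C_T2*(T - T_ref)**2
def elastic_constant(C, C_T, C_T2, T, T_ref=300.0):
    """Elastic constant in Pa at temperature T in K, quadratic in (T - T_ref)."""
    return C + C_T*(T - T_ref) + C_T2*(T - T_ref)**2

# C_11 at 400 K from the values above:
print(elastic_constant(231.7e9, -47.6e6, -54.4e3, T=400.0))   # ~2.264e11 Pa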

==== VERSION ====

@@ -1 +1 @@
-v3.0.0-alpha5-297-g5ecfba1e5
+v3.0.0-alpha5-333-g01cd92755

==== Grid class (Python) ====

@@ -197,7 +197,7 @@ class Grid:
         Grid-based geometry from file.

         """
-        warnings.warn('Support for ASCII-based geom format will be removed in DAMASK 3.1.0', DeprecationWarning,2)
+        warnings.warn('Support for ASCII-based geom format will be removed in DAMASK 3.0.0', DeprecationWarning,2)
         try:
             f = open(fname)
         except TypeError:

@@ -629,7 +629,7 @@
         Compress geometry with 'x of y' and 'a to b'.

         """
-        warnings.warn('Support for ASCII-based geom format will be removed in DAMASK 3.1.0', DeprecationWarning,2)
+        warnings.warn('Support for ASCII-based geom format will be removed in DAMASK 3.0.0', DeprecationWarning,2)
         header = [f'{len(self.comments)+4} header'] + self.comments \
                + ['grid a {} b {} c {}'.format(*self.cells),
                   'size x {} y {} z {}'.format(*self.size),

==== Result class (Python) ====

@@ -4,6 +4,7 @@ import fnmatch
 import os
 import copy
 import datetime
+import warnings
 import xml.etree.ElementTree as ET
 import xml.dom.minidom
 from pathlib import Path
@@ -27,6 +28,20 @@ h5py3 = h5py.__version__[0] == '3'

 chunk_size = 1024**2//8           # for compression in HDF5

+def _view_transition(what,datasets,increments,times,phases,homogenizations,fields):
+    if (datasets is not None and what is None) or (what is not None and datasets is None):
+        raise ValueError('"what" and "datasets" need to be used as a pair')
+    if datasets is not None or what is not None:
+        warnings.warn('Arguments "what" and "datasets" will be removed in DAMASK v3.0.0-alpha7', DeprecationWarning,2)
+        return what,datasets
+    if sum(1 for _ in filter(None.__ne__, [increments,times,phases,homogenizations,fields])) > 1:
+        raise ValueError('Only one out of "increments", "times", "phases", "homogenizations", and "fields" can be used')
+    else:
+        if increments is not None: return "increments", increments
+        if times is not None: return "times", times
+        if phases is not None: return "phases", phases
+        if homogenizations is not None: return "homogenizations", homogenizations
+        if fields is not None: return "fields", fields
+
 def _read(dataset):
     """Read a dataset and its metadata into a numpy.ndarray."""
@@ -79,7 +94,7 @@ class Result:
     >>> r.add_Cauchy()
     >>> r.add_equivalent_Mises('sigma')
     >>> r.export_VTK()
-    >>> r_last = r.view('increments',-1)
+    >>> r_last = r.view(increments=-1)
     >>> sigma_vM_last = r_last.get('sigma_vM')

     """
@@ -141,7 +156,7 @@ class Result:
         self.fname = Path(fname).absolute()

-        self._allow_modification = False
+        self._protected = True

     def __copy__(self):
@@ -155,10 +170,10 @@
         """Show summary of file content."""
         visible_increments = self.visible['increments']

-        first = self.view('increments',visible_increments[0:1]).list_data()
+        first = self.view(increments=visible_increments[0:1]).list_data()

         last  = '' if len(visible_increments) < 2 else \
-                self.view('increments',visible_increments[-1:]).list_data()
+                self.view(increments=visible_increments[-1:]).list_data()

         in_between = '' if len(visible_increments) < 3 else \
                      ''.join([f'\n{inc}\n ...\n' for inc in visible_increments[1:-1]])
@@ -231,36 +246,6 @@ class Result:

         return dup

-    def modification_enable(self):
-        """
-        Allow modification of existing data.
-
-        Returns
-        -------
-        modified_view : damask.Result
-            View without write-protection of existing data.
-
-        """
-        print(util.warn('Warning: Modification of existing datasets allowed!'))
-        dup = self.copy()
-        dup._allow_modification = True
-        return dup
-
-    def modification_disable(self):
-        """
-        Prevent modification of existing data (default case).
-
-        Returns
-        -------
-        modified_view : damask.Result
-            View with write-protection of existing data.
-
-        """
-        dup = self.copy()
-        dup._allow_modification = False
-        return dup
-
     def increments_in_range(self,start,end):
         """
         Get all increments within a given range.
@@ -285,7 +270,6 @@
                 selected.append(self.increments[i])

         return selected

-
     def times_in_range(self,start,end):
         """
         Get all increments within a given time range.
@@ -310,17 +294,38 @@
         return selected

-    def view(self,what,datasets):
+    def view(self,what=None,datasets=None,*,
+             increments=None,
+             times=None,
+             phases=None,
+             homogenizations=None,
+             fields=None,
+             protected=None):
         """
         Set view.

+        Wildcard matching with '?' and '*' is supported.
+        True is equivalent to '*', False is equivalent to [].
+
         Parameters
         ----------
         what : {'increments', 'times', 'phases', 'homogenizations', 'fields'}
-            Attribute to change.
+            Attribute to change. DEPRECATED.
         datasets : (list of) int (for increments), (list of) float (for times), (list of) str, or bool
-            Name of datasets; supports '?' and '*' wildcards.
+            Name of datasets; supports '?' and '*' wildcards. DEPRECATED.
             True is equivalent to '*', False is equivalent to [].
+        increments: (list of) int, (list of) str, or bool, optional.
+            Number(s) of increments to select.
+        times: (list of) float, (list of) str, or bool, optional.
+            Simulation time(s) of increments to select.
+        phases: (list of) str, or bool, optional.
+            Name(s) of phases to select.
+        homogenizations: (list of) str, or bool, optional.
+            Name(s) of homogenizations to select.
+        fields: (list of) str, or bool, optional.
+            Name(s) of fields to select.
+        protected: bool, optional.
+            Protection status of existing data.

         Returns
         -------
@@ -333,29 +338,61 @@
         >>> import damask
         >>> r = damask.Result('my_file.hdf5')
-        >>> r_first = r.view('increment',0)
+        >>> r_first = r.view(increments=0)

         Get a view that shows all results between simulation times of 10 to 40:

         >>> import damask
         >>> r = damask.Result('my_file.hdf5')
-        >>> r_t10to40 = r.view('times',r.times_in_range(10.0,40.0))
+        >>> r_t10to40 = r.view(times=r.times_in_range(10.0,40.0))

         """
-        return self._manage_view('set',what,datasets)
+        v = _view_transition(what,datasets,increments,times,phases,homogenizations,fields)
+        if protected is not None:
+            if v is None:
+                dup = self.copy()
+            else:
+                what_,datasets_ = v
+                dup = self._manage_view('set',what_,datasets_)
+            if not protected:
+                print(util.warn('Warning: Modification of existing datasets allowed!'))
+            dup._protected = protected
+        else:
+            what_,datasets_ = v
+            dup = self._manage_view('set',what_,datasets_)
+
+        return dup

-    def view_more(self,what,datasets):
+    def view_more(self,what=None,datasets=None,*,
+                  increments=None,
+                  times=None,
+                  phases=None,
+                  homogenizations=None,
+                  fields=None):
         """
         Add to view.

+        Wildcard matching with '?' and '*' is supported.
+        True is equivalent to '*', False is equivalent to [].
+
         Parameters
         ----------
         what : {'increments', 'times', 'phases', 'homogenizations', 'fields'}
-            Attribute to change.
+            Attribute to change. DEPRECATED.
         datasets : (list of) int (for increments), (list of) float (for times), (list of) str, or bool
-            Name of datasets; supports '?' and '*' wildcards.
+            Name of datasets; supports '?' and '*' wildcards. DEPRECATED.
             True is equivalent to '*', False is equivalent to [].
+        increments: (list of) int, (list of) str, or bool, optional.
+            Number(s) of increments to select.
+        times: (list of) float, (list of) str, or bool, optional.
+            Simulation time(s) of increments to select.
+        phases: (list of) str, or bool, optional.
+            Name(s) of phases to select.
+        homogenizations: (list of) str, or bool, optional.
+            Name(s) of homogenizations to select.
+        fields: (list of) str, or bool, optional.
+            Name(s) of fields to select.

         Returns
         -------
@@ -367,25 +404,44 @@
         Get a view that shows only results from first and last increment:

         >>> import damask
-        >>> r_empty = damask.Result('my_file.hdf5').view('increments',False)
-        >>> r_first = r_empty.view_more('increments',0)
-        >>> r_first_and_last = r_first.view_more('increments',-1)
+        >>> r_empty = damask.Result('my_file.hdf5').view(increments=False)
+        >>> r_first = r_empty.view_more(increments=0)
+        >>> r_first_and_last = r_first.view_more(increments=-1)

         """
-        return self._manage_view('add',what,datasets)
+        what_,datasets_ = _view_transition(what,datasets,increments,times,phases,homogenizations,fields)
+        return self._manage_view('add',what_,datasets_)

-    def view_less(self,what,datasets):
+    def view_less(self,what=None,datasets=None,*,
+                  increments=None,
+                  times=None,
+                  phases=None,
+                  homogenizations=None,
+                  fields=None):
         """
         Remove from view.

+        Wildcard matching with '?' and '*' is supported.
+        True is equivalent to '*', False is equivalent to [].
+
         Parameters
         ----------
         what : {'increments', 'times', 'phases', 'homogenizations', 'fields'}
-            Attribute to change.
+            Attribute to change. DEPRECATED.
         datasets : (list of) int (for increments), (list of) float (for times), (list of) str, or bool
-            Name of datasets; supports '?' and '*' wildcards.
+            Name of datasets; supports '?' and '*' wildcards. DEPRECATED.
             True is equivalent to '*', False is equivalent to [].
+        increments: (list of) int, (list of) str, or bool, optional.
+            Number(s) of increments to select.
+        times: (list of) float, (list of) str, or bool, optional.
+            Simulation time(s) of increments to select.
+        phases: (list of) str, or bool, optional.
+            Name(s) of phases to select.
+        homogenizations: (list of) str, or bool, optional.
+            Name(s) of homogenizations to select.
+        fields: (list of) str, or bool, optional.
+            Name(s) of fields to select.

         Returns
         -------
@@ -398,10 +454,11 @@
         >>> import damask
         >>> r_all = damask.Result('my_file.hdf5')
-        >>> r_deformed = r_all.view_less('increments',0)
+        >>> r_deformed = r_all.view_less(increments=0)

         """
-        return self._manage_view('del',what,datasets)
+        what_,datasets_ = _view_transition(what,datasets,increments,times,phases,homogenizations,fields)
+        return self._manage_view('del',what_,datasets_)

     def rename(self,name_src,name_dst):
@@ -424,11 +481,11 @@
         >>> import damask
         >>> r = damask.Result('my_file.hdf5')
-        >>> r_unprotected = r.modification_enable()
+        >>> r_unprotected = r.view(protected=False)
         >>> r_unprotected.rename('F','def_grad')

         """
-        if not self._allow_modification:
+        if self._protected:
             raise PermissionError('Renaming datasets not permitted')

         with h5py.File(self.fname,'a') as f:
@@ -463,11 +520,11 @@
         >>> import damask
         >>> r = damask.Result('my_file.hdf5')
-        >>> r_unprotected = r.modification_enable()
+        >>> r_unprotected = r.view(protected=False)
         >>> r_unprotected.remove('F')

         """
-        if not self._allow_modification:
+        if self._protected:
             raise PermissionError('Removing datasets not permitted')

         with h5py.File(self.fname,'a') as f:
@@ -1358,7 +1415,7 @@
         lock.acquire()
         with h5py.File(self.fname, 'a') as f:
             try:
-                if self._allow_modification and '/'.join([group,result['label']]) in f:
+                if not self._protected and '/'.join([group,result['label']]) in f:
                     dataset = f['/'.join([group,result['label']])]
                     dataset[...] = result['data']
                     dataset.attrs['overwritten'] = True
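
Taken together, these changes fold modification_enable()/modification_disable() into view(protected=...) and replace positional ('what', datasets) selection with keywords. A short usage sketch assembled from the docstrings above ('my_file.hdf5' is their placeholder name):

import damask

r = damask.Result('my_file.hdf5')

r_last = r.view(increments=-1)            # new keyword-style selection
r_old  = r.view('increments',-1)          # deprecated pair, warns until removal in v3.0.0-alpha7

r_unprotected = r.view(protected=False)   # replaces r.modification_enable()
r_unprotected.rename('F','def_grad')      # permitted only while unprotected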

==== Result tests (Python) ====

@@ -25,7 +25,7 @@ def default(tmp_path,ref_path):
     fname = '12grains6x7x8_tensionY.hdf5'
     shutil.copy(ref_path/fname,tmp_path)
     f = Result(tmp_path/fname)
-    return f.view('times',20.0)
+    return f.view(times=20.0)

 @pytest.fixture
 def single_phase(tmp_path,ref_path):
@@ -58,14 +58,14 @@ class TestResult:

     def test_view_all(self,default):
-        a = default.view('increments',True).get('F')
+        a = default.view(increments=True).get('F')

-        assert dict_equal(a,default.view('increments','*').get('F'))
-        assert dict_equal(a,default.view('increments',default.increments_in_range(0,np.iinfo(int).max)).get('F'))
+        assert dict_equal(a,default.view(increments='*').get('F'))
+        assert dict_equal(a,default.view(increments=default.increments_in_range(0,np.iinfo(int).max)).get('F'))

-        assert dict_equal(a,default.view('times',True).get('F'))
-        assert dict_equal(a,default.view('times','*').get('F'))
-        assert dict_equal(a,default.view('times',default.times_in_range(0.0,np.inf)).get('F'))
+        assert dict_equal(a,default.view(times=True).get('F'))
+        assert dict_equal(a,default.view(times='*').get('F'))
+        assert dict_equal(a,default.view(times=default.times_in_range(0.0,np.inf)).get('F'))

     @pytest.mark.parametrize('what',['increments','times','phases','fields'])  # ToDo: discuss homogenizations
     def test_view_none(self,default,what):
@@ -314,7 +314,7 @@

     @pytest.mark.parametrize('overwrite',['off','on'])
     def test_add_overwrite(self,default,overwrite):
-        last = default.view('increments',-1)
+        last = default.view(increments=-1)

         last.add_stress_Cauchy()
@@ -322,9 +322,9 @@
         created_first = datetime.strptime(created_first,'%Y-%m-%d %H:%M:%S%z')

         if overwrite == 'on':
-            last = last.modification_enable()
+            last = last.view(protected=False)
         else:
-            last = last.modification_disable()
+            last = last.view(protected=True)

         time.sleep(2.)
         try:
@@ -344,10 +344,10 @@
     def test_rename(self,default,allowed):
         if allowed == 'on':
             F = default.place('F')
-            default = default.modification_enable()
+            default = default.view(protected=False)
             default.rename('F','new_name')
             assert np.all(F == default.place('new_name'))
-        default = default.modification_disable()
+        default = default.view(protected=True)

         with pytest.raises(PermissionError):
             default.rename('P','another_new_name')
@@ -355,7 +355,7 @@
     @pytest.mark.parametrize('allowed',['off','on'])
     def test_remove(self,default,allowed):
         if allowed == 'on':
-            unsafe = default.modification_enable()
+            unsafe = default.view(protected=False)
             unsafe.remove('F')
             assert unsafe.get('F') is None
         else:
@@ -377,7 +377,7 @@
     @pytest.mark.parametrize('inc',[4,0],ids=range(2))
     @pytest.mark.xfail(int(vtk.vtkVersion.GetVTKVersion().split('.')[0])<9, reason='missing "Direction" attribute')
     def test_vtk(self,request,tmp_path,ref_path,update,patch_execution_stamp,patch_datetime_now,output,fname,inc):
-        result = Result(ref_path/fname).view('increments',inc)
+        result = Result(ref_path/fname).view(increments=inc)
         os.chdir(tmp_path)
         result.export_VTK(output,parallel=False)
         fname = fname.split('.')[0]+f'_inc{(inc if type(inc) == int else inc[0]):0>2}.vti'
@@ -400,7 +400,7 @@
         result.export_VTK(output,mode)

     def test_marc_coordinates(self,ref_path):
-        result = Result(ref_path/'check_compile_job1.hdf5').view('increments',-1)
+        result = Result(ref_path/'check_compile_job1.hdf5').view(increments=-1)
         c_n = result.coordinates0_node + result.get('u_n')
         c_p = result.coordinates0_point + result.get('u_p')
         assert len(c_n) > len(c_p)
@@ -440,7 +440,7 @@
         dim_xdmf = reader_xdmf.GetOutput().GetDimensions()
         bounds_xdmf = reader_xdmf.GetOutput().GetBounds()

-        single_phase.view('increments',0).export_VTK(parallel=False)
+        single_phase.view(increments=0).export_VTK(parallel=False)
         fname = os.path.splitext(os.path.basename(single_phase.fname))[0]+'_inc00.vti'
         reader_vti = vtk.vtkXMLImageDataReader()
         reader_vti.SetFileName(fname)

==== VTK tests (Python) ====

@@ -20,7 +20,7 @@ def default():
     """Simple VTK."""
     cells = np.array([5,6,7],int)
     size = np.array([.6,1.,.5])
-    return VTK.from_rectilinear_grid(cells,size)
+    return VTK.from_image_data(cells,size)

 class TestVTK:
@@ -116,7 +116,7 @@

     def test_add_extension(self,tmp_path,default):
         default.save(tmp_path/'default.txt',parallel=False)
-        assert os.path.isfile(tmp_path/'default.txt.vtr')
+        assert os.path.isfile(tmp_path/'default.txt.vti')

     def test_invalid_get(self,default):
@@ -160,7 +160,7 @@
     def test_comments(self,tmp_path,default):
         default.add_comments(['this is a comment'])
         default.save(tmp_path/'with_comments',parallel=False)
-        new = VTK.load(tmp_path/'with_comments.vtr')
+        new = VTK.load(tmp_path/'with_comments.vti')
         assert new.get_comments() == ['this is a comment']

 @pytest.mark.xfail(int(vtk.vtkVersion.GetVTKVersion().split('.')[0])<8, reason='missing METADATA')
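
The fixture change from a rectilinear grid to image data shifts the written file type from '.vtr' to '.vti', which the adjusted assertions reflect. A sketch of the updated behavior, using only calls visible in the diff:

import numpy as np
from damask import VTK

cells = np.array([5,6,7],int)           # cell counts per direction
size  = np.array([.6,1.,.5])            # physical edge lengths
v = VTK.from_image_data(cells,size)     # image data instead of from_rectilinear_grid
v.save('default',parallel=False)        # now yields 'default.vti' rather than 'default.vtr'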