Merge branch 'compressed-hdf5' into 'development'

Compressed HDF5

See merge request damask/DAMASK!299
Franz Roters 2020-12-10 16:52:52 +01:00
commit bda5a50ff3
9 changed files with 92 additions and 156 deletions

View File

@@ -46,7 +46,7 @@ class Result:
self.version_major = f.attrs['DADF5_version_major']
self.version_minor = f.attrs['DADF5_version_minor']
if self.version_major != 0 or not 7 <= self.version_minor <= 10:
if self.version_major != 0 or not 7 <= self.version_minor <= 11:
raise TypeError(f'Unsupported DADF5 version {self.version_major}.{self.version_minor}')
self.structured = 'grid' in f['geometry'].attrs.keys() or \
@@ -790,7 +790,10 @@ class Result:
lattice = {'fcc':'cF','bcc':'cI','hex':'hP'}[q['meta']['Lattice']]
except KeyError:
lattice = q['meta']['Lattice']
o = Orientation(rotation = (rfn.structured_to_unstructured(q['data'])),lattice=lattice)
try:
o = Orientation(rotation = (rfn.structured_to_unstructured(q['data'])),lattice=lattice)
except ValueError:
o = Orientation(rotation = q['data'],lattice=lattice)
return {
'data': np.uint8(o.IPF_color(l)*255),
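The added try/except covers both on-disk layouts of the orientation dataset: files written before this branch store quaternions as an HDF5 compound type (fields w, x, y, z), which h5py returns as a numpy structured array that needs structured_to_unstructured, while files written by the new Fortran path store a plain floating-point array that Orientation accepts directly (structured_to_unstructured raises ValueError on such input). A minimal sketch of the same dispatch, using a dtype check instead of the exception; the helper name is hypothetical:

    import numpy as np
    from numpy.lib import recfunctions as rfn

    def as_quaternion_array(data):
        """Return an (...,4) float array from either orientation storage layout."""
        if data.dtype.names is not None:            # compound type ('w','x','y','z'): old layout
            return rfn.structured_to_unstructured(data)
        return np.asarray(data)                     # plain float array: layout written after this merge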
@@ -1129,6 +1132,7 @@ class Result:
Arguments passed to func.
"""
chunk_size = 1024**2//8
num_threads = damask.environment.options['DAMASK_NUM_THREADS']
pool = mp.Pool(int(num_threads) if num_threads is not None else None)
lock = mp.Manager().Lock()
@@ -1152,7 +1156,15 @@ class Result:
dataset.attrs['Overwritten'] = 'Yes' if h5py3 else \
'Yes'.encode()
else:
dataset = f[result[0]].create_dataset(result[1]['label'],data=result[1]['data'])
if result[1]['data'].size >= chunk_size*2:
shape = result[1]['data'].shape
chunks = (chunk_size//np.prod(shape[1:]),)+shape[1:]
dataset = f[result[0]].create_dataset(result[1]['label'],data=result[1]['data'],
maxshape=shape, chunks=chunks,
compression='gzip', compression_opts=6,
shuffle=True,fletcher32=True)
else:
dataset = f[result[0]].create_dataset(result[1]['label'],data=result[1]['data'])
now = datetime.datetime.now().astimezone()
dataset.attrs['Created'] = now.strftime('%Y-%m-%d %H:%M:%S%z') if h5py3 else \
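With this change the Python writer mirrors the Fortran one: any result dataset holding at least two chunks' worth of data (chunk_size = 1024**2//8 = 131072 elements, i.e. 1 MiB of float64) is created chunked and filtered with shuffle, gzip level 6 and Fletcher-32 checksums; smaller datasets stay contiguous. The filter settings of a written dataset can be checked from h5py, for example (file name and dataset path are placeholders):

    import h5py

    with h5py.File('job.hdf5', 'r') as f:                       # placeholder file name
        d = f['increment_0/phase/Aluminum/mechanics/F']         # placeholder dataset path
        print(d.shape, d.chunks)                                # chunk layout, None if contiguous
        print(d.compression, d.compression_opts)                # 'gzip', 6 for large datasets
        print(d.shuffle, d.fletcher32)                          # True, True when the filters are active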

View File

@@ -12,7 +12,6 @@ module HDF5_utilities
use prec
use parallelization
use rotations
implicit none
public
@@ -37,7 +36,6 @@ module HDF5_utilities
module procedure HDF5_read_int5
module procedure HDF5_read_int6
module procedure HDF5_read_int7
end interface HDF5_read
!--------------------------------------------------------------------------------------------------
@@ -60,9 +58,6 @@ module HDF5_utilities
module procedure HDF5_write_int5
module procedure HDF5_write_int6
module procedure HDF5_write_int7
module procedure HDF5_write_rotation
end interface HDF5_write
!--------------------------------------------------------------------------------------------------
@@ -175,7 +170,7 @@ end subroutine HDF5_closeFile
!--------------------------------------------------------------------------------------------------
integer(HID_T) function HDF5_addGroup(fileHandle,groupName)
integer(HID_T), intent(in) :: fileHandle
integer(HID_T), intent(in) :: fileHandle
character(len=*), intent(in) :: groupName
integer :: hdferr
@@ -1663,82 +1658,6 @@ subroutine HDF5_write_int7(loc_id,dataset,datasetName,parallel)
end subroutine HDF5_write_int7
!--------------------------------------------------------------------------------------------------
!> @brief writes a scalar orientation dataset
! ToDo: It might be possible to write the dataset as a whole
! ToDo: We could optionally write out other representations (axis angle, euler, ...)
!--------------------------------------------------------------------------------------------------
subroutine HDF5_write_rotation(loc_id,dataset,datasetName,parallel)
type(rotation), intent(in), dimension(:) :: dataset !< data written to file
integer(HID_T), intent(in) :: loc_id !< file or group handle
character(len=*), intent(in) :: datasetName !< name of the dataset in the file
logical, intent(in), optional :: parallel
integer :: hdferr
real(pReal), dimension(4,size(dataset)) :: dataset_asArray
integer(HID_T) :: dset_id, filespace_id, memspace_id, plist_id,dtype_id,w_id,x_id,y_id,z_id
integer(HSIZE_T), dimension(size(shape(dataset))) :: &
myStart, &
myShape, & !< shape of the dataset (this process)
totalShape !< shape of the dataset (all processes)
integer(SIZE_T) :: type_size_real
integer :: i
do i = 1, size(dataset)
dataset_asArray(1:4,i) = dataset(i)%asQuaternion()
enddo
!---------------------------------------------------------------------------------------------------
! determine shape of dataset
myShape = int(shape(dataset),HSIZE_T)
!---------------------------------------------------------------------------------------------------
! compound type: name of each quaternion component
call h5tget_size_f(H5T_NATIVE_DOUBLE, type_size_real, hdferr)
call h5tcreate_f(H5T_COMPOUND_F, type_size_real*4_SIZE_T, dtype_id, hdferr)
call h5tinsert_f(dtype_id, "w", type_size_real*0_SIZE_T, H5T_NATIVE_DOUBLE, hdferr)
call h5tinsert_f(dtype_id, "x", type_size_real*1_SIZE_T, H5T_NATIVE_DOUBLE, hdferr)
call h5tinsert_f(dtype_id, "y", type_size_real*2_SIZE_T, H5T_NATIVE_DOUBLE, hdferr)
call h5tinsert_f(dtype_id, "z", type_size_real*3_SIZE_T, H5T_NATIVE_DOUBLE, hdferr)
if (present(parallel)) then
call initialize_write(dset_id, filespace_id, memspace_id, plist_id, &
myStart, totalShape, loc_id,myShape,datasetName,dtype_id,parallel)
else
call initialize_write(dset_id, filespace_id, memspace_id, plist_id, &
myStart, totalShape, loc_id,myShape,datasetName,dtype_id,.false.)
endif
call h5pset_preserve_f(plist_id, .TRUE., hdferr)
if (product(totalShape) /= 0) then
call h5tcreate_f(H5T_COMPOUND_F, type_size_real, x_id, hdferr)
call h5tinsert_f(x_id, "x", 0_SIZE_T, H5T_NATIVE_DOUBLE, hdferr)
call h5tcreate_f(H5T_COMPOUND_F, type_size_real, w_id, hdferr)
call h5tinsert_f(w_id, "w", 0_SIZE_T, H5T_NATIVE_DOUBLE, hdferr)
call h5tcreate_f(H5T_COMPOUND_F, type_size_real, y_id, hdferr)
call h5tinsert_f(y_id, "y", 0_SIZE_T, H5T_NATIVE_DOUBLE, hdferr)
call h5tcreate_f(H5T_COMPOUND_F, type_size_real, z_id, hdferr)
call h5tinsert_f(z_id, "z", 0_SIZE_T, H5T_NATIVE_DOUBLE, hdferr)
call h5dwrite_f(dset_id, w_id,dataset_asArray(1,:),int(totalShape,HSIZE_T), hdferr,&
file_space_id = filespace_id, mem_space_id = memspace_id, xfer_prp = plist_id)
call h5dwrite_f(dset_id, x_id,dataset_asArray(2,:),int(totalShape,HSIZE_T), hdferr,&
file_space_id = filespace_id, mem_space_id = memspace_id, xfer_prp = plist_id)
call h5dwrite_f(dset_id, y_id,dataset_asArray(3,:),int(totalShape,HSIZE_T), hdferr,&
file_space_id = filespace_id, mem_space_id = memspace_id, xfer_prp = plist_id)
call h5dwrite_f(dset_id, z_id,dataset_asArray(4,:),int(totalShape,HSIZE_T), hdferr,&
file_space_id = filespace_id, mem_space_id = memspace_id, xfer_prp = plist_id)
if(hdferr < 0) error stop 'HDF5 error'
endif
call finalize_write(plist_id, dset_id, filespace_id, memspace_id)
end subroutine HDF5_write_rotation
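The removed HDF5_write_rotation built a 32-byte compound datatype with the four quaternion components as named double-precision members and wrote them field by field (hence h5pset_preserve_f). For reference, the same layout can be reproduced from Python with a structured dtype; this is roughly how pre-0.11 orientation datasets appear to h5py (an illustrative sketch, not DAMASK API):

    import numpy as np
    import h5py

    quat_t = np.dtype([('w', '<f8'), ('x', '<f8'), ('y', '<f8'), ('z', '<f8')])  # 4 x 8 byte compound
    q = np.zeros(10, dtype=quat_t)
    q['w'] = 1.0                                                # identity rotations

    with h5py.File('compound_demo.hdf5', 'w') as f:             # throwaway demo file
        f.create_dataset('orientation', data=q)                 # stored as an HDF5 compound type

After this merge, orientations are passed to the generic real-array writer instead (see crystallite_results below), so they also benefit from the new chunking and compression.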
!--------------------------------------------------------------------------------------------------
!> @brief initialize HDF5 handles, determines global shape and start for parallel read
!--------------------------------------------------------------------------------------------------
@@ -1789,7 +1708,7 @@ subroutine initialize_read(dset_id, filespace_id, memspace_id, plist_id, aplist_
!--------------------------------------------------------------------------------------------------
! creating a property list for IO and set it to collective
call h5pcreate_f(H5P_DATASET_ACCESS_F, aplist_id, hdferr)
if(hdferr < 0) error stop 'HDF5 error'
if(hdferr < 0) error stop 'HDF5 error'
#ifdef PETSc
call h5pset_all_coll_metadata_ops_f(aplist_id, .true., hdferr)
if(hdferr < 0) error stop 'HDF5 error'
@@ -1815,7 +1734,7 @@ end subroutine initialize_read
!--------------------------------------------------------------------------------------------------
subroutine finalize_read(dset_id, filespace_id, memspace_id, plist_id, aplist_id)
integer(HID_T), intent(in) :: dset_id, filespace_id, memspace_id, plist_id, aplist_id
integer(HID_T), intent(in) :: dset_id, filespace_id, memspace_id, plist_id, aplist_id
integer :: hdferr
call h5pclose_f(plist_id, hdferr)
@@ -1836,8 +1755,8 @@ end subroutine finalize_read
!> @brief initialize HDF5 handles, determines global shape and start for parallel write
!--------------------------------------------------------------------------------------------------
subroutine initialize_write(dset_id, filespace_id, memspace_id, plist_id, &
myStart, totalShape, &
loc_id,myShape,datasetName,datatype,parallel)
myStart, totalShape, &
loc_id,myShape,datasetName,datatype,parallel)
integer(HID_T), intent(in) :: loc_id !< file or group handle
character(len=*), intent(in) :: datasetName !< name of the dataset in the file
@@ -1850,10 +1769,10 @@ subroutine initialize_write(dset_id, filespace_id, memspace_id, plist_id, &
totalShape !< shape of the dataset (all processes)
integer(HID_T), intent(out) :: dset_id, filespace_id, memspace_id, plist_id
integer, dimension(worldsize) :: &
writeSize !< contribution of all processes
integer :: ierr
integer :: hdferr
integer, dimension(worldsize) :: writeSize !< contribution of all processes
integer(HID_T) :: dcpl
integer :: ierr, hdferr
integer(HSIZE_T), parameter :: chunkSize = 1024_HSIZE_T**2/8_HSIZE_T
!-------------------------------------------------------------------------------------------------
! creating a property list for transfer properties (is collective when reading in parallel)
@@ -1880,6 +1799,21 @@ subroutine initialize_write(dset_id, filespace_id, memspace_id, plist_id, &
myStart(ubound(myStart)) = int(sum(writeSize(1:worldrank)),HSIZE_T)
totalShape = [myShape(1:ubound(myShape,1)-1),int(sum(writeSize),HSIZE_T)]
!--------------------------------------------------------------------------------------------------
! compress (and chunk) larger datasets
call h5pcreate_f(H5P_DATASET_CREATE_F, dcpl, hdferr)
if(hdferr < 0) error stop 'HDF5 error'
if(product(totalShape) >= chunkSize*2_HSIZE_T) then
call h5pset_chunk_f(dcpl, size(totalShape), getChunks(totalShape,chunkSize), hdferr)
if(hdferr < 0) error stop 'HDF5 error'
call h5pset_shuffle_f(dcpl, hdferr)
if(hdferr < 0) error stop 'HDF5 error'
call h5pset_deflate_f(dcpl, 6, hdferr)
if(hdferr < 0) error stop 'HDF5 error'
call h5pset_Fletcher32_f(dcpl,hdferr)
if(hdferr < 0) error stop 'HDF5 error'
endif
!--------------------------------------------------------------------------------------------------
! create dataspace in memory (local shape) and in file (global shape)
call h5screate_simple_f(size(myShape), myShape, memspace_id, hdferr, myShape)
@@ -1889,11 +1823,29 @@ subroutine initialize_write(dset_id, filespace_id, memspace_id, plist_id, &
!--------------------------------------------------------------------------------------------------
! create dataset in the file and select a hyperslab from it (the portion of the current process)
call h5dcreate_f(loc_id, trim(datasetName), datatype, filespace_id, dset_id, hdferr)
call h5dcreate_f(loc_id, trim(datasetName), datatype, filespace_id, dset_id, hdferr, dcpl)
if(hdferr < 0) error stop 'HDF5 error'
call h5sselect_hyperslab_f(filespace_id, H5S_SELECT_SET_F, myStart, myShape, hdferr)
if(hdferr < 0) error stop 'HDF5 error'
call h5pclose_f(dcpl , hdferr)
if(hdferr < 0) error stop 'HDF5 error'
contains
!------------------------------------------------------------------------------------------------
!> @brief determine chunk layout
!------------------------------------------------------------------------------------------------
pure function getChunks(totalShape,chunkSize)
integer(HSIZE_T), dimension(:), intent(in) :: totalShape
integer(HSIZE_T), intent(in) :: chunkSize
integer(HSIZE_T), dimension(size(totalShape)) :: getChunks
getChunks = [totalShape(1:size(totalShape)-1),&
chunkSize/product(totalShape(1:size(totalShape)-1))]
end function getChunks
end subroutine initialize_write
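getChunks keeps all leading dimensions intact and sizes only the last one, which is the dimension the MPI processes append along, so each chunk holds roughly chunkSize elements (1024**2/8 = 131072, exactly 1 MiB of 8-byte reals); datasets with fewer than two chunks' worth of elements stay contiguous and unfiltered. The same arithmetic in Python, with a worked example for a 3x3 tensor field over 200000 material points (shapes are illustrative):

    import math

    CHUNK_SIZE = 1024**2 // 8                     # 131072 elements = 1 MiB of float64

    def get_chunks(total_shape):
        """Mirror of getChunks: full leading dims, last dim sized to ~CHUNK_SIZE elements."""
        lead = math.prod(total_shape[:-1])
        return tuple(total_shape[:-1]) + (CHUNK_SIZE // lead,)

    print(get_chunks((3, 3, 200000)))             # (3, 3, 14563): 9*14563 = 131067 elements per chunk

Note that the Python writer in Result.py applies the same rule to the transposed layout, where the point dimension comes first, hence chunks = (chunk_size//np.prod(shape[1:]),)+shape[1:].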
@@ -1916,4 +1868,5 @@ subroutine finalize_write(plist_id, dset_id, filespace_id, memspace_id)
end subroutine finalize_write
end module HDF5_Utilities

View File

@@ -454,10 +454,10 @@ end subroutine constitutive_plastic_LpAndItsTangents
module subroutine plastic_results
integer :: p
character(len=pStringLen) :: group
character(len=:), allocatable :: group
plasticityLoop: do p=1,size(material_name_phase)
group = trim('current/phase')//'/'//trim(material_name_phase(p))
group = '/current/phase/'//trim(material_name_phase(p))
call results_closeGroup(results_addGroup(group))
group = trim(group)//'/plastic'

View File

@@ -734,9 +734,9 @@ end function crystallite_push33ToRef
subroutine crystallite_results
integer :: p,o
real(pReal), allocatable, dimension(:,:,:) :: selected_tensors
type(rotation), allocatable, dimension(:) :: selected_rotations
character(len=:), allocatable :: group,structureLabel
real(pReal), allocatable, dimension(:,:,:) :: selected_tensors
real(pReal), allocatable, dimension(:,:) :: selected_rotations
character(len=:), allocatable :: group,structureLabel
do p=1,size(material_name_phase)
group = trim('current/phase')//'/'//trim(material_name_phase(p))//'/mechanics'
@@ -794,7 +794,8 @@ subroutine crystallite_results
end select
selected_rotations = select_rotations(crystallite_orientation,p)
call results_writeDataset(group,selected_rotations,output_constituent(p)%label(o),&
'crystal orientation as quaternion',structureLabel)
'crystal orientation as quaternion','q_0 <q_1 q_2 q_3>')
call results_addAttribute('Lattice',structureLabel,group//'/'//output_constituent(p)%label(o))
end select
enddo
enddo
@@ -835,10 +836,10 @@ subroutine crystallite_results
integer, intent(in) :: instance
type(rotation), dimension(:,:,:), intent(in) :: dataset
type(rotation), allocatable, dimension(:) :: select_rotations
real(pReal), allocatable, dimension(:,:) :: select_rotations
integer :: e,i,c,j
allocate(select_rotations(count(material_phaseAt==instance)*homogenization_maxNconstituents*discretization_nIPs))
allocate(select_rotations(4,count(material_phaseAt==instance)*homogenization_maxNconstituents*discretization_nIPs))
j=0
do e = 1, size(material_phaseAt,2)
@@ -846,7 +847,7 @@ subroutine crystallite_results
do c = 1, size(material_phaseAt,1) !ToDo: this needs to be changed for varying Ngrains
if (material_phaseAt(c,e) == instance) then
j = j + 1
select_rotations(j) = dataset(c,i,e)
select_rotations(1:4,j) = dataset(c,i,e)%asQuaternion()
endif
enddo
enddo
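select_rotations now collects the orientations of the selected constituents into a plain real array of shape (4, N), with the quaternion stored as q_0 <q_1 q_2 q_3>, instead of an array of type(rotation); that is what lets crystallite_results call the generic results_writeDataset and hence the compressed write path. Roughly the same selection expressed in numpy (shapes, names and loop order are illustrative only):

    import numpy as np

    # stand-ins for dataset(c,i,e) and material_phaseAt(c,e)
    n_constituents, n_IPs, n_elems = 2, 4, 100
    quats   = np.random.rand(n_constituents, n_IPs, n_elems, 4)      # unit quaternions in practice
    phaseAt = np.random.randint(1, 3, (n_constituents, n_elems))     # phase index per (constituent, element)

    instance = 1
    mask = np.broadcast_to(phaseAt[:, None, :] == instance, quats.shape[:3])
    selected = quats[mask].T                                         # (4, N), like select_rotations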

View File

@@ -16,6 +16,7 @@ module grid_mech_FEM
use IO
use HDF5_utilities
use math
use rotations
use spectral_utilities
use FEsolving
use config

View File

@@ -16,6 +16,7 @@ module grid_mech_spectral_basic
use IO
use HDF5_utilities
use math
use rotations
use spectral_utilities
use FEsolving
use config

View File

@@ -16,6 +16,7 @@ module grid_mech_spectral_polarisation
use IO
use HDF5_utilities
use math
use rotations
use spectral_utilities
use FEsolving
use config

View File

@@ -52,7 +52,7 @@ module material
HOMOGENIZATION_RGC_ID
end enum
character(len=pStringLen), public, protected, allocatable, dimension(:) :: &
character(len=:), public, protected, allocatable, dimension(:) :: &
material_name_phase, & !< name of each phase
material_name_homogenization !< name of each homogenization
@@ -392,13 +392,21 @@ end subroutine sanityCheck
function getKeys(dict)
class(tNode), intent(in) :: dict
character(len=pStringLen), dimension(:), allocatable :: getKeys
character(len=:), dimension(:), allocatable :: getKeys
character(len=pStringLen), dimension(:), allocatable :: temp
integer :: i
integer :: i,l
allocate(getKeys(dict%length))
allocate(temp(dict%length))
l = 0
do i=1, dict%length
getKeys(i) = dict%getKey(i)
temp(i) = dict%getKey(i)
l = max(len_trim(temp(i)),l)
enddo
allocate(character(l)::getKeys(dict%length))
do i=1, dict%length
getKeys(i) = trim(temp(i))
enddo
end function getKeys

View File

@@ -8,7 +8,6 @@ module results
use DAMASK_interface
use parallelization
use IO
use rotations
use HDF5_utilities
#ifdef PETSc
use PETSC
@@ -20,27 +19,21 @@ module results
integer(HID_T) :: resultsFile
interface results_writeDataset
module procedure results_writeTensorDataset_real
module procedure results_writeVectorDataset_real
module procedure results_writeScalarDataset_real
module procedure results_writeTensorDataset_int
module procedure results_writeVectorDataset_int
module procedure results_writeScalarDataset_rotation
end interface results_writeDataset
interface results_addAttribute
module procedure results_addAttribute_real
module procedure results_addAttribute_int
module procedure results_addAttribute_str
module procedure results_addAttribute_int_array
module procedure results_addAttribute_real_array
end interface results_addAttribute
public :: &
@@ -74,7 +67,7 @@ subroutine results_init(restart)
if(.not. restart) then
resultsFile = HDF5_openFile(trim(getSolverJobName())//'.hdf5','w',.true.)
call results_addAttribute('DADF5_version_major',0)
call results_addAttribute('DADF5_version_minor',10)
call results_addAttribute('DADF5_version_minor',11)
call results_addAttribute('DAMASK_version',DAMASKVERSION)
call get_command(commandLine)
call results_addAttribute('Call',trim(commandLine))
@@ -465,46 +458,14 @@ subroutine results_writeTensorDataset_int(group,dataset,label,description,SIunit
end subroutine results_writeTensorDataset_int
!--------------------------------------------------------------------------------------------------
!> @brief stores a scalar dataset in a group
!--------------------------------------------------------------------------------------------------
subroutine results_writeScalarDataset_rotation(group,dataset,label,description,lattice_structure)
character(len=*), intent(in) :: label,group,description
character(len=*), intent(in), optional :: lattice_structure
type(rotation), intent(inout), dimension(:) :: dataset
integer(HID_T) :: groupHandle
groupHandle = results_openGroup(group)
#ifdef PETSc
call HDF5_write(groupHandle,dataset,label,.true.)
#else
call HDF5_write(groupHandle,dataset,label,.false.)
#endif
if (HDF5_objectExists(groupHandle,label)) &
call HDF5_addAttribute(groupHandle,'Description',description,label)
if (HDF5_objectExists(groupHandle,label) .and. present(lattice_structure)) &
call HDF5_addAttribute(groupHandle,'Lattice',lattice_structure,label)
if (HDF5_objectExists(groupHandle,label)) &
call HDF5_addAttribute(groupHandle,'Creator','DAMASK '//DAMASKVERSION,label)
if (HDF5_objectExists(groupHandle,label)) &
call HDF5_addAttribute(groupHandle,'Created',now(),label)
call HDF5_closeGroup(groupHandle)
end subroutine results_writeScalarDataset_rotation
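With the rotation-specific writer gone, the lattice information is no longer attached by the dataset writer itself; crystallite_results now adds it explicitly as a 'Lattice' attribute on the orientation dataset via results_addAttribute (see the crystallite_results hunk above), which is where the Python Result class reads it from. A quick way to look at it with h5py (file name and path are placeholders; the stored value depends on the DADF5 version):

    import h5py

    with h5py.File('job.hdf5', 'r') as f:                             # placeholder file name
        o = f['increment_0/phase/Aluminum/mechanics/orientation']     # placeholder dataset path
        print(dict(o.attrs))                                          # includes 'Lattice', 'Description', 'Created', ...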
!--------------------------------------------------------------------------------------------------
!> @brief adds the unique mapping from spatial position and constituent ID to results
!--------------------------------------------------------------------------------------------------
subroutine results_mapping_constituent(phaseAt,memberAtLocal,label)
integer, dimension(:,:), intent(in) :: phaseAt !< phase section at (constituent,element)
integer, dimension(:,:,:), intent(in) :: memberAtLocal !< phase member at (constituent,IP,element)
character(len=pStringLen), dimension(:), intent(in) :: label !< label of each phase section
integer, dimension(:,:,:), intent(in) :: memberAtLocal !< phase member at (constituent,IP,element)
character(len=*), dimension(:), intent(in) :: label !< label of each phase section
integer, dimension(size(memberAtLocal,1),size(memberAtLocal,2),size(memberAtLocal,3)) :: &
phaseAtMaterialpoint, &
@@ -527,7 +488,6 @@ subroutine results_mapping_constituent(phaseAt,memberAtLocal,label)
plist_id, &
dt_id
integer(SIZE_T) :: type_size_string, type_size_int
integer :: hdferr, ierr, i
@@ -571,10 +531,10 @@ subroutine results_mapping_constituent(phaseAt,memberAtLocal,label)
if(hdferr < 0) error stop 'HDF5 error'
memberOffset = 0
do i=1, size(label)
memberOffset(i,worldrank) = count(phaseAt == i)*size(memberAtLocal,2) ! number of points/instance of this process
memberOffset(i,worldrank) = count(phaseAt == i)*size(memberAtLocal,2) ! number of points/instance of this process
enddo
writeSize = 0
writeSize(worldrank) = size(memberAtLocal(1,:,:)) ! total number of points by this process
writeSize(worldrank) = size(memberAtLocal(1,:,:)) ! total number of points by this process
!--------------------------------------------------------------------------------------------------
! MPI settings and communication
@@ -658,8 +618,8 @@ end subroutine results_mapping_constituent
subroutine results_mapping_homogenization(homogenizationAt,memberAtLocal,label)
integer, dimension(:), intent(in) :: homogenizationAt !< homogenization section at (element)
integer, dimension(:,:), intent(in) :: memberAtLocal !< homogenization member at (IP,element)
character(len=pStringLen), dimension(:), intent(in) :: label !< label of each homogenization section
integer, dimension(:,:), intent(in) :: memberAtLocal !< homogenization member at (IP,element)
character(len=*), dimension(:), intent(in) :: label !< label of each homogenization section
integer, dimension(size(memberAtLocal,1),size(memberAtLocal,2)) :: &
homogenizationAtMaterialpoint, &
@@ -682,7 +642,6 @@ subroutine results_mapping_homogenization(homogenizationAt,memberAtLocal,label)
plist_id, &
dt_id
integer(SIZE_T) :: type_size_string, type_size_int
integer :: hdferr, ierr, i