diff --git a/DAMASK_prerequisites.sh b/DAMASK_prerequisites.sh
index 25a2e46e0..181fd46b5 100755
--- a/DAMASK_prerequisites.sh
+++ b/DAMASK_prerequisites.sh
@@ -84,7 +84,7 @@ for executable in python python3; do
 done
 secondLevel "Details on $DEFAULT_PYTHON:"
 echo $(ls -la $(which $DEFAULT_PYTHON))
-for module in numpy scipy pandas;do
+for module in numpy scipy pandas matplotlib yaml h5py;do
   thirdLevel $module
   $DEFAULT_PYTHON -c "import $module; \
                       print('Version: {}'.format($module.__version__)); \
@@ -94,10 +94,6 @@ thirdLevel vtk
 $DEFAULT_PYTHON -c "import vtk; \
                     print('Version: {}'.format(vtk.vtkVersion.GetVTKVersion())); \
                     print('Location: {}'.format(vtk.__file__))"
-thirdLevel h5py
-$DEFAULT_PYTHON -c "import h5py; \
-                    print('Version: {}'.format(h5py.version.version)); \
-                    print('Location: {}'.format(h5py.__file__))"
 
 firstLevel "GNU Compiler Collection"
 for executable in gcc g++ gfortran ;do
diff --git a/PRIVATE b/PRIVATE
index 866ed0725..f654a4143 160000
--- a/PRIVATE
+++ b/PRIVATE
@@ -1 +1 @@
-Subproject commit 866ed072549a3794f78339dedfb518a305c62d16
+Subproject commit f654a4143b1fbbecd137dc5d2193f5cf48ab1448
diff --git a/VERSION b/VERSION
index 26e9be190..41ac4ffd3 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-v3.0.0-alpha2-478-gc9e4dc21f
+v3.0.0-alpha2-500-gfdb182b40
diff --git a/python/damask/_configmaterial.py b/python/damask/_configmaterial.py
index 0adc494b8..1cda2e46b 100644
--- a/python/damask/_configmaterial.py
+++ b/python/damask/_configmaterial.py
@@ -103,7 +103,6 @@ class ConfigMaterial(Config):
         """Check for completeness."""
         ok = True
         for top_level in ['homogenization','phase','material']:
-          # ToDo: With python 3.8 as prerequisite we can shorten with :=
           ok &= top_level in self
           if top_level not in self: print(f'{top_level} entry missing')
 
@@ -203,7 +202,7 @@ class ConfigMaterial(Config):
         """
         dup = self.copy()
         for i,m in enumerate(dup['material']):
-            if ID and i not in ID: continue
+            if ID is not None and i not in ID: continue
             for c in m['constituents']:
                 if constituent is not None and c not in constituent: continue
                 try:
@@ -227,7 +226,7 @@ class ConfigMaterial(Config):
         """
         dup = self.copy()
         for i,m in enumerate(dup['material']):
-            if ID and i not in ID: continue
+            if ID is not None and i not in ID: continue
             try:
                 m['homogenization'] = mapping[m['homogenization']]
             except KeyError:
@@ -249,7 +248,7 @@ class ConfigMaterial(Config):
         Examples
         --------
         >>> import damask
-        >>> O = damask.Rotation.from_random(3).as_quaternion()
+        >>> O = damask.Rotation.from_random(3)
         >>> phase = ['Aluminum','Steel','Aluminum']
        >>> m = damask.ConfigMaterial().material_add(constituents={'phase':phase,'O':O},
        ...                                          homogenization='SX')
diff --git a/python/damask/_grid.py b/python/damask/_grid.py
index 103538349..8125fb045 100644
--- a/python/damask/_grid.py
+++ b/python/damask/_grid.py
@@ -202,7 +202,7 @@ class Grid:
             Geometry file to read.
 
         """
-        warnings.warn('Support for ASCII-based geom format will be removed in DAMASK 3.1.0', DeprecationWarning)
+        warnings.warn('Support for ASCII-based geom format will be removed in DAMASK 3.1.0', DeprecationWarning,2)
         try:
             f = open(fname)
         except TypeError:
@@ -541,7 +541,7 @@ class Grid:
             Compress geometry with 'x of y' and 'a to b'.
 
""" - warnings.warn('Support for ASCII-based geom format will be removed in DAMASK 3.1.0', DeprecationWarning) + warnings.warn('Support for ASCII-based geom format will be removed in DAMASK 3.1.0', DeprecationWarning,2) header = [f'{len(self.comments)+4} header'] + self.comments \ + ['grid a {} b {} c {}'.format(*self.cells), 'size x {} y {} z {}'.format(*self.size), @@ -760,7 +760,7 @@ class Grid: """ if fill is None: fill = np.nanmax(self.material) + 1 - dtype = float if np.isnan(fill) or int(fill) != fill or self.material.dtype==np.float else int + dtype = float if isinstance(fill,float) or self.material.dtype in np.sctypes['float'] else int material = self.material # These rotations are always applied in the reference coordinate system, i.e. (z,x,z) not (z,x',z'') diff --git a/python/damask/_rotation.py b/python/damask/_rotation.py index 4109c181e..86311546e 100644 --- a/python/damask/_rotation.py +++ b/python/damask/_rotation.py @@ -125,9 +125,18 @@ class Rotation: return np.logical_not(self==other) + def __array__(self): + """Initializer for numpy.""" + return self.quaternion + + + @property + def size(self): + return self.quaternion[...,0].size + @property def shape(self): - return self.quaternion.shape[:-1] + return self.quaternion[...,0].shape def __len__(self): diff --git a/python/damask/_vtk.py b/python/damask/_vtk.py index 00a07efa5..b9f237297 100644 --- a/python/damask/_vtk.py +++ b/python/damask/_vtk.py @@ -246,8 +246,8 @@ class VTK: raise ValueError('No label defined for numpy.ndarray') N_data = data.shape[0] - d = np_to_vtk((data.astype(np.float32) if data.dtype in [np.float64, np.float128] - else data).reshape(N_data,-1),deep=True) # avoid large files + d = np_to_vtk((data.astype(np.single) if data.dtype in [np.double, np.longdouble] else + data).reshape(N_data,-1),deep=True) # avoid large files d.SetName(label) if N_data == N_points: diff --git a/python/damask/util.py b/python/damask/util.py index fb122bd11..cda532bc0 100644 --- a/python/damask/util.py +++ b/python/damask/util.py @@ -183,7 +183,7 @@ def scale_to_coprime(v): # Python 3.9 provides math.lcm, see https://stackoverflow.com/questions/51716916. 
         return a * b // np.gcd(a, b)
 
-    m = (np.array(v) * reduce(lcm, map(lambda x: int(get_square_denominator(x)),v)) ** 0.5).astype(np.int)
+    m = (np.array(v) * reduce(lcm, map(lambda x: int(get_square_denominator(x)),v)) ** 0.5).astype(int)
     m = m//reduce(np.gcd,m)
 
     with np.errstate(invalid='ignore'):
diff --git a/python/setup.py b/python/setup.py
index 19fbdcd13..0642c0b7d 100644
--- a/python/setup.py
+++ b/python/setup.py
@@ -6,28 +6,29 @@ with open(Path(__file__).parent/'damask/VERSION') as f:
     version = re.sub(r'(-([^-]*)).*$',r'.\2',re.sub(r'^v(\d+\.\d+(\.\d+)?)',r'\1',f.readline().strip()))
 
 setuptools.setup(
-    name="damask",
+    name='damask',
     version=version,
-    author="The DAMASK team",
-    author_email="damask@mpie.de",
-    description="DAMASK library",
-    long_description="Python library for pre and post processing of DAMASK simulations",
-    url="https://damask.mpie.de",
+    author='The DAMASK team',
+    author_email='damask@mpie.de',
+    description='DAMASK library',
+    long_description='Python library for pre and post processing of DAMASK simulations',
+    url='https://damask.mpie.de',
     packages=setuptools.find_packages(),
     include_package_data=True,
+    python_requires = '>=3.6',
     install_requires = [
-        "pandas",            # requires numpy
-        "scipy",
-        "h5py",              # requires numpy
-        "vtk",
-        "matplotlib",        # requires numpy, pillow
-        "pyaml"
+        'pandas>=0.24',      # requires numpy
+        'scipy>=1.2',
+        'h5py>=2.9',         # requires numpy
+        'vtk>=8.1',
+        'matplotlib>=3.0',   # requires numpy, pillow
+        'pyaml>=3.12'
     ],
     classifiers = [
-        "Intended Audience :: Science/Research",
-        "Topic :: Scientific/Engineering",
-        "Programming Language :: Python :: 3",
-        "License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)",
-        "Operating System :: OS Independent",
+        'Intended Audience :: Science/Research',
+        'Topic :: Scientific/Engineering',
+        'Programming Language :: Python :: 3',
+        'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',
+        'Operating System :: OS Independent',
     ],
 )
diff --git a/python/tests/test_Grid.py b/python/tests/test_Grid.py
index 48831f917..a239165db 100644
--- a/python/tests/test_Grid.py
+++ b/python/tests/test_Grid.py
@@ -347,7 +347,7 @@ class TestGrid:
     @pytest.mark.parametrize('approach',['Laguerre','Voronoi'])
     def test_tessellate_bicrystal(self,approach):
         cells = np.random.randint(5,10,3)*2
-        size = cells.astype(np.float)
+        size = cells.astype(float)
         seeds = np.vstack((size*np.array([0.5,0.25,0.5]),size*np.array([0.5,0.75,0.5])))
         material = np.zeros(cells)
         material[:,cells[1]//2:,:] = 1
diff --git a/python/tests/test_Rotation.py b/python/tests/test_Rotation.py
index 6bee44e7f..b28a849c5 100644
--- a/python/tests/test_Rotation.py
+++ b/python/tests/test_Rotation.py
@@ -689,6 +689,10 @@ class TestRotation:
         with pytest.raises(TypeError):
             Rotation(np.ones(3))
 
+    def test_to_numpy(self):
+        r = Rotation.from_random(np.random.randint(0,10,4))
+        assert np.all(r.as_quaternion() == np.array(r))
+
     @pytest.mark.parametrize('degrees',[True,False])
     def test_Eulers(self,set_of_rotations,degrees):
         for rot in set_of_rotations:
@@ -804,7 +808,11 @@ class TestRotation:
         r = Rotation.from_random()
         assert r == ~~r
 
-    @pytest.mark.parametrize('shape',[None,1,(1,),(4,2),(1,1,1)])
+    @pytest.mark.parametrize('shape',[1,(1,),(4,2),(1,1,1),tuple(np.random.randint(0,10,4))])
+    def test_size(self,shape):
+        assert Rotation.from_random(shape).size == np.prod(shape)
+
+    @pytest.mark.parametrize('shape',[None,1,(1,),(4,2),(1,1,1),tuple(np.random.randint(0,10,4))])
     def test_shape(self,shape):
         r = Rotation.from_random(shape=shape)
         assert r.shape == (shape if isinstance(shape,tuple) else (shape,) if shape else ())
diff --git a/src/CPFEM2.f90 b/src/CPFEM2.f90
index e57de7f67..bf044bef9 100644
--- a/src/CPFEM2.f90
+++ b/src/CPFEM2.f90
@@ -75,7 +75,6 @@ end subroutine CPFEM_initAll
 subroutine CPFEM_init
 
   integer(HID_T) :: fileHandle
-  character(len=pStringLen) :: fileName
 
   print'(/,a)', ' <<<+- CPFEM init -+>>>'; flush(IO_STDOUT)
 
@@ -83,8 +82,8 @@ subroutine CPFEM_init
   if (interface_restartInc > 0) then
     print'(/,a,i0,a)', ' reading restart information of increment from file'; flush(IO_STDOUT)
 
-    write(fileName,'(a,i0,a)') trim(getSolverJobName())//'_',worldrank,'.hdf5'
-    fileHandle = HDF5_openFile(fileName)
+
+    fileHandle = HDF5_openFile(getSolverJobName()//'_restart.hdf5','r')
 
     call homogenization_restartRead(fileHandle)
     call phase_restartRead(fileHandle)
@@ -101,13 +100,11 @@ end subroutine CPFEM_init
 subroutine CPFEM_restartWrite
 
   integer(HID_T) :: fileHandle
-  character(len=pStringLen) :: fileName
 
 
   print*, ' writing field and constitutive data required for restart to file';flush(IO_STDOUT)
 
-  write(fileName,'(a,i0,a)') trim(getSolverJobName())//'_',worldrank,'.hdf5'
-  fileHandle = HDF5_openFile(fileName,'a')
+  fileHandle = HDF5_openFile(getSolverJobName()//'_restart.hdf5','a')
 
   call homogenization_restartWrite(fileHandle)
   call phase_restartWrite(fileHandle)
diff --git a/src/HDF5_utilities.f90 b/src/HDF5_utilities.f90
index 781089ad8..ce00c4913 100644
--- a/src/HDF5_utilities.f90
+++ b/src/HDF5_utilities.f90
@@ -71,6 +71,12 @@ module HDF5_utilities
     module procedure HDF5_addAttribute_real_array
   end interface HDF5_addAttribute
 
+#ifdef PETSc
+  logical, parameter, private :: parallel_default = .true.
+#else
+  logical, parameter, private :: parallel_default = .false.
+#endif
+
 contains
 
 
@@ -105,16 +111,16 @@ end subroutine HDF5_utilities_init
 !--------------------------------------------------------------------------------------------------
 !> @brief open and initializes HDF5 output file
 !--------------------------------------------------------------------------------------------------
-integer(HID_T) function HDF5_openFile(fileName,mode,parallel)
+integer(HID_T) function HDF5_openFile(fileName,mode)
 
   character(len=*), intent(in)           :: fileName
   character,        intent(in), optional :: mode
-  logical,          intent(in), optional :: parallel
 
   character      :: m
   integer(HID_T) :: plist_id
   integer        :: hdferr
 
+
   if (present(mode)) then
     m = mode
   else
@@ -125,10 +131,8 @@ integer(HID_T) function HDF5_openFile(fileName,mode,parallel)
   if(hdferr < 0) error stop 'HDF5 error'
 
 #ifdef PETSc
-  if (present(parallel)) then; if (parallel) then
-    call h5pset_fapl_mpio_f(plist_id, PETSC_COMM_WORLD, MPI_INFO_NULL, hdferr)
-    if(hdferr < 0) error stop 'HDF5 error'
-  endif; endif
+  call h5pset_fapl_mpio_f(plist_id, PETSC_COMM_WORLD, MPI_INFO_NULL, hdferr)
+  if(hdferr < 0) error stop 'HDF5 error'
 #endif
 
   if (m == 'w') then
@@ -547,7 +551,7 @@ subroutine HDF5_read_real1(loc_id,dataset,datasetName,parallel)
                             myStart, totalShape, loc_id,myShape,datasetName,parallel)
   else
     call initialize_read(dset_id, filespace_id, memspace_id, plist_id, aplist_id, &
-                         myStart, totalShape, loc_id,myShape,datasetName,.false.)
+                         myStart, totalShape, loc_id,myShape,datasetName,parallel_default)
   endif
 
   call h5dread_f(dset_id, H5T_NATIVE_DOUBLE,dataset,totalShape, hdferr,&
@@ -587,7 +591,7 @@ subroutine HDF5_read_real2(loc_id,dataset,datasetName,parallel)
                             myStart, totalShape, loc_id,myShape,datasetName,parallel)
   else
     call initialize_read(dset_id, filespace_id, memspace_id, plist_id, aplist_id, &
-                         myStart, totalShape, loc_id,myShape,datasetName,.false.)
+                         myStart, totalShape, loc_id,myShape,datasetName,parallel_default)
   endif
 
   call h5dread_f(dset_id, H5T_NATIVE_DOUBLE,dataset,totalShape, hdferr,&
@@ -627,7 +631,7 @@ subroutine HDF5_read_real3(loc_id,dataset,datasetName,parallel)
                             myStart, totalShape, loc_id,myShape,datasetName,parallel)
   else
     call initialize_read(dset_id, filespace_id, memspace_id, plist_id, aplist_id, &
-                         myStart, totalShape, loc_id,myShape,datasetName,.false.)
+                         myStart, totalShape, loc_id,myShape,datasetName,parallel_default)
   endif
 
   call h5dread_f(dset_id, H5T_NATIVE_DOUBLE,dataset,totalShape, hdferr,&
@@ -667,7 +671,7 @@ subroutine HDF5_read_real4(loc_id,dataset,datasetName,parallel)
                             myStart, totalShape, loc_id,myShape,datasetName,parallel)
   else
     call initialize_read(dset_id, filespace_id, memspace_id, plist_id, aplist_id, &
-                         myStart, totalShape, loc_id,myShape,datasetName,.false.)
+                         myStart, totalShape, loc_id,myShape,datasetName,parallel_default)
   endif
 
   call h5dread_f(dset_id, H5T_NATIVE_DOUBLE,dataset,totalShape, hdferr,&
@@ -707,7 +711,7 @@ subroutine HDF5_read_real5(loc_id,dataset,datasetName,parallel)
                             myStart, totalShape, loc_id,myShape,datasetName,parallel)
   else
     call initialize_read(dset_id, filespace_id, memspace_id, plist_id, aplist_id, &
-                         myStart, totalShape, loc_id,myShape,datasetName,.false.)
+                         myStart, totalShape, loc_id,myShape,datasetName,parallel_default)
   endif
 
   call h5dread_f(dset_id, H5T_NATIVE_DOUBLE,dataset,totalShape, hdferr,&
@@ -747,7 +751,7 @@ subroutine HDF5_read_real6(loc_id,dataset,datasetName,parallel)
                             myStart, totalShape, loc_id,myShape,datasetName,parallel)
   else
     call initialize_read(dset_id, filespace_id, memspace_id, plist_id, aplist_id, &
-                         myStart, totalShape, loc_id,myShape,datasetName,.false.)
+                         myStart, totalShape, loc_id,myShape,datasetName,parallel_default)
   endif
 
   call h5dread_f(dset_id, H5T_NATIVE_DOUBLE,dataset,totalShape, hdferr,&
@@ -787,7 +791,7 @@ subroutine HDF5_read_real7(loc_id,dataset,datasetName,parallel)
                             myStart, totalShape, loc_id,myShape,datasetName,parallel)
   else
     call initialize_read(dset_id, filespace_id, memspace_id, plist_id, aplist_id, &
-                         myStart, totalShape, loc_id,myShape,datasetName,.false.)
+                         myStart, totalShape, loc_id,myShape,datasetName,parallel_default)
   endif
 
   call h5dread_f(dset_id, H5T_NATIVE_DOUBLE,dataset,totalShape, hdferr,&
@@ -829,7 +833,7 @@ subroutine HDF5_read_int1(loc_id,dataset,datasetName,parallel)
                             myStart, totalShape, loc_id,myShape,datasetName,parallel)
   else
     call initialize_read(dset_id, filespace_id, memspace_id, plist_id, aplist_id, &
-                         myStart, totalShape, loc_id,myShape,datasetName,.false.)
+                         myStart, totalShape, loc_id,myShape,datasetName,parallel_default)
   endif
 
   call h5dread_f(dset_id, H5T_NATIVE_INTEGER,dataset,totalShape, hdferr,&
@@ -869,7 +873,7 @@ subroutine HDF5_read_int2(loc_id,dataset,datasetName,parallel)
                             myStart, totalShape, loc_id,myShape,datasetName,parallel)
   else
     call initialize_read(dset_id, filespace_id, memspace_id, plist_id, aplist_id, &
-                         myStart, totalShape, loc_id,myShape,datasetName,.false.)
+                         myStart, totalShape, loc_id,myShape,datasetName,parallel_default)
   endif
 
   call h5dread_f(dset_id, H5T_NATIVE_INTEGER,dataset,totalShape, hdferr,&
@@ -909,7 +913,7 @@ subroutine HDF5_read_int3(loc_id,dataset,datasetName,parallel)
                             myStart, totalShape, loc_id,myShape,datasetName,parallel)
   else
     call initialize_read(dset_id, filespace_id, memspace_id, plist_id, aplist_id, &
-                         myStart, totalShape, loc_id,myShape,datasetName,.false.)
+                         myStart, totalShape, loc_id,myShape,datasetName,parallel_default)
   endif
 
   call h5dread_f(dset_id, H5T_NATIVE_INTEGER,dataset,totalShape, hdferr,&
@@ -949,7 +953,7 @@ subroutine HDF5_read_int4(loc_id,dataset,datasetName,parallel)
                             myStart, totalShape, loc_id,myShape,datasetName,parallel)
   else
     call initialize_read(dset_id, filespace_id, memspace_id, plist_id, aplist_id, &
-                         myStart, totalShape, loc_id,myShape,datasetName,.false.)
+                         myStart, totalShape, loc_id,myShape,datasetName,parallel_default)
   endif
 
   call h5dread_f(dset_id, H5T_NATIVE_INTEGER,dataset,totalShape, hdferr,&
@@ -989,7 +993,7 @@ subroutine HDF5_read_int5(loc_id,dataset,datasetName,parallel)
                             myStart, totalShape, loc_id,myShape,datasetName,parallel)
   else
     call initialize_read(dset_id, filespace_id, memspace_id, plist_id, aplist_id, &
-                         myStart, totalShape, loc_id,myShape,datasetName,.false.)
+                         myStart, totalShape, loc_id,myShape,datasetName,parallel_default)
   endif
 
   call h5dread_f(dset_id, H5T_NATIVE_INTEGER,dataset,totalShape, hdferr,&
@@ -1029,7 +1033,7 @@ subroutine HDF5_read_int6(loc_id,dataset,datasetName,parallel)
                             myStart, totalShape, loc_id,myShape,datasetName,parallel)
   else
     call initialize_read(dset_id, filespace_id, memspace_id, plist_id, aplist_id, &
-                         myStart, totalShape, loc_id,myShape,datasetName,.false.)
+                         myStart, totalShape, loc_id,myShape,datasetName,parallel_default)
   endif
 
   call h5dread_f(dset_id, H5T_NATIVE_INTEGER,dataset,totalShape, hdferr,&
@@ -1069,7 +1073,7 @@ subroutine HDF5_read_int7(loc_id,dataset,datasetName,parallel)
                             myStart, totalShape, loc_id,myShape,datasetName,parallel)
   else
     call initialize_read(dset_id, filespace_id, memspace_id, plist_id, aplist_id, &
-                         myStart, totalShape, loc_id,myShape,datasetName,.false.)
+                         myStart, totalShape, loc_id,myShape,datasetName,parallel_default)
   endif
 
   call h5dread_f(dset_id, H5T_NATIVE_INTEGER,dataset,totalShape, hdferr,&
@@ -1109,7 +1113,7 @@ subroutine HDF5_write_real1(loc_id,dataset,datasetName,parallel)
                              myStart, totalShape,loc_id,myShape,datasetName,H5T_NATIVE_DOUBLE,parallel)
   else
     call initialize_write(dset_id, filespace_id, memspace_id, plist_id, &
-                          myStart, totalShape,loc_id,myShape,datasetName,H5T_NATIVE_DOUBLE,.false.)
+                          myStart, totalShape,loc_id,myShape,datasetName,H5T_NATIVE_DOUBLE,parallel_default)
   endif
 
   if (product(totalShape) /= 0) then
@@ -1150,7 +1154,7 @@ subroutine HDF5_write_real2(loc_id,dataset,datasetName,parallel)
                              myStart, totalShape, loc_id,myShape,datasetName,H5T_NATIVE_DOUBLE,parallel)
   else
     call initialize_write(dset_id, filespace_id, memspace_id, plist_id, &
-                          myStart, totalShape, loc_id,myShape,datasetName,H5T_NATIVE_DOUBLE,.false.)
+                          myStart, totalShape, loc_id,myShape,datasetName,H5T_NATIVE_DOUBLE,parallel_default)
   endif
 
   if (product(totalShape) /= 0) then
@@ -1191,7 +1195,7 @@ subroutine HDF5_write_real3(loc_id,dataset,datasetName,parallel)
                              myStart, totalShape, loc_id,myShape,datasetName,H5T_NATIVE_DOUBLE,parallel)
   else
     call initialize_write(dset_id, filespace_id, memspace_id, plist_id, &
-                          myStart, totalShape, loc_id,myShape,datasetName,H5T_NATIVE_DOUBLE,.false.)
+                          myStart, totalShape, loc_id,myShape,datasetName,H5T_NATIVE_DOUBLE,parallel_default)
   endif
 
   if (product(totalShape) /= 0) then
@@ -1232,7 +1236,7 @@ subroutine HDF5_write_real4(loc_id,dataset,datasetName,parallel)
                              myStart, totalShape, loc_id,myShape,datasetName,H5T_NATIVE_DOUBLE,parallel)
   else
     call initialize_write(dset_id, filespace_id, memspace_id, plist_id, &
-                          myStart, totalShape, loc_id,myShape,datasetName,H5T_NATIVE_DOUBLE,.false.)
+                          myStart, totalShape, loc_id,myShape,datasetName,H5T_NATIVE_DOUBLE,parallel_default)
   endif
 
   if (product(totalShape) /= 0) then
@@ -1274,7 +1278,7 @@ subroutine HDF5_write_real5(loc_id,dataset,datasetName,parallel)
                              myStart, totalShape, loc_id,myShape,datasetName,H5T_NATIVE_DOUBLE,parallel)
   else
     call initialize_write(dset_id, filespace_id, memspace_id, plist_id, &
-                          myStart, totalShape, loc_id,myShape,datasetName,H5T_NATIVE_DOUBLE,.false.)
+                          myStart, totalShape, loc_id,myShape,datasetName,H5T_NATIVE_DOUBLE,parallel_default)
   endif
 
   if (product(totalShape) /= 0) then
@@ -1315,7 +1319,7 @@ subroutine HDF5_write_real6(loc_id,dataset,datasetName,parallel)
                              myStart, totalShape, loc_id,myShape,datasetName,H5T_NATIVE_DOUBLE,parallel)
   else
     call initialize_write(dset_id, filespace_id, memspace_id, plist_id, &
-                          myStart, totalShape, loc_id,myShape,datasetName,H5T_NATIVE_DOUBLE,.false.)
+                          myStart, totalShape, loc_id,myShape,datasetName,H5T_NATIVE_DOUBLE,parallel_default)
   endif
 
   if (product(totalShape) /= 0) then
@@ -1356,7 +1360,7 @@ subroutine HDF5_write_real7(loc_id,dataset,datasetName,parallel)
                              myStart, totalShape, loc_id,myShape,datasetName,H5T_NATIVE_DOUBLE,parallel)
   else
     call initialize_write(dset_id, filespace_id, memspace_id, plist_id, &
-                          myStart, totalShape, loc_id,myShape,datasetName,H5T_NATIVE_DOUBLE,.false.)
+                          myStart, totalShape, loc_id,myShape,datasetName,H5T_NATIVE_DOUBLE,parallel_default)
   endif
 
   if (product(totalShape) /= 0) then
@@ -1398,7 +1402,7 @@ subroutine HDF5_write_int1(loc_id,dataset,datasetName,parallel)
                              myStart, totalShape, loc_id,myShape,datasetName,H5T_NATIVE_INTEGER,parallel)
   else
     call initialize_write(dset_id, filespace_id, memspace_id, plist_id, &
-                          myStart, totalShape, loc_id,myShape,datasetName,H5T_NATIVE_INTEGER,.false.)
+                          myStart, totalShape, loc_id,myShape,datasetName,H5T_NATIVE_INTEGER,parallel_default)
   endif
 
   if (product(totalShape) /= 0) then
@@ -1439,7 +1443,7 @@ subroutine HDF5_write_int2(loc_id,dataset,datasetName,parallel)
                              myStart, totalShape, loc_id,myShape,datasetName,H5T_NATIVE_INTEGER,parallel)
   else
     call initialize_write(dset_id, filespace_id, memspace_id, plist_id, &
-                          myStart, totalShape, loc_id,myShape,datasetName,H5T_NATIVE_INTEGER,.false.)
+                          myStart, totalShape, loc_id,myShape,datasetName,H5T_NATIVE_INTEGER,parallel_default)
   endif
 
   if (product(totalShape) /= 0) then
@@ -1480,7 +1484,7 @@ subroutine HDF5_write_int3(loc_id,dataset,datasetName,parallel)
                              myStart, totalShape, loc_id,myShape,datasetName,H5T_NATIVE_INTEGER,parallel)
   else
     call initialize_write(dset_id, filespace_id, memspace_id, plist_id, &
-                          myStart, totalShape, loc_id,myShape,datasetName,H5T_NATIVE_INTEGER,.false.)
+                          myStart, totalShape, loc_id,myShape,datasetName,H5T_NATIVE_INTEGER,parallel_default)
   endif
 
   if (product(totalShape) /= 0) then
@@ -1521,7 +1525,7 @@ subroutine HDF5_write_int4(loc_id,dataset,datasetName,parallel)
                              myStart, totalShape, loc_id,myShape,datasetName,H5T_NATIVE_INTEGER,parallel)
   else
     call initialize_write(dset_id, filespace_id, memspace_id, plist_id, &
-                          myStart, totalShape, loc_id,myShape,datasetName,H5T_NATIVE_INTEGER,.false.)
+                          myStart, totalShape, loc_id,myShape,datasetName,H5T_NATIVE_INTEGER,parallel_default)
   endif
 
   if (product(totalShape) /= 0) then
@@ -1562,7 +1566,7 @@ subroutine HDF5_write_int5(loc_id,dataset,datasetName,parallel)
                              myStart, totalShape, loc_id,myShape,datasetName,H5T_NATIVE_INTEGER,parallel)
   else
     call initialize_write(dset_id, filespace_id, memspace_id, plist_id, &
-                          myStart, totalShape, loc_id,myShape,datasetName,H5T_NATIVE_INTEGER,.false.)
+                          myStart, totalShape, loc_id,myShape,datasetName,H5T_NATIVE_INTEGER,parallel_default)
   endif
 
   if (product(totalShape) /= 0) then
@@ -1603,7 +1607,7 @@ subroutine HDF5_write_int6(loc_id,dataset,datasetName,parallel)
                              myStart, totalShape, loc_id,myShape,datasetName,H5T_NATIVE_INTEGER,parallel)
   else
     call initialize_write(dset_id, filespace_id, memspace_id, plist_id, &
-                          myStart, totalShape, loc_id,myShape,datasetName,H5T_NATIVE_INTEGER,.false.)
+                          myStart, totalShape, loc_id,myShape,datasetName,H5T_NATIVE_INTEGER,parallel_default)
   endif
 
   if (product(totalShape) /= 0) then
@@ -1644,7 +1648,7 @@ subroutine HDF5_write_int7(loc_id,dataset,datasetName,parallel)
                              myStart, totalShape, loc_id,myShape,datasetName,H5T_NATIVE_INTEGER,parallel)
   else
     call initialize_write(dset_id, filespace_id, memspace_id, plist_id, &
-                          myStart, totalShape, loc_id,myShape,datasetName,H5T_NATIVE_INTEGER,.false.)
+                          myStart, totalShape, loc_id,myShape,datasetName,H5T_NATIVE_INTEGER,parallel_default)
   endif
 
   if (product(totalShape) /= 0) then
diff --git a/src/grid/DAMASK_grid.f90 b/src/grid/DAMASK_grid.f90
index ed1ada171..d250e2f53 100644
--- a/src/grid/DAMASK_grid.f90
+++ b/src/grid/DAMASK_grid.f90
@@ -225,8 +225,8 @@ program DAMASK_grid
       loadCases(l)%t = step_discretization%get_asFloat('t')
       loadCases(l)%N = step_discretization%get_asInt ('N')
       loadCases(l)%r = step_discretization%get_asFloat('r', defaultVal= 1.0_pReal)
-      loadCases(l)%f_restart = step_discretization%get_asInt ('f_restart', defaultVal=huge(0))
+      loadCases(l)%f_restart = load_step%get_asInt('f_restart', defaultVal=huge(0))
 
       loadCases(l)%f_out = load_step%get_asInt('f_out', defaultVal=1)
       loadCases(l)%estimate_rate = (load_step%get_asBool('estimate_rate',defaultVal=.true.) .and. &
                                    merge(.true.,.false.,l > 1))
diff --git a/src/grid/discretization_grid.f90 b/src/grid/discretization_grid.f90
index 48ad5b7e1..bb6fa9d8d 100644
--- a/src/grid/discretization_grid.f90
+++ b/src/grid/discretization_grid.f90
@@ -68,8 +68,11 @@ subroutine discretization_grid_init(restart)
 
   print'(/,a)', ' <<<+- discretization_grid init -+>>>'; flush(IO_STDOUT)
 
-  if(worldrank == 0) call readVTR(grid,geomSize,origin,materialAt_global)
-
+  if(worldrank == 0) then
+    call readVTR(grid,geomSize,origin,materialAt_global)
+  else
+    allocate(materialAt_global(0))                                                          ! needed for IntelMPI
+  endif
   call MPI_Bcast(grid,3,MPI_INTEGER,0,PETSC_COMM_WORLD, ierr)
   if (ierr /= 0) error stop 'MPI error'
 
diff --git a/src/grid/grid_mech_FEM.f90 b/src/grid/grid_mech_FEM.f90
index 806973a4d..609561c80 100644
--- a/src/grid/grid_mech_FEM.f90
+++ b/src/grid/grid_mech_FEM.f90
@@ -108,8 +108,6 @@ subroutine grid_mechanical_FEM_init
     u_current,u_lastInc
   PetscInt, dimension(0:worldsize-1) :: localK
   integer(HID_T) :: fileHandle, groupHandle
-  character(len=pStringLen) :: &
-    fileName
   class(tNode), pointer :: &
     num_grid, &
     debug_grid
@@ -234,8 +232,7 @@ subroutine grid_mechanical_FEM_init
   restartRead: if (interface_restartInc > 0) then
     print'(/,a,i0,a)', ' reading restart data of increment ', interface_restartInc, ' from file'
 
-    write(fileName,'(a,a,i0,a)') trim(getSolverJobName()),'_',worldrank,'.hdf5'
-    fileHandle = HDF5_openFile(fileName)
+    fileHandle = HDF5_openFile(getSolverJobName()//'_restart.hdf5','r')
     groupHandle = HDF5_openGroup(fileHandle,'solver')
 
     call HDF5_read(groupHandle,P_aim, 'P_aim')
@@ -422,7 +419,7 @@ subroutine grid_mechanical_FEM_restartWrite
   PetscErrorCode :: ierr
   integer(HID_T) :: fileHandle, groupHandle
   PetscScalar, dimension(:,:,:,:), pointer :: u_current,u_lastInc
-  character(len=pStringLen) :: fileName
+
 
   call DMDAVecGetArrayF90(mechanical_grid,solution_current,u_current,ierr)
   CHKERRQ(ierr)
@@ -431,8 +428,7 @@ subroutine grid_mechanical_FEM_restartWrite
 
   print*, 'writing solver data required for restart to file'; flush(IO_STDOUT)
 
-  write(fileName,'(a,a,i0,a)') trim(getSolverJobName()),'_',worldrank,'.hdf5'
-  fileHandle = HDF5_openFile(fileName,'w')
+  fileHandle = HDF5_openFile(getSolverJobName()//'_restart.hdf5','w')
   groupHandle = HDF5_addGroup(fileHandle,'solver')
 
   call HDF5_write(groupHandle,P_aim, 'P_aim')
diff --git a/src/grid/grid_mech_spectral_basic.f90 b/src/grid/grid_mech_spectral_basic.f90
index f3f30c0af..faf58c85e 100644
--- a/src/grid/grid_mech_spectral_basic.f90
+++ b/src/grid/grid_mech_spectral_basic.f90
@@ -99,8 +99,6 @@ subroutine grid_mechanical_spectral_basic_init
   PetscInt, dimension(0:worldsize-1) :: localK
   integer(HID_T) :: fileHandle, groupHandle
   integer :: fileUnit
-  character(len=pStringLen) :: &
-    fileName
   class (tNode), pointer :: &
     num_grid, &
     debug_grid
@@ -182,8 +180,7 @@ subroutine grid_mechanical_spectral_basic_init
   restartRead: if (interface_restartInc > 0) then
     print'(/,a,i0,a)', ' reading restart data of increment ', interface_restartInc, ' from file'
 
-    write(fileName,'(a,a,i0,a)') trim(getSolverJobName()),'_',worldrank,'.hdf5'
-    fileHandle = HDF5_openFile(fileName)
+    fileHandle = HDF5_openFile(getSolverJobName()//'_restart.hdf5','r')
     groupHandle = HDF5_openGroup(fileHandle,'solver')
 
     call HDF5_read(groupHandle,P_aim, 'P_aim')
@@ -365,14 +362,12 @@ subroutine grid_mechanical_spectral_basic_restartWrite
   PetscErrorCode :: ierr
   integer(HID_T) :: fileHandle, groupHandle
   PetscScalar, dimension(:,:,:,:), pointer :: F
-  character(len=pStringLen) :: fileName
 
   call DMDAVecGetArrayF90(da,solution_vec,F,ierr); CHKERRQ(ierr)
 
   print*, 'writing solver data required for restart to file'; flush(IO_STDOUT)
 
-  write(fileName,'(a,a,i0,a)') trim(getSolverJobName()),'_',worldrank,'.hdf5'
-  fileHandle = HDF5_openFile(fileName,'w')
+  fileHandle = HDF5_openFile(getSolverJobName()//'_restart.hdf5','w')
   groupHandle = HDF5_addGroup(fileHandle,'solver')
 
   call HDF5_write(groupHandle,P_aim, 'P_aim')
diff --git a/src/grid/grid_mech_spectral_polarisation.f90 b/src/grid/grid_mech_spectral_polarisation.f90
index 633fced7f..8caf41b31 100644
--- a/src/grid/grid_mech_spectral_polarisation.f90
+++ b/src/grid/grid_mech_spectral_polarisation.f90
@@ -112,8 +112,6 @@ subroutine grid_mechanical_spectral_polarisation_init
   PetscInt, dimension(0:worldsize-1) :: localK
   integer(HID_T) :: fileHandle, groupHandle
   integer :: fileUnit
-  character(len=pStringLen) :: &
-    fileName
   class (tNode), pointer :: &
     num_grid, &
     debug_grid
@@ -204,8 +202,7 @@ subroutine grid_mechanical_spectral_polarisation_init
   restartRead: if (interface_restartInc > 0) then
     print'(/,a,i0,a)', ' reading restart data of increment ', interface_restartInc, ' from file'
 
-    write(fileName,'(a,a,i0,a)') trim(getSolverJobName()),'_',worldrank,'.hdf5'
-    fileHandle = HDF5_openFile(fileName)
+    fileHandle = HDF5_openFile(getSolverJobName()//'_restart.hdf5','r')
     groupHandle = HDF5_openGroup(fileHandle,'solver')
 
     call HDF5_read(groupHandle,P_aim, 'P_aim')
@@ -419,7 +416,6 @@ subroutine grid_mechanical_spectral_polarisation_restartWrite
   PetscErrorCode :: ierr
   integer(HID_T) :: fileHandle, groupHandle
   PetscScalar, dimension(:,:,:,:), pointer :: FandF_tau, F, F_tau
-  character(len=pStringLen) :: fileName
 
   call DMDAVecGetArrayF90(da,solution_vec,FandF_tau,ierr); CHKERRQ(ierr)
   F => FandF_tau(0: 8,:,:,:)
@@ -427,8 +423,7 @@ subroutine grid_mechanical_spectral_polarisation_restartWrite
 
   print*, 'writing solver data required for restart to file'; flush(IO_STDOUT)
 
-  write(fileName,'(a,a,i0,a)') trim(getSolverJobName()),'_',worldrank,'.hdf5'
-  fileHandle = HDF5_openFile(fileName,'w')
+  fileHandle = HDF5_openFile(getSolverJobName()//'_restart.hdf5','w')
   groupHandle = HDF5_addGroup(fileHandle,'solver')
 
   call HDF5_write(groupHandle,F_aim, 'P_aim')
diff --git a/src/homogenization.f90 b/src/homogenization.f90
index 3e1b939b5..497b6cbaf 100644
--- a/src/homogenization.f90
+++ b/src/homogenization.f90
@@ -451,7 +451,7 @@ subroutine homogenization_restartWrite(fileHandle)
 
     groupHandle(2) = HDF5_addGroup(groupHandle(1),material_name_homogenization(ho))
 
-    call HDF5_read(groupHandle(2),homogState(ho)%state,'omega') ! ToDo: should be done by mech
+    call HDF5_write(groupHandle(2),homogState(ho)%state,'omega') ! ToDo: should be done by mech
 
     call HDF5_closeGroup(groupHandle(2))
 
@@ -478,7 +478,7 @@ subroutine homogenization_restartRead(fileHandle)
 
     groupHandle(2) = HDF5_openGroup(groupHandle(1),material_name_homogenization(ho))
 
-    call HDF5_write(groupHandle(2),homogState(ho)%state,'omega') ! ToDo: should be done by mech
+    call HDF5_read(groupHandle(2),homogState(ho)%state,'omega') ! ToDo: should be done by mech
 
     call HDF5_closeGroup(groupHandle(2))
 
diff --git a/src/results.f90 b/src/results.f90
index 7cb518e83..c9c51b9a2 100644
--- a/src/results.f90
+++ b/src/results.f90
@@ -65,7 +65,7 @@ subroutine results_init(restart)
   print*, 'https://doi.org/10.1007/s40192-017-0084-5'//IO_EOL
 
   if(.not. restart) then
-    resultsFile = HDF5_openFile(trim(getSolverJobName())//'.hdf5','w',.true.)
+    resultsFile = HDF5_openFile(getSolverJobName()//'.hdf5','w')
     call results_addAttribute('DADF5_version_major',0)
     call results_addAttribute('DADF5_version_minor',11)
     call results_addAttribute('DAMASK_version',DAMASKVERSION)
@@ -83,7 +83,7 @@ end subroutine results_init
 !--------------------------------------------------------------------------------------------------
 subroutine results_openJobFile
 
-  resultsFile = HDF5_openFile(trim(getSolverJobName())//'.hdf5','a',.true.)
+  resultsFile = HDF5_openFile(getSolverJobName()//'.hdf5','a')
 
 end subroutine results_openJobFile
 
@@ -289,11 +289,7 @@ subroutine results_writeScalarDataset_real(group,dataset,label,description,SIuni
 
   groupHandle = results_openGroup(group)
 
-#ifdef PETSc
-  call HDF5_write(groupHandle,dataset,label,.true.)
-#else
-  call HDF5_write(groupHandle,dataset,label,.false.)
-#endif
+  call HDF5_write(groupHandle,dataset,label)
 
   if (HDF5_objectExists(groupHandle,label)) &
     call HDF5_addAttribute(groupHandle,'Description',description,label)
@@ -320,11 +316,7 @@ subroutine results_writeVectorDataset_real(group,dataset,label,description,SIuni
 
   groupHandle = results_openGroup(group)
 
-#ifdef PETSc
-  call HDF5_write(groupHandle,dataset,label,.true.)
-#else
-  call HDF5_write(groupHandle,dataset,label,.false.)
-#endif
+  call HDF5_write(groupHandle,dataset,label)
 
   if (HDF5_objectExists(groupHandle,label)) &
     call HDF5_addAttribute(groupHandle,'Description',description,label)
@@ -362,7 +354,7 @@ subroutine results_writeTensorDataset_real(group,dataset,label,description,SIuni
   endif
 
   if(transposed_) then
-    if(size(dataset,1) /= size(dataset,2)) call IO_error(0,ext_msg='transpose non-symmetric tensor')
+    if(size(dataset,1) /= size(dataset,2)) error stop 'transpose non-symmetric tensor'
     allocate(dataset_transposed,mold=dataset)
     do i=1,size(dataset_transposed,3)
       dataset_transposed(:,:,i) = transpose(dataset(:,:,i))
@@ -373,11 +365,7 @@ subroutine results_writeTensorDataset_real(group,dataset,label,description,SIuni
 
   groupHandle = results_openGroup(group)
 
-#ifdef PETSc
-  call HDF5_write(groupHandle,dataset_transposed,label,.true.)
-#else
-  call HDF5_write(groupHandle,dataset_transposed,label,.false.)
-#endif
+  call HDF5_write(groupHandle,dataset_transposed,label)
 
   if (HDF5_objectExists(groupHandle,label)) &
    call HDF5_addAttribute(groupHandle,'Description',description,label)
@@ -405,11 +393,7 @@ subroutine results_writeVectorDataset_int(group,dataset,label,description,SIunit
 
   groupHandle = results_openGroup(group)
 
-#ifdef PETSc
-  call HDF5_write(groupHandle,dataset,label,.true.)
-#else
-  call HDF5_write(groupHandle,dataset,label,.false.)
-#endif
+  call HDF5_write(groupHandle,dataset,label)
 
   if (HDF5_objectExists(groupHandle,label)) &
     call HDF5_addAttribute(groupHandle,'Description',description,label)
@@ -437,11 +421,7 @@ subroutine results_writeTensorDataset_int(group,dataset,label,description,SIunit
 
   groupHandle = results_openGroup(group)
 
-#ifdef PETSc
-  call HDF5_write(groupHandle,dataset,label,.true.)
-#else
-  call HDF5_write(groupHandle,dataset,label,.false.)
-#endif
+  call HDF5_write(groupHandle,dataset,label)
 
   if (HDF5_objectExists(groupHandle,label)) &
     call HDF5_addAttribute(groupHandle,'Description',description,label)
@@ -577,7 +557,7 @@ subroutine results_mapping_phase(phaseAt,memberAtLocal,label)
 
 !--------------------------------------------------------------------------------------------------
 ! write the components of the compound type individually
-  call h5pset_preserve_f(plist_id, .TRUE., hdferr)
+  call h5pset_preserve_f(plist_id, .true., hdferr)
   if(hdferr < 0) error stop 'HDF5 error'
 
   loc_id = results_openGroup('/mapping')
@@ -733,7 +713,8 @@ subroutine results_mapping_homogenization(homogenizationAt,memberAtLocal,label)
 
 !--------------------------------------------------------------------------------------------------
 ! write the components of the compound type individually
-  call h5pset_preserve_f(plist_id, .TRUE., hdferr)
+  call h5pset_preserve_f(plist_id, .true., hdferr)
+  if(hdferr < 0) error stop 'HDF5 error'
 
   loc_id = results_openGroup('/mapping')
   call h5dcreate_f(loc_id, 'homogenization', dtype_id, filespace_id, dset_id, hdferr)