"""
Tests for helper functions in damask.util.

NOTE(review): this file was recovered from a whitespace-mangled copy (all
newlines collapsed to spaces). Statement-level code is reconstructed
faithfully from the token stream, but the *exact* indentation inside the
triple-quoted docstring fixtures near the end is significant to
util._docstringer and could not be recovered — verify against upstream.
The expected strings in test_replace_docstring_return_type reference the
module name 'test_util', so this file must be named test_util.py.
"""

import sys
import random

import pytest
import numpy as np
from scipy import stats
import h5py

from damask import util


class TestUtil:

    @pytest.mark.xfail(sys.platform == 'win32', reason='echo is not a Windows command')
    def test_run_direct(self):
        # stdout is captured including the trailing newline; stderr stays empty
        out,err = util.run('echo test')
        assert out=='test\n' and err==''

    @pytest.mark.xfail(sys.platform == 'win32', reason='echo is not a Windows command')
    def test_run_env(self):
        # variables passed via 'env' must be visible to the child shell
        out,err = util.run('sh -c "echo $test_for_execute"',env={'test_for_execute':'test'})
        assert out=='test\n' and err==''

    @pytest.mark.xfail(sys.platform == 'win32', reason='false is not a Windows command')
    def test_run_runtime_error(self):
        # a nonzero exit status is reported as RuntimeError
        with pytest.raises(RuntimeError):
            util.run('false')

    @pytest.mark.parametrize('input,glue,quote,output',
                             [
                              (None,'',False,'None'),
                              ([None,None],'\n',False,'None\nNone'),
                              ([-0.5,0.5],'=',False,'-0.5=0.5'),
                              ([1,2,3],'_',False,'1_2_3'),
                              ([1,2,3],'/',True,'"1"/"2"/"3"'),
                             ])
    def test_srepr(self,input,glue,quote,output):
        assert output == util.srepr(input,glue,quote)

    @pytest.mark.parametrize('input,output',
                             [
                              ([0,-2],[0,-1]),
                              ([-0.5,0.5],[-1,1]),
                              ([1./2.,1./3.],[3,2]),
                              ([2./3.,1./2.,1./3.],[4,3,2]),
                             ])
    def test_scale2coprime(self,input,output):
        assert np.allclose(util.scale_to_coprime(np.array(input)),
                           np.array(output).astype(int))

    def test_lackofprecision(self):
        # ratios that cannot be expressed as small coprime integers are rejected
        with pytest.raises(ValueError):
            util.scale_to_coprime(np.array([1/333.333,1,1]))

    @pytest.mark.parametrize('rv',[stats.rayleigh(),stats.weibull_min(1.2),stats.halfnorm(),stats.pareto(2.62)])
    def test_hybridIA_distribution(self,rv):
        # hybrid IA sampling should reproduce the target PDF within a small RMS error
        bins = np.linspace(0,10,100000)
        centers = (bins[1:]+bins[:-1])/2
        N_samples = bins.shape[0]-1000
        dist = rv.pdf(centers)
        selected = util.hybrid_IA(dist,N_samples)
        dist_sampled = np.histogram(centers[selected],bins)[0]/N_samples*np.sum(dist)
        assert np.sqrt(((dist - dist_sampled) ** 2).mean()) < .025 and selected.shape[0]==N_samples

    def test_hybridIA_constant(self):
        # a constant distribution yields exactly m samples per bin, in bin order
        N_bins = np.random.randint(20,400)
        m = np.random.randint(1,20)
        N_samples = m * N_bins
        dist = np.ones(N_bins)*np.random.rand()
        assert np.all(np.sort(util.hybrid_IA(dist,N_samples))==np.arange(N_samples).astype(int)//m)

    def test_hybridIA_linear(self):
        # bin counts are proportional to bin weight, independent of overall scaling
        N_points = np.random.randint(10,200)
        m = np.random.randint(1,20)
        dist = np.arange(N_points)
        N_samples = m * np.sum(dist)
        assert np.all(np.bincount(util.hybrid_IA(dist*np.random.rand(),N_samples)) == dist*m)

    @pytest.mark.parametrize('point,direction,normalize,keepdims,answer',
                             [
                              ([1,0,0],'z',False,True, [1,0,0]),
                              ([1,0,0],'z',True, False,[1,0]),
                              ([0,1,1],'z',False,True, [0,0.5,0]),
                              ([0,1,1],'y',True, False,[0.41421356,0]),
                              ([1,1,0],'x',False,False,[0.5,0]),
                              ([1,1,1],'y',True, True, [0.3660254, 0,0.3660254]),
                             ])
    def test_project_equal_angle(self,point,direction,normalize,keepdims,answer):
        assert np.allclose(util.project_equal_angle(np.array(point),direction=direction,
                                                    normalize=normalize,keepdims=keepdims),answer)

    @pytest.mark.parametrize('point,direction,normalize,keepdims,answer',
                             [
                              ([1,0,0],'z',False,True, [1,0,0]),
                              ([1,0,0],'z',True, False,[1,0]),
                              ([0,1,1],'z',False,True, [0,0.70710678,0]),
                              ([0,1,1],'y',True, False,[0.5411961,0]),
                              ([1,1,0],'x',False,False,[0.70710678,0]),
                              ([1,1,1],'y',True, True, [0.45970084,0,0.45970084]),
                             ])
    def test_project_equal_area(self,point,direction,normalize,keepdims,answer):
        assert np.allclose(util.project_equal_area(np.array(point),direction=direction,
                                                   normalize=normalize,keepdims=keepdims),answer)

    @pytest.mark.parametrize('fro,to,mode,answer',
                             [
                              ((),(1,),'left',(1,)),
                              ((1,),(7,),'right',(1,)),
                              ((1,2),(1,1,2,2),'right',(1,1,2,1)),
                              ((1,2),(1,1,2,2),'left',(1,1,1,2)),
                              ((1,2,3),(1,1,2,3,4),'right',(1,1,2,3,1)),
                              ((10,2),(10,3,2,2,),'right',(10,1,2,1)),
                              ((10,2),(10,3,2,2,),'left',(10,1,1,2)),
                              ((2,2,3),(2,2,2,3,4),'left',(1,2,2,3,1)),
                              ((2,2,3),(2,2,2,3,4),'right',(2,2,1,3,1)),
                             ])
    def test_shapeshifter(self,fro,to,mode,answer):
        assert util.shapeshifter(fro,to,mode) == answer

    @pytest.mark.parametrize('fro,to,mode',
                             [
                              ((10,3,4),(10,3,2,2),'left'),
                              ((2,3),(10,3,2,2),'right'),
                             ])
    def test_invalid_shapeshifter(self,fro,to,mode):
        # source shape not embeddable in target shape
        with pytest.raises(ValueError):
            util.shapeshifter(fro,to,mode)

    @pytest.mark.parametrize('a,b,answer',
                             [
                              ((),(1,),(1,)),
                              ((1,),(),(1,)),
                              ((1,),(7,),(1,7)),
                              ((2,),(2,2),(2,2)),
                              ((1,2),(2,2),(1,2,2)),
                              ((1,2,3),(2,3,4),(1,2,3,4)),
                              ((1,2,3),(1,2,3),(1,2,3)),
                             ])
    def test_shapeblender(self,a,b,answer):
        assert util.shapeblender(a,b) == answer

    @pytest.mark.parametrize('style',[util.emph,util.deemph,util.warn,util.strikeout])
    def test_decorate(self,style):
        # decorations may add escape sequences but must preserve the text itself
        assert 'DAMASK' in style('DAMASK')

    @pytest.mark.parametrize('complete',[True,False])
    def test_D3D_base_group(self,tmp_path,complete):
        # a group qualifies as base group only if _SIMPL_GEOMETRY/SPACING exists
        base_group = ''.join(random.choices('DAMASK', k=10))
        with h5py.File(tmp_path/'base_group.dream3d','w') as f:
            f.create_group('/'.join((base_group,'_SIMPL_GEOMETRY')))
            if complete:
                f['/'.join((base_group,'_SIMPL_GEOMETRY'))].create_dataset('SPACING',data=np.ones(3))
        if complete:
            assert base_group == util.DREAM3D_base_group(tmp_path/'base_group.dream3d')
        else:
            with pytest.raises(ValueError):
                util.DREAM3D_base_group(tmp_path/'base_group.dream3d')

    @pytest.mark.parametrize('complete',[True,False])
    def test_D3D_cell_data_group(self,tmp_path,complete):
        # the cell data group must contain a dataset matching the geometry DIMENSIONS
        base_group = ''.join(random.choices('DAMASK', k=10))
        cell_data_group = ''.join(random.choices('KULeuven', k=10))
        cells = np.random.randint(1,50,3)
        with h5py.File(tmp_path/'cell_data_group.dream3d','w') as f:
            f.create_group('/'.join((base_group,'_SIMPL_GEOMETRY')))
            f['/'.join((base_group,'_SIMPL_GEOMETRY'))].create_dataset('SPACING',data=np.ones(3))
            f['/'.join((base_group,'_SIMPL_GEOMETRY'))].create_dataset('DIMENSIONS',data=cells[::-1])
            f[base_group].create_group(cell_data_group)
            if complete:
                f['/'.join((base_group,cell_data_group))].create_dataset('data',shape=np.append(cells,1))
        if complete:
            assert cell_data_group == util.DREAM3D_cell_data_group(tmp_path/'cell_data_group.dream3d')
        else:
            with pytest.raises(ValueError):
                util.DREAM3D_cell_data_group(tmp_path/'cell_data_group.dream3d')

    @pytest.mark.parametrize('full,reduced',[({}, {}),
                                             ({'A':{}}, {}),
                                             ({'A':{'B':{}}}, {}),
                                             ({'A':{'B':'C'}},)*2,
                                             ({'A':{'B':{},'C':'D'}}, {'A':{'C':'D'}})])
    def test_prune(self,full,reduced):
        assert util.dict_prune(full) == reduced

    @pytest.mark.parametrize('full,reduced',[({}, {}),
                                             ({'A':{}}, {}),
                                             ({'A':'F'}, 'F'),
                                             ({'A':{'B':{}}}, {}),
                                             ({'A':{'B':'C'}}, 'C'),
                                             ({'A':1,'B':2},)*2,
                                             ({'A':{'B':'C','D':'E'}}, {'B':'C','D':'E'}),
                                             ({'B':'C','D':'E'},)*2,
                                             ({'A':{'B':{},'C':'D'}}, {'B':{},'C':'D'})])
    def test_flatten(self,full,reduced):
        assert util.dict_flatten(full) == reduced

    def test_double_Bravais_to_Miller(self):
        # specifying both uvtw and hkil is ambiguous and must be rejected
        with pytest.raises(KeyError):
            util.Bravais_to_Miller(uvtw=np.ones(4),hkil=np.ones(4))

    def test_double_Miller_to_Bravais(self):
        # specifying both uvw and hkl is ambiguous and must be rejected
        with pytest.raises(KeyError):
            util.Miller_to_Bravais(uvw=np.ones(4),hkl=np.ones(4))

    @pytest.mark.parametrize('vector',np.array([
                                                [1,0,0],
                                                [1,1,0],
                                                [1,1,1],
                                                [1,0,-2],
                                               ]))
    @pytest.mark.parametrize('kw_Miller,kw_Bravais',[('uvw','uvtw'),('hkl','hkil')])
    def test_Miller_Bravais_Miller(self,vector,kw_Miller,kw_Bravais):
        # Miller -> Miller-Bravais -> Miller must round-trip exactly
        assert np.all(vector == util.Bravais_to_Miller(**{kw_Bravais:util.Miller_to_Bravais(**{kw_Miller:vector})}))

    @pytest.mark.parametrize('vector',np.array([
                                                [1,0,-1,2],
                                                [1,-1,0,3],
                                                [1,1,-2,-3],
                                                [0,0,0,1],
                                               ]))
    @pytest.mark.parametrize('kw_Miller,kw_Bravais',[('uvw','uvtw'),('hkl','hkil')])
    def test_Bravais_Miller_Bravais(self,vector,kw_Miller,kw_Bravais):
        # Miller-Bravais -> Miller -> Miller-Bravais must round-trip exactly
        assert np.all(vector == util.Miller_to_Bravais(**{kw_Miller:util.Bravais_to_Miller(**{kw_Bravais:vector})}))

    # NOTE(review): the three extra_parameters variants differ only in
    # indentation (testing that _docstringer normalizes it), and the two
    # invalid_docstring variants are invalid only by virtue of whitespace.
    # That whitespace was destroyed in the mangled source; the variants below
    # are a plausible reconstruction — confirm against upstream DAMASK.
    @pytest.mark.parametrize('extra_parameters',["""
          p2 : str, optional
              p2 description 1
              p2 description 2
        """,
        """
        p2 : str, optional
            p2 description 1
            p2 description 2
        """,
        """
p2 : str, optional
    p2 description 1
    p2 description 2
        """])
    @pytest.mark.parametrize('invalid_docstring',["""
        Function description

        Parameters
        ----------
          p0 : numpy.ndarray, shape (...,4)
         p0 description 1
           p0 description 2
        p1 : int, optional
        p1 description

        Remaining description
        """,
        """
            Function description

        Parameters
        ----------
        p0 : numpy.ndarray, shape (...,4)
        p0 description 1
            p0 description 2
            p1 : int, optional
          p1 description

          Remaining description
        """])
    def test_extend_docstring_parameters(self,extra_parameters,invalid_docstring):
        test_docstring = """
        Function description

        Parameters
        ----------
        p0 : numpy.ndarray, shape (...,4)
            p0 description 1
            p0 description 2
        p1 : int, optional
            p1 description

        Remaining description
        """
        expected = """
        Function description

        Parameters
        ----------
        p0 : numpy.ndarray, shape (...,4)
            p0 description 1
            p0 description 2
        p1 : int, optional
            p1 description
        p2 : str, optional
            p2 description 1
            p2 description 2

        Remaining description
        """.split("\n")
        assert expected == util._docstringer(test_docstring,extra_parameters).split('\n')
        with pytest.raises(RuntimeError):
            util._docstringer(invalid_docstring,extra_parameters)

    def test_replace_docstring_return_type(self):
        # _docstringer rewrites the documented return type to that of the
        # given function's annotation; works for plain and bound functions.
        class TestClassOriginal:
            pass
        def original_func() -> TestClassOriginal:
            pass

        class TestClassDecorated:
            def decorated_func_bound(self) -> 'TestClassDecorated':
                pass
        def decorated_func() -> TestClassDecorated:
            pass

        original_func.__doc__ = """
        Function description/Parameters

        Returns
        -------
        Return value : test_util.TestClassOriginal

        Remaining description
        """
        expected = """
        Function description/Parameters

        Returns
        -------
        Return value : test_util.TestClassDecorated

        Remaining description
        """
        assert expected == util._docstringer(original_func,return_type=decorated_func)
        assert expected == util._docstringer(original_func,return_type=TestClassDecorated.decorated_func_bound)