Merge remote-tracking branch 'origin/development' into polishing

Martin Diehl 2021-06-19 13:28:56 +02:00
commit abd660e62d
8 changed files with 48 additions and 19 deletions

@@ -1 +1 @@
-Subproject commit 5c10f4a5b00c67382d7b02ec1f1f1da8b203bb4a
+Subproject commit 8fc3c44f533f5addbea8a787239627cb850206ac

View File

@@ -1 +1 @@
-v3.0.0-alpha3-235-gdee255ae7
+v3.0.0-alpha3-252-g723737c99

View File

@@ -32,7 +32,8 @@ class Grid:
         Parameters
         ----------
         material : numpy.ndarray of shape (:,:,:)
-            Material indices.
+            Material indices. The shape of the material array defines
+            the number of cells.
         size : list or numpy.ndarray of shape (3)
             Physical size of grid in meter.
         origin : list or numpy.ndarray of shape (3), optional
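For context, a minimal sketch of the documented behaviour (the damask Python package and the cell counts are assumed, not taken from this diff): the number of cells follows directly from material.shape, only size and origin are given explicitly.

import numpy as np
import damask

material = np.zeros((8,16,4),dtype=int)                    # hypothetical 8 x 16 x 4 cells
grid = damask.Grid(material=material,size=[1e-3,2e-3,5e-4])
print(grid.cells)                                          # [ 8 16  4], taken from material.shape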

View File

@@ -1437,7 +1437,7 @@ class Result:
 
             topology = ET.SubElement(grid, 'Topology')
             topology.attrib = {'TopologyType': '3DCoRectMesh',
-                               'Dimensions': '{} {} {}'.format(*(self.cells+1))}
+                               'Dimensions': '{} {} {}'.format(*(self.cells[::-1]+1))}
 
             geometry = ET.SubElement(grid, 'Geometry')
             geometry.attrib = {'GeometryType':'Origin_DxDyDz'}
@@ -1446,13 +1446,13 @@ class Result:
             origin.attrib = {'Format': 'XML',
                              'NumberType': 'Float',
                              'Dimensions': '3'}
-            origin.text = "{} {} {}".format(*self.origin)
+            origin.text = "{} {} {}".format(*self.origin[::-1])
 
             delta = ET.SubElement(geometry, 'DataItem')
             delta.attrib = {'Format': 'XML',
                             'NumberType': 'Float',
                             'Dimensions': '3'}
-            delta.text="{} {} {}".format(*(self.size/self.cells))
+            delta.text="{} {} {}".format(*(self.size/self.cells)[::-1])
 
             attributes.append(ET.SubElement(grid, 'Attribute'))
             attributes[-1].attrib = {'Name': 'u / m',
@@ -1461,7 +1461,7 @@ class Result:
             data_items.append(ET.SubElement(attributes[-1], 'DataItem'))
             data_items[-1].attrib = {'Format': 'HDF',
                                      'Precision': '8',
-                                     'Dimensions': '{} {} {} 3'.format(*(self.cells+1))}
+                                     'Dimensions': '{} {} {} 3'.format(*(self.cells[::-1]+1))}
             data_items[-1].text = f'{os.path.split(self.fname)[1]}:/{inc}/geometry/u_n'
 
             for ty in ['phase','homogenization']:
@@ -1483,7 +1483,7 @@ class Result:
                         data_items[-1].attrib = {'Format': 'HDF',
                                                  'NumberType': number_type_map(dtype),
                                                  'Precision': f'{dtype.itemsize}',
-                                                 'Dimensions': '{} {} {} {}'.format(*self.cells,1 if shape == () else
+                                                 'Dimensions': '{} {} {} {}'.format(*self.cells[::-1],1 if shape == () else
                                                                                     np.prod(shape))}
                         data_items[-1].text = f'{os.path.split(self.fname)[1]}:{name}'
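These [::-1] reversals all address the same convention mismatch: self.cells, self.origin, and self.size are stored in (x, y, z) order, while the XDMF '3DCoRectMesh' topology lists dimensions with the slowest-varying index first (z y x). A small sketch with made-up numbers:

import numpy as np

cells = np.array([8,16,4])                                 # hypothetical (x,y,z) cell counts
size  = np.array([1e-3,2e-3,5e-4])                         # hypothetical physical size in m

print('{} {} {}'.format(*(cells[::-1]+1)))                 # '5 17 9': node counts, z y x
print('{} {} {}'.format(*(size/cells)[::-1]))              # cell spacing, z y x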
@@ -1516,10 +1516,9 @@ class Result:
         Export to VTK cell/point data.
 
         One VTK file per visible increment is created.
-        For cell data, the VTK format is a image data (.vti) for
-        grid-based simulations and an unstructured grid (.vtu) for
-        mesh-baed simulations. For point data, the VTK format is poly
-        data (.vtp).
+        For point data, the VTK format is poly data (.vtp).
+        For cell data, either an image (.vti) or unstructured (.vtu) dataset
+        is written for grid-based or mesh-based simulations, respectively.
 
         Parameters
         ----------
@@ -1539,7 +1538,7 @@ class Result:
             Fill value for non-existent entries of integer type.
             Defaults to 0.
         parallel : bool
-            Write out VTK files in parallel in a separate background process.
+            Write VTK files in parallel in a separate background process.
             Defaults to True.
 
         """

View File

@@ -8,6 +8,7 @@ import hashlib
 from datetime import datetime
 
 import pytest
+import vtk
 import numpy as np
 
 from damask import Result
@@ -404,7 +405,7 @@ class TestResult:
         os.chdir(tmp_path)
         single_phase.export_VTK(mode=mode)
 
-    def test_XDMF(self,tmp_path,single_phase,update,ref_path):
+    def test_XDMF_datatypes(self,tmp_path,single_phase,update,ref_path):
         for shape in [('scalar',()),('vector',(3,)),('tensor',(3,3)),('matrix',(12,))]:
             for dtype in ['f4','f8','i1','i2','i4','i8','u1','u2','u4','u8']:
                 single_phase.add_calculation(f"np.ones(np.shape(#F#)[0:1]+{shape[1]},'{dtype}')",f'{shape[0]}_{dtype}')
@@ -413,8 +414,35 @@ class TestResult:
         single_phase.export_XDMF()
         if update:
             shutil.copy(tmp_path/fname,ref_path/fname)
         assert sorted(open(tmp_path/fname).read()) == sorted(open(ref_path/fname).read()) # XML is not ordered
 
+    @pytest.mark.skipif(not hasattr(vtk,'vtkXdmfReader'),reason='https://discourse.vtk.org/t/2450')
+    def test_XDMF_shape(self,tmp_path,single_phase):
+        os.chdir(tmp_path)
+
+        single_phase.export_XDMF()
+        fname = os.path.splitext(os.path.basename(single_phase.fname))[0]+'.xdmf'
+        reader_xdmf = vtk.vtkXdmfReader()
+        reader_xdmf.SetFileName(fname)
+        reader_xdmf.Update()
+        dim_xdmf = reader_xdmf.GetOutput().GetDimensions()
+        bounds_xdmf = reader_xdmf.GetOutput().GetBounds()
+
+        single_phase.view('increments',0).export_VTK()
+        fname = os.path.splitext(os.path.basename(single_phase.fname))[0]+'_inc00.vti'
+        for i in range(10):                                              # waiting for parallel IO
+            reader_vti = vtk.vtkXMLImageDataReader()
+            reader_vti.SetFileName(fname)
+            reader_vti.Update()
+            dim_vti = reader_vti.GetOutput().GetDimensions()
+            bounds_vti = reader_vti.GetOutput().GetBounds()
+            if dim_vti == dim_xdmf and bounds_vti == bounds_xdmf:
+                return
+            time.sleep(.5)
+
+        assert False
+
     def test_XDMF_invalid(self,default):
         with pytest.raises(TypeError):
             default.export_XDMF()

View File

@@ -80,8 +80,8 @@ subroutine discretization_results
 
   call results_closeGroup(results_addGroup('current/geometry'))
 
-  u = discretization_NodeCoords (1:3,:discretization_sharedNodesBegin) &
-    - discretization_NodeCoords0(1:3,:discretization_sharedNodesBegin)
+  u = discretization_NodeCoords (:,:discretization_sharedNodesBegin) &
+    - discretization_NodeCoords0(:,:discretization_sharedNodesBegin)
   call results_writeDataset(u,'current/geometry','u_n','displacements of the nodes','m')
 
   u = discretization_IPcoords &

View File

@@ -226,6 +226,7 @@ pure function permutationStar111(point) result(qPt)
 
   temp(:,1) = [point(1), point(2), 1.0_pReal - point(1) - point(2)]
   temp(:,2) = [point(1), 1.0_pReal - point(1) - point(2), point(2)]
+  temp(:,3) = [point(2), point(1), 1.0_pReal - point(1) - point(2)]
  temp(:,4) = [point(2), 1.0_pReal - point(1) - point(2), point(1)]
  temp(:,5) = [1.0_pReal - point(1) - point(2), point(2), point(1)]
  temp(:,6) = [1.0_pReal - point(1) - point(2), point(1), point(2)]
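The added temp(:,3) supplies the one ordering that was missing: a 111 permutation star consists of all 3! = 6 arrangements of point(1), point(2), and 1 - point(1) - point(2). A quick illustrative check (not part of the commit):

from itertools import permutations

a, b = 0.3, 0.2                                            # stand-ins for point(1), point(2)
c = 1.0 - a - b
orbit = set(permutations((a,b,c)))
print(len(orbit))                                          # 6; (b, a, c) is the entry that temp(:,3) now provides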

View File

@@ -146,10 +146,9 @@ subroutine discretization_mesh_init(restart)
 ! Get initial nodal coordinates
   call DMGetCoordinates(geomMesh,coords_node0,ierr)
   CHKERRQ(ierr)
-  allocate(mesh_node0_temp(dimPlex*mesh_Nnodes))
   call VecGetArrayF90(coords_node0, mesh_node0_temp,ierr)
   CHKERRQ(ierr)
 
  mesh_maxNips = FEM_nQuadrature(dimPlex,integrationOrder)
 
  call mesh_FEM_build_ipCoordinates(dimPlex,FEM_quadrature_points(dimPlex,integrationOrder)%p)
@@ -166,7 +165,8 @@ subroutine discretization_mesh_init(restart)
  if (debug_ip < 1 .or. debug_ip > mesh_maxNips) call IO_error(602,ext_msg='IP')
 
  allocate(mesh_node0(3,mesh_Nnodes),source=0.0_pReal)
-  mesh_node0 = reshape(mesh_node0_temp,[dimPlex,mesh_Nnodes])
+  mesh_node0(1:dimPlex,:) = reshape(mesh_node0_temp,[dimPlex,mesh_Nnodes])
 
  call discretization_init(materialAt,&
                           reshape(mesh_ipCoordinates,[3,mesh_maxNips*mesh_NcpElems]), &
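Assigning into mesh_node0(1:dimPlex,:) presumably lets 2D meshes (dimPlex == 2) fill only the first two coordinate rows of the 3 x Nnodes array while the third row keeps its zero initialization. A numpy analogue of that assignment pattern, with made-up sizes:

import numpy as np

dimPlex, Nnodes = 2, 4                                     # hypothetical 2D mesh
mesh_node0_temp = np.arange(dimPlex*Nnodes,dtype=float)    # flat coordinate buffer, as obtained from VecGetArrayF90

mesh_node0 = np.zeros((3,Nnodes))                          # counterpart of source=0.0_pReal
mesh_node0[:dimPlex,:] = mesh_node0_temp.reshape((dimPlex,Nnodes),order='F')   # Fortran reshape is column-major
print(mesh_node0)                                          # third row stays zero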