Merge remote-tracking branch 'origin/development' into polishing

Martin Diehl 2021-06-19 13:28:56 +02:00
commit abd660e62d
8 changed files with 48 additions and 19 deletions

PRIVATE

@@ -1 +1 @@
-Subproject commit 5c10f4a5b00c67382d7b02ec1f1f1da8b203bb4a
+Subproject commit 8fc3c44f533f5addbea8a787239627cb850206ac

VERSION

@@ -1 +1 @@
-v3.0.0-alpha3-235-gdee255ae7
+v3.0.0-alpha3-252-g723737c99

python/damask/_grid.py

@@ -32,7 +32,8 @@ class Grid:
         Parameters
         ----------
         material : numpy.ndarray of shape (:,:,:)
-            Material indices.
+            Material indices. The shape of the material array defines
+            the number of cells.
         size : list or numpy.ndarray of shape (3)
             Physical size of grid in meter.
         origin : list or numpy.ndarray of shape (3), optional
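As the amended docstring states, Grid takes its cell count from material.shape rather than from a separate argument. A minimal sketch, assuming the constructor signature matches the parameters documented above (the values are made up):

    import numpy as np
    import damask

    material = np.zeros((16,16,16),dtype=int)                    # 16 x 16 x 16 cells, all material 0
    grid = damask.Grid(material=material,size=[1e-5,1e-5,1e-5])  # physical size in meter
    print(grid)                                                  # cells are inferred from material.shape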

python/damask/_result.py

@@ -1437,7 +1437,7 @@ class Result:
             topology = ET.SubElement(grid, 'Topology')
             topology.attrib = {'TopologyType': '3DCoRectMesh',
-                               'Dimensions': '{} {} {}'.format(*(self.cells+1))}
+                               'Dimensions': '{} {} {}'.format(*(self.cells[::-1]+1))}

             geometry = ET.SubElement(grid, 'Geometry')
             geometry.attrib = {'GeometryType':'Origin_DxDyDz'}
@@ -1446,13 +1446,13 @@
             origin.attrib = {'Format': 'XML',
                              'NumberType': 'Float',
                              'Dimensions': '3'}
-            origin.text = "{} {} {}".format(*self.origin)
+            origin.text = "{} {} {}".format(*self.origin[::-1])

             delta = ET.SubElement(geometry, 'DataItem')
             delta.attrib = {'Format': 'XML',
                             'NumberType': 'Float',
                             'Dimensions': '3'}
-            delta.text="{} {} {}".format(*(self.size/self.cells))
+            delta.text="{} {} {}".format(*(self.size/self.cells)[::-1])

             attributes.append(ET.SubElement(grid, 'Attribute'))
             attributes[-1].attrib = {'Name': 'u / m',
@@ -1461,7 +1461,7 @@ class Result:
             data_items.append(ET.SubElement(attributes[-1], 'DataItem'))
             data_items[-1].attrib = {'Format': 'HDF',
                                      'Precision': '8',
-                                     'Dimensions': '{} {} {} 3'.format(*(self.cells+1))}
+                                     'Dimensions': '{} {} {} 3'.format(*(self.cells[::-1]+1))}
             data_items[-1].text = f'{os.path.split(self.fname)[1]}:/{inc}/geometry/u_n'

             for ty in ['phase','homogenization']:
@@ -1483,7 +1483,7 @@ class Result:
                         data_items[-1].attrib = {'Format': 'HDF',
                                                  'NumberType': number_type_map(dtype),
                                                  'Precision': f'{dtype.itemsize}',
-                                                 'Dimensions': '{} {} {} {}'.format(*self.cells,1 if shape == () else
+                                                 'Dimensions': '{} {} {} {}'.format(*self.cells[::-1],1 if shape == () else
                                                                                     np.prod(shape))}
                         data_items[-1].text = f'{os.path.split(self.fname)[1]}:{name}'
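The [::-1] reversals in the four hunks above all serve the same purpose: XDMF lists Dimensions from the slowest- to the fastest-varying index (z y x), whereas DAMASK stores cell counts as (x, y, z). A small illustration with made-up cell counts:

    import numpy as np

    cells = np.array([16,8,4])                     # DAMASK order: x, y, z
    print('{} {} {}'.format(*(cells[::-1]+1)))     # XDMF node counts, z y x: '5 9 17'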
@@ -1516,10 +1516,9 @@ class Result:
         Export to VTK cell/point data.

         One VTK file per visible increment is created.
-        For cell data, the VTK format is a image data (.vti) for
-        grid-based simulations and an unstructured grid (.vtu) for
-        mesh-baed simulations. For point data, the VTK format is poly
-        data (.vtp).
+        For point data, the VTK format is poly data (.vtp).
+        For cell data, either an image (.vti) or unstructured (.vtu) dataset
+        is written for grid-based or mesh-based simulations, respectively.

         Parameters
         ----------
@@ -1539,7 +1538,7 @@
             Fill value for non-existent entries of integer type.
             Defaults to 0.
         parallel : bool
-            Write out VTK files in parallel in a separate background process.
+            Write VTK files in parallel in a separate background process.
             Defaults to True.

         """

python/tests/test_Result.py

@@ -8,6 +8,7 @@ import hashlib
 from datetime import datetime

 import pytest
+import vtk
 import numpy as np

 from damask import Result
@@ -404,7 +405,7 @@ class TestResult:
         os.chdir(tmp_path)
         single_phase.export_VTK(mode=mode)

-    def test_XDMF(self,tmp_path,single_phase,update,ref_path):
+    def test_XDMF_datatypes(self,tmp_path,single_phase,update,ref_path):
         for shape in [('scalar',()),('vector',(3,)),('tensor',(3,3)),('matrix',(12,))]:
             for dtype in ['f4','f8','i1','i2','i4','i8','u1','u2','u4','u8']:
                 single_phase.add_calculation(f"np.ones(np.shape(#F#)[0:1]+{shape[1]},'{dtype}')",f'{shape[0]}_{dtype}')
@@ -413,8 +414,35 @@ class TestResult:
         single_phase.export_XDMF()
         if update:
             shutil.copy(tmp_path/fname,ref_path/fname)
         assert sorted(open(tmp_path/fname).read()) == sorted(open(ref_path/fname).read()) # XML is not ordered

+    @pytest.mark.skipif(not hasattr(vtk,'vtkXdmfReader'),reason='https://discourse.vtk.org/t/2450')
+    def test_XDMF_shape(self,tmp_path,single_phase):
+        os.chdir(tmp_path)
+
+        single_phase.export_XDMF()
+        fname = os.path.splitext(os.path.basename(single_phase.fname))[0]+'.xdmf'
+        reader_xdmf = vtk.vtkXdmfReader()
+        reader_xdmf.SetFileName(fname)
+        reader_xdmf.Update()
+        dim_xdmf = reader_xdmf.GetOutput().GetDimensions()
+        bounds_xdmf = reader_xdmf.GetOutput().GetBounds()
+
+        single_phase.view('increments',0).export_VTK()
+        fname = os.path.splitext(os.path.basename(single_phase.fname))[0]+'_inc00.vti'
+        for i in range(10):                                         # waiting for parallel IO
+            reader_vti = vtk.vtkXMLImageDataReader()
+            reader_vti.SetFileName(fname)
+            reader_vti.Update()
+            dim_vti = reader_vti.GetOutput().GetDimensions()
+            bounds_vti = reader_vti.GetOutput().GetBounds()
+            if dim_vti == dim_xdmf and bounds_vti == bounds_xdmf:
+                return
+            time.sleep(.5)
+
+        assert False

     def test_XDMF_invalid(self,default):
         with pytest.raises(TypeError):
             default.export_XDMF()
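test_XDMF_shape polls for up to ten attempts because export_VTK, with the documented default parallel=True, writes from a separate background process, so the .vti file may not yet exist or be complete when the reader first opens it. The retry pattern in isolation; the name and timings are illustrative, not part of the test suite:

    import time

    def eventually(predicate,attempts=10,delay=0.5):
        """Retry until predicate() holds, e.g. a file finished by a background writer."""
        for _ in range(attempts):
            if predicate():
                return True
            time.sleep(delay)
        return False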

src/discretization.f90

@@ -80,8 +80,8 @@ subroutine discretization_results
   call results_closeGroup(results_addGroup('current/geometry'))

-  u = discretization_NodeCoords (1:3,:discretization_sharedNodesBegin) &
-    - discretization_NodeCoords0(1:3,:discretization_sharedNodesBegin)
+  u = discretization_NodeCoords (:,:discretization_sharedNodesBegin) &
+    - discretization_NodeCoords0(:,:discretization_sharedNodesBegin)
   call results_writeDataset(u,'current/geometry','u_n','displacements of the nodes','m')

   u = discretization_IPcoords &

src/mesh/FEM_quadrature.f90

@@ -226,6 +226,7 @@ pure function permutationStar111(point) result(qPt)
   temp(:,1) = [point(1), point(2), 1.0_pReal - point(1) - point(2)]
   temp(:,2) = [point(1), 1.0_pReal - point(1) - point(2), point(2)]
   temp(:,3) = [point(2), point(1), 1.0_pReal - point(1) - point(2)]
+  temp(:,4) = [point(2), 1.0_pReal - point(1) - point(2), point(1)]
   temp(:,5) = [1.0_pReal - point(1) - point(2), point(2), point(1)]
   temp(:,6) = [1.0_pReal - point(1) - point(2), point(1), point(2)]
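The added temp(:,4) line restores the missing member of the star-111 orbit: all six permutations of the barycentric coordinates (a, b, 1-a-b) must be generated, and one of them had been skipped. A quick orbit-size check with arbitrary sample values:

    from itertools import permutations

    a, b = 0.1, 0.2                         # c = 1 - a - b = 0.7
    orbit = set(permutations((a,b,1.0-a-b)))
    assert len(orbit) == 6                  # star 111: six distinct permutations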

src/mesh/discretization_mesh.f90

@@ -146,10 +146,9 @@ subroutine discretization_mesh_init(restart)
 ! Get initial nodal coordinates
   call DMGetCoordinates(geomMesh,coords_node0,ierr)
   CHKERRQ(ierr)
-  allocate(mesh_node0_temp(dimPlex*mesh_Nnodes))
   call VecGetArrayF90(coords_node0, mesh_node0_temp,ierr)
   CHKERRQ(ierr)

   mesh_maxNips = FEM_nQuadrature(dimPlex,integrationOrder)

   call mesh_FEM_build_ipCoordinates(dimPlex,FEM_quadrature_points(dimPlex,integrationOrder)%p)
@@ -166,7 +165,8 @@ subroutine discretization_mesh_init(restart)
   if (debug_ip < 1 .or. debug_ip > mesh_maxNips) call IO_error(602,ext_msg='IP')

   allocate(mesh_node0(3,mesh_Nnodes),source=0.0_pReal)
-  mesh_node0 = reshape(mesh_node0_temp,[dimPlex,mesh_Nnodes])
+  mesh_node0(1:dimPlex,:) = reshape(mesh_node0_temp,[dimPlex,mesh_Nnodes])

   call discretization_init(materialAt,&
     reshape(mesh_ipCoordinates,[3,mesh_maxNips*mesh_NcpElems]), &
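Assigning to the section mesh_node0(1:dimPlex,:) instead of to the whole array is the point of this fix: in Fortran, whole-array assignment to an allocatable reallocates it to the shape of the right-hand side, so a 2D mesh (dimPlex = 2) would silently shrink the intended 3 x Nnodes coordinate array. Writing into the section keeps all three rows and leaves the z row zero. The same zero-padding in NumPy terms, with made-up sizes (order='F' mirrors Fortran's column-major reshape):

    import numpy as np

    dimPlex, n_nodes = 2, 5                         # hypothetical 2D mesh with 5 nodes
    flat = np.arange(dimPlex*n_nodes,dtype=float)   # stands in for mesh_node0_temp
    node0 = np.zeros((3,n_nodes))                   # always 3 rows, z stays 0 in 2D
    node0[:dimPlex,:] = flat.reshape((dimPlex,n_nodes),order='F')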