merge development into kinematic hardening branch

Commit 227720dc7d by Zhuowen Zhao, 2018-01-29 17:24:46 -05:00
9 changed files with 382 additions and 319 deletions

.gitignore

@@ -1,4 +1,3 @@
.noH5py
*.pyc
*.mod
*.o
@@ -8,3 +7,4 @@
bin
PRIVATE
build
system_report.txt

DAMASK_prerequisites.sh

@@ -1,9 +1,17 @@
#!/usr/bin/env bash
OUTFILE="system_report.txt"
echo generating $OUTFILE
#==================================================================================================
# Execute this script (type './DAMASK_prerequisites.sh')
# and send system_report.txt to damask@mpie.de for support
#==================================================================================================
OUTFILE="system_report.txt"
echo ===========================================
echo + Generating $OUTFILE
echo + Send to damask@mpie.de for support
echo ===========================================
echo date +"%m-%d-%y" >$OUTFILE
# redirect STDOUT and STDERR to logfile
# https://stackoverflow.com/questions/11229385/redirect-all-output-in-a-bash-script-when-using-set-x
@@ -13,6 +21,10 @@ exec > $OUTFILE 2>&1
# https://stackoverflow.com/questions/59895/getting-the-source-directory-of-a-bash-script-from-within
DAMASK_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
echo XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
echo System report for \'$(hostname)\' created on $(date '+%Y-%m-%d %H:%M:%S') by \'$(whoami)\'
echo XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
echo
echo ==============================================================================================
echo DAMASK settings
echo ==============================================================================================
@@ -30,19 +42,24 @@ echo System
echo ==============================================================================================
uname -a
echo
echo PATH: $PATH
echo LD_LIBRARY_PATH: $LD_LIBRARY_PATH
echo PYTHONPATH: $PYTHONPATH
echo SHELL: $SHELL
echo
echo ==============================================================================================
echo Python
echo ==============================================================================================
DEFAULT_PYTHON=python2.7
for executable in python python2 python3 python2.7; do
if [[ "$(which $executable)x" != "x" ]]; then
if which $executable &> /dev/null; then
echo $executable version: $($executable --version 2>&1)
else
echo $executable does not exist
fi
done
echo Location of $DEFAULT_PYTHON: $(ls -la $(which $DEFAULT_PYTHON))
echo Details on $DEFAULT_PYTHON: $(ls -la $(which $DEFAULT_PYTHON))
echo
for module in numpy scipy;do
echo ----------------------------------------------------------------------------------------------
@@ -69,7 +86,7 @@ echo ===========================================================================
echo GCC
echo ==============================================================================================
for executable in gcc g++ gfortran ;do
if [[ "$(which $executable)x" != "x" ]]; then
if which $executable &> /dev/null; then
echo $(which $executable) version: $($executable --version 2>&1)
else
echo $executable does not exist
@@ -80,7 +97,7 @@ echo ===========================================================================
echo Intel Compiler Suite
echo ==============================================================================================
for executable in icc icpc ifort ;do
if [[ "$(which $executable)x" != "x" ]]; then
if which $executable &> /dev/null; then
echo $(which $executable) version: $($executable --version 2>&1)
else
echo $executable does not exist
@@ -91,7 +108,7 @@ echo ===========================================================================
echo MPI Wrappers
echo ==============================================================================================
for executable in mpicc mpiCC mpicxx mpicxx mpifort mpif90 mpif77; do
if [[ "$(which $executable)x" != "x" ]]; then
if which $executable &> /dev/null; then
echo $(which $executable) version: $($executable --show 2>&1)
else
echo $executable does not exist
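
Every probe in this script now tests which's exit status instead of comparing its (possibly empty, possibly error-message) output against a sentinel string. For context, a rough Python analogue of the same probing pattern — a sketch, not part of the commit; shutil.which is the standard-library counterpart of the shell's which:

```python
import shutil
import subprocess

for executable in ['gcc', 'g++', 'gfortran']:
    path = shutil.which(executable)                       # None if not found on PATH
    if path:
        version = subprocess.run([executable, '--version'],
                                 capture_output=True, text=True).stdout.splitlines()[0]
        print('{} version: {}'.format(path, version))
    else:
        print('{} does not exist'.format(executable))
```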

VERSION

@@ -1 +1 @@
v2.0.1-1004-g2c4df2f
v2.0.1-1015-g532d669

installation/patch/README.md

@@ -1,6 +1,6 @@
# DAMASK patching
This folder contains patches that modify the functionality of the current version of DAMASK prior to the corresponding inclusion in the official release.
This folder contains patches that modify the functionality of the current development version of DAMASK ahead of the corresponding adoption in the official release.
## Usage
@@ -14,8 +14,8 @@ patch -p1 < installation/patch/nameOfPatch
* **fwbw_derivative** switches the default spatial derivative from continuous to forward/backward difference.
This generally reduces spurious oscillations in the result as the spatial accuracy of the derivative is then compatible with the underlying solution grid.
* **PETSc-3.8** adjusts all includes nad calls to PETSc to the 3.8.x API
This allows to use the current version of PETSc
* **PETSc-3.8** adjusts all includes and calls to PETSc to follow the 3.8.x API.
This allows to use the most recent version of PETSc.
## Create patch
commit your changes

lib/damask/test/test.py

@@ -49,7 +49,8 @@ class Test():
self.dirBase = os.path.dirname(os.path.realpath(sys.modules[self.__class__.__module__].__file__))
self.parser = OptionParser(description = '{} (Test class version: {})'.format(self.description,damask.version),
self.parser = OptionParser(option_class=damask.extendableOption,
description = '{} (Test class version: {})'.format(self.description,damask.version),
usage = './test.py [options]')
self.parser.add_option("-k", "--keep",
action = "store_true",
@@ -65,7 +66,8 @@ class Test():
help = "show all test variants without actual calculation")
self.parser.add_option("-s", "--select",
dest = "select",
help = "run test of given name only")
action = 'extend', metavar = '<string LIST>',
help = "run test(s) of given name only")
self.parser.set_defaults(keep = self.keep,
accept = self.accept,
update = self.updateRequest,
@@ -90,7 +92,7 @@ class Test():
if self.options.show:
logging.critical('{}: {}'.format(variant+1,name))
elif self.options.select is not None \
and not (name == self.options.select or str(variant+1) == self.options.select):
and not (name in self.options.select or str(variant+1) in self.options.select):
pass
else:
try:
@@ -106,7 +108,7 @@ class Test():
return variant+1 # return culprit
except Exception as e:
logging.critical('exception during variant execution: "{}"'.format(e.message))
logging.critical('exception during variant execution: "{}"'.format(str(e)))
return variant+1 # return culprit
return 0
@@ -585,13 +587,13 @@ class Test():
ret = culprit
if culprit == 0:
msg = 'The test passed.' if (self.options.select is not None or len(self.variants) == 1) \
else 'All {} tests passed.'.format(len(self.variants))
count = len(self.variants) if self.options.select is None else len(self.options.select)
msg = 'Test passed.' if count == 1 else 'All {} tests passed.'.format(count)
elif culprit == -1:
msg = 'Warning: Could not start test...'
msg = 'Warning: could not start test...'
ret = 0
else:
msg = ' * Test "{}" failed.'.format(self.variants[culprit-1])
msg = 'Test "{}" failed.'.format(self.variantName(culprit-1))
logging.critical('\n'.join(['*'*40,msg,'*'*40]) + '\n')
return ret
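
Switching --select to action='extend' (via damask.extendableOption, matched by the in-membership tests above) lets one invocation run several named variants. A minimal sketch of how such an option class behaves, using a hypothetical stand-in for damask.extendableOption (the real class ships with the damask package):

```python
from optparse import Option, OptionParser

class ExtendableOption(Option):
    """Hypothetical stand-in for damask.extendableOption:
    an 'extend' action collecting comma-separated values into a list."""
    ACTIONS = Option.ACTIONS + ('extend',)
    STORE_ACTIONS = Option.STORE_ACTIONS + ('extend',)
    TYPED_ACTIONS = Option.TYPED_ACTIONS + ('extend',)
    ALWAYS_TYPED_ACTIONS = Option.ALWAYS_TYPED_ACTIONS + ('extend',)

    def take_action(self, action, dest, opt, value, values, parser):
        if action == 'extend':
            values.ensure_value(dest, []).extend(value.split(','))
        else:
            Option.take_action(self, action, dest, opt, value, values, parser)

parser = OptionParser(option_class=ExtendableOption)
parser.add_option('-s', '--select', dest='select', action='extend', metavar='<string LIST>')
options, _ = parser.parse_args(['--select', 'variantA,variantB'])
print(options.select)     # ['variantA', 'variantB'] -> checked with 'name in options.select'
```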

processing/post/addCumulative.py

@@ -73,22 +73,17 @@ for name in filenames:
table.head_write()
# ------------------------------------------ process data ------------------------------------------
table.data_readArray()
mask = []
for col,dim in zip(columns,dims): mask += range(col,col+dim) # isolate data columns to cumulate
cumulated = np.zeros(len(mask),dtype=float) # prepare output field
cumulated = np.zeros((len(table.data),len(mask))) # prepare output field
outputAlive = True
while outputAlive and table.data_read(): # read next data line of ASCII table
for i,col in enumerate(mask):
cumulated[i] += float(table.data[col]) # cumulate values
table.data_append(cumulated)
for i,values in enumerate(table.data[:,mask]):
cumulated[i,:] = cumulated[max(0,i-1),:] + values # cumulate values
table.data = np.hstack((table.data,cumulated))
# ------------------------------------------ output result -----------------------------------------
table.data_writeArray()
outputAlive = table.data_write() # output processed line
# ------------------------------------------ output finalization -----------------------------------
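
The rewritten loop replaces line-by-line accumulation with a vectorized running sum over the pre-read array: each output row is the previous cumulated row plus the current values, i.e. a column-wise prefix sum. A self-contained check on hypothetical data (numpy's cumsum gives the same result):

```python
import numpy as np

data = np.arange(12, dtype=float).reshape(4, 3)             # hypothetical table data
mask = [0, 2]                                               # data columns to cumulate

cumulated = np.zeros((len(data), len(mask)))                # prepare output field
for i, values in enumerate(data[:, mask]):
    cumulated[i, :] = cumulated[max(0, i-1), :] + values    # cumulate values, as in the diff

assert np.allclose(cumulated, np.cumsum(data[:, mask], axis=0))
```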

processing/post/addCurl.py

@@ -9,25 +9,34 @@ import damask
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
def merge_dicts(*dict_args):
"""Given any number of dicts, shallow copy and merge into a new dict, with precedence going to key value pairs in latter dicts."""
result = {}
for dictionary in dict_args:
result.update(dictionary)
return result
def curlFFT(geomdim,field):
"""Calculate curl of a vector or tensor field by transforming into Fourier space."""
shapeFFT = np.array(np.shape(field))[0:3]
grid = np.array(np.shape(field)[2::-1])
N = grid.prod() # field size
n = np.array(np.shape(field)[3:]).prod() # data size
if n == 3: dataType = 'vector'
elif n == 9: dataType = 'tensor'
field_fourier = np.fft.rfftn(field,axes=(0,1,2),s=shapeFFT)
curl_fourier = np.empty(field_fourier.shape,'c16')
# differentiation in Fourier space
TWOPIIMG = 2.0j*math.pi
einsums = {
3:'slm,ijkl,ijkm->ijks', # vector, 3 -> 3
9:'slm,ijkl,ijknm->ijksn', # tensor, 3x3 -> 3x3
}
k_sk = np.where(np.arange(grid[2])>grid[2]//2,np.arange(grid[2])-grid[2],np.arange(grid[2]))/geomdim[0]
if grid[2]%2 == 0: k_sk[grid[2]//2] = 0 # for even grid, set Nyquist freq to 0 (Johnson, MIT, 2011)
if grid[2]%2 == 0: k_sk[grid[2]//2] = 0 # Nyquist freq=0 for even grid (Johnson, MIT, 2011)
k_sj = np.where(np.arange(grid[1])>grid[1]//2,np.arange(grid[1])-grid[1],np.arange(grid[1]))/geomdim[1]
if grid[1]%2 == 0: k_sj[grid[1]//2] = 0 # for even grid, set Nyquist freq to 0 (Johnson, MIT, 2011)
if grid[1]%2 == 0: k_sj[grid[1]//2] = 0 # Nyquist freq=0 for even grid (Johnson, MIT, 2011)
k_si = np.arange(grid[0]//2+1)/geomdim[2]
@@ -38,10 +47,7 @@ def curlFFT(geomdim,field):
e[0, 1, 2] = e[1, 2, 0] = e[2, 0, 1] = 1.0 # Levi-Civita symbols
e[0, 2, 1] = e[2, 1, 0] = e[1, 0, 2] = -1.0
if dataType == 'tensor': # tensor, 3x3 -> 3x3
curl_fourier = np.einsum('slm,ijkl,ijknm->ijksn',e,k_s,field_fourier)*TWOPIIMG
elif dataType == 'vector': # vector, 3 -> 3
curl_fourier = np.einsum('slm,ijkl,ijkm->ijks',e,k_s,field_fourier)*TWOPIIMG
curl_fourier = np.einsum(einsums[n],e,k_s,field_fourier)*TWOPIIMG
return np.fft.irfftn(curl_fourier,axes=(0,1,2),s=shapeFFT).reshape([N,n])
@@ -52,31 +58,37 @@ def curlFFT(geomdim,field):
parser = OptionParser(option_class=damask.extendableOption, usage='%prog option(s) [ASCIItable(s)]', description = """
Add column(s) containing curl of requested column(s).
Operates on periodic ordered three-dimensional data sets.
Deals with both vector- and tensor fields.
Operates on periodic ordered three-dimensional data sets
of vector and tensor fields.
""", version = scriptID)
parser.add_option('-p','--pos','--periodiccellcenter',
dest = 'pos',
type = 'string', metavar = 'string',
help = 'label of coordinates [%default]')
parser.add_option('-v','--vector',
dest = 'vector',
parser.add_option('-d','--data',
dest = 'data',
action = 'extend', metavar = '<string LIST>',
help = 'label(s) of vector field values')
parser.add_option('-t','--tensor',
dest = 'tensor',
action = 'extend', metavar = '<string LIST>',
help = 'label(s) of tensor field values')
help = 'label(s) of field values')
parser.set_defaults(pos = 'pos',
)
(options,filenames) = parser.parse_args()
if options.vector is None and options.tensor is None:
parser.error('no data column specified.')
if options.data is None: parser.error('no data column specified.')
# --- define possible data types -------------------------------------------------------------------
datatypes = {
3: {'name': 'vector',
'shape': [3],
},
9: {'name': 'tensor',
'shape': [3,3],
},
}
# --- loop over input files ------------------------------------------------------------------------
@@ -87,30 +99,27 @@ for name in filenames:
except: continue
damask.util.report(scriptName,name)
# ------------------------------------------ read header ------------------------------------------
# --- interpret header ----------------------------------------------------------------------------
table.head_read()
# ------------------------------------------ sanity checks ----------------------------------------
items = {
'tensor': {'dim': 9, 'shape': [3,3], 'labels':options.tensor, 'active':[], 'column': []},
'vector': {'dim': 3, 'shape': [3], 'labels':options.vector, 'active':[], 'column': []},
}
errors = []
remarks = []
column = {}
errors = []
active = []
if table.label_dimension(options.pos) != 3: errors.append('coordinates {} are not a vector.'.format(options.pos))
else: colCoord = table.label_index(options.pos)
coordDim = table.label_dimension(options.pos)
if coordDim != 3:
errors.append('coordinates "{}" must be three-dimensional.'.format(options.pos))
else: coordCol = table.label_index(options.pos)
for type, data in items.iteritems():
for what in (data['labels'] if data['labels'] is not None else []):
dim = table.label_dimension(what)
if dim != data['dim']: remarks.append('column {} is not a {}.'.format(what,type))
for me in options.data:
dim = table.label_dimension(me)
if dim in datatypes:
active.append(merge_dicts({'label':me},datatypes[dim]))
remarks.append('differentiating {} "{}"...'.format(datatypes[dim]['name'],me))
else:
items[type]['active'].append(what)
items[type]['column'].append(table.label_index(what))
remarks.append('skipping "{}" of dimension {}...'.format(me,dim) if dim != -1 else \
'"{}" not found...'.format(me) )
if remarks != []: damask.util.croak(remarks)
if errors != []:
@@ -121,16 +130,16 @@ for name in filenames:
# ------------------------------------------ assemble header --------------------------------------
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
for type, data in items.iteritems():
for label in data['active']:
table.labels_append(['{}_curlFFT({})'.format(i+1,label) for i in range(data['dim'])]) # extend ASCII header with new labels
for data in active:
table.labels_append(['{}_curlFFT({})'.format(i+1,data['label'])
for i in range(np.prod(np.array(data['shape'])))]) # extend ASCII header with new labels
table.head_write()
# --------------- figure out size and grid ---------------------------------------------------------
table.data_readArray()
coords = [np.unique(table.data[:,colCoord+i]) for i in range(3)]
coords = [np.unique(table.data[:,coordCol+i]) for i in range(3)]
mincorner = np.array(map(min,coords))
maxcorner = np.array(map(max,coords))
grid = np.array(map(len,coords),'i')
@@ -140,11 +149,10 @@ for name in filenames:
# ------------------------------------------ process value field -----------------------------------
stack = [table.data]
for type, data in items.iteritems():
for i,label in enumerate(data['active']):
for data in active:
# we need to reverse order here, because x is fastest,ie rightmost, but leftmost in our x,y,z notation
stack.append(curlFFT(size[::-1],
table.data[:,data['column'][i]:data['column'][i]+data['dim']].
table.data[:,table.label_indexrange(data['label'])].
reshape(grid[::-1].tolist()+data['shape'])))
# ------------------------------------------ output result -----------------------------------------
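
Both curlFFT here and gradFFT below rest on the same idea: differentiating a periodic field amounts to multiplying its Fourier coefficients by 2*pi*i*k, with the Nyquist bin zeroed on even grids so the result stays real. A one-dimensional sketch of that idiom on a hypothetical grid (not the scripts' 3-D einsum machinery):

```python
import numpy as np

N, L = 16, 1.0
x = np.arange(N) * L / N                     # periodic sample points
f = np.sin(2*np.pi*x/L)                      # band-limited test signal

k = np.fft.rfftfreq(N, d=L/N)                # wavenumbers in cycles per unit length
if N % 2 == 0: k[-1] = 0                     # Nyquist freq=0 for even grid (Johnson, MIT, 2011)
df = np.fft.irfft(np.fft.rfft(f) * 2.0j*np.pi*k, n=N)

assert np.allclose(df, 2*np.pi/L * np.cos(2*np.pi*x/L))   # matches the analytic derivative
```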

processing/post/addGradient.py

@@ -9,34 +9,41 @@ import damask
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
def merge_dicts(*dict_args):
"""Given any number of dicts, shallow copy and merge into a new dict, with precedence going to key value pairs in latter dicts."""
result = {}
for dictionary in dict_args:
result.update(dictionary)
return result
def gradFFT(geomdim,field):
"""Calculate gradient of a vector or scalar field by transforming into Fourier space."""
shapeFFT = np.array(np.shape(field))[0:3]
grid = np.array(np.shape(field)[2::-1])
N = grid.prod() # field size
n = np.array(np.shape(field)[3:]).prod() # data size
if n == 3: dataType = 'vector'
elif n == 1: dataType = 'scalar'
field_fourier = np.fft.rfftn(field,axes=(0,1,2),s=shapeFFT)
grad_fourier = np.empty(field_fourier.shape+(3,),'c16')
# differentiation in Fourier space
TWOPIIMG = 2.0j*math.pi
einsums = {
1:'ijkl,ijkm->ijkm', # scalar, 1 -> 3
3:'ijkl,ijkm->ijklm', # vector, 3 -> 3x3
}
k_sk = np.where(np.arange(grid[2])>grid[2]//2,np.arange(grid[2])-grid[2],np.arange(grid[2]))/geomdim[0]
if grid[2]%2 == 0: k_sk[grid[2]//2] = 0 # for even grid, set Nyquist freq to 0 (Johnson, MIT, 2011)
if grid[2]%2 == 0: k_sk[grid[2]//2] = 0 # Nyquist freq=0 for even grid (Johnson, MIT, 2011)
k_sj = np.where(np.arange(grid[1])>grid[1]//2,np.arange(grid[1])-grid[1],np.arange(grid[1]))/geomdim[1]
if grid[1]%2 == 0: k_sj[grid[1]//2] = 0 # for even grid, set Nyquist freq to 0 (Johnson, MIT, 2011)
if grid[1]%2 == 0: k_sj[grid[1]//2] = 0 # Nyquist freq=0 for even grid (Johnson, MIT, 2011)
k_si = np.arange(grid[0]//2+1)/geomdim[2]
kk, kj, ki = np.meshgrid(k_sk,k_sj,k_si,indexing = 'ij')
k_s = np.concatenate((ki[:,:,:,None],kj[:,:,:,None],kk[:,:,:,None]),axis = 3).astype('c16')
if dataType == 'vector': # vector, 3 -> 3x3
grad_fourier = np.einsum('ijkl,ijkm->ijklm',field_fourier,k_s)*TWOPIIMG
elif dataType == 'scalar': # scalar, 1 -> 3
grad_fourier = np.einsum('ijkl,ijkl->ijkl',field_fourier,k_s)*TWOPIIMG
grad_fourier = np.einsum(einsums[n],field_fourier,k_s)*TWOPIIMG
return np.fft.irfftn(grad_fourier,axes=(0,1,2),s=shapeFFT).reshape([N,3*n])
@@ -47,8 +54,8 @@ def gradFFT(geomdim,field):
parser = OptionParser(option_class=damask.extendableOption, usage='%prog option(s) [ASCIItable(s)]', description = """
Add column(s) containing gradient of requested column(s).
Operates on periodic ordered three-dimensional data sets.
Deals with both vector- and scalar fields.
Operates on periodic ordered three-dimensional data sets
of vector and scalar fields.
""", version = scriptID)
@@ -56,22 +63,28 @@ parser.add_option('-p','--pos','--periodiccellcenter',
dest = 'pos',
type = 'string', metavar = 'string',
help = 'label of coordinates [%default]')
parser.add_option('-v','--vector',
dest = 'vector',
parser.add_option('-d','--data',
dest = 'data',
action = 'extend', metavar = '<string LIST>',
help = 'label(s) of vector field values')
parser.add_option('-s','--scalar',
dest = 'scalar',
action = 'extend', metavar = '<string LIST>',
help = 'label(s) of scalar field values')
help = 'label(s) of field values')
parser.set_defaults(pos = 'pos',
)
(options,filenames) = parser.parse_args()
if options.vector is None and options.scalar is None:
parser.error('no data column specified.')
if options.data is None: parser.error('no data column specified.')
# --- define possible data types -------------------------------------------------------------------
datatypes = {
1: {'name': 'scalar',
'shape': [1],
},
3: {'name': 'vector',
'shape': [3],
},
}
# --- loop over input files ------------------------------------------------------------------------
@@ -82,30 +95,27 @@ for name in filenames:
except: continue
damask.util.report(scriptName,name)
# ------------------------------------------ read header ------------------------------------------
# --- interpret header ----------------------------------------------------------------------------
table.head_read()
# ------------------------------------------ sanity checks ----------------------------------------
items = {
'scalar': {'dim': 1, 'shape': [1], 'labels':options.scalar, 'active':[], 'column': []},
'vector': {'dim': 3, 'shape': [3], 'labels':options.vector, 'active':[], 'column': []},
}
errors = []
remarks = []
column = {}
errors = []
active = []
if table.label_dimension(options.pos) != 3: errors.append('coordinates {} are not a vector.'.format(options.pos))
else: colCoord = table.label_index(options.pos)
coordDim = table.label_dimension(options.pos)
if coordDim != 3:
errors.append('coordinates "{}" must be three-dimensional.'.format(options.pos))
else: coordCol = table.label_index(options.pos)
for type, data in items.iteritems():
for what in (data['labels'] if data['labels'] is not None else []):
dim = table.label_dimension(what)
if dim != data['dim']: remarks.append('column {} is not a {}.'.format(what,type))
for me in options.data:
dim = table.label_dimension(me)
if dim in datatypes:
active.append(merge_dicts({'label':me},datatypes[dim]))
remarks.append('differentiating {} "{}"...'.format(datatypes[dim]['name'],me))
else:
items[type]['active'].append(what)
items[type]['column'].append(table.label_index(what))
remarks.append('skipping "{}" of dimension {}...'.format(me,dim) if dim != -1 else \
'"{}" not found...'.format(me) )
if remarks != []: damask.util.croak(remarks)
if errors != []:
@@ -116,16 +126,16 @@ for name in filenames:
# ------------------------------------------ assemble header --------------------------------------
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
for type, data in items.iteritems():
for label in data['active']:
table.labels_append(['{}_gradFFT({})'.format(i+1,label) for i in range(3 * data['dim'])]) # extend ASCII header with new labels
for data in active:
table.labels_append(['{}_gradFFT({})'.format(i+1,data['label'])
for i in range(coordDim*np.prod(np.array(data['shape'])))]) # extend ASCII header with new labels
table.head_write()
# --------------- figure out size and grid ---------------------------------------------------------
table.data_readArray()
coords = [np.unique(table.data[:,colCoord+i]) for i in range(3)]
coords = [np.unique(table.data[:,coordCol+i]) for i in range(3)]
mincorner = np.array(map(min,coords))
maxcorner = np.array(map(max,coords))
grid = np.array(map(len,coords),'i')
@@ -135,11 +145,10 @@ for name in filenames:
# ------------------------------------------ process value field -----------------------------------
stack = [table.data]
for type, data in items.iteritems():
for i,label in enumerate(data['active']):
for data in active:
# we need to reverse order here, because x is fastest,ie rightmost, but leftmost in our x,y,z notation
stack.append(gradFFT(size[::-1],
table.data[:,data['column'][i]:data['column'][i]+data['dim']].
table.data[:,table.label_indexrange(data['label'])].
reshape(grid[::-1].tolist()+data['shape'])))
# ------------------------------------------ output result -----------------------------------------
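
With the separate -v/-t (resp. -v/-s) options folded into a single -d/--data flag, both scripts now pick the data type from the column dimension alone, via the datatypes lookup and the small merge_dicts helper. The dispatch in isolation, with hypothetical column labels:

```python
def merge_dicts(*dict_args):
    """Shallow-copy and merge dicts; later dicts take precedence."""
    result = {}
    for dictionary in dict_args:
        result.update(dictionary)
    return result

datatypes = {1: {'name': 'scalar', 'shape': [1]},
             3: {'name': 'vector', 'shape': [3]}}

for label, dim in [('temperature', 1), ('velocity', 3), ('quaternion', 4)]:  # hypothetical columns
    if dim in datatypes:
        print(merge_dicts({'label': label}, datatypes[dim]))                 # becomes an 'active' entry
    else:
        print('skipping "{}" of dimension {}...'.format(label, dim))
```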

processing/post/marc_to_vtk.py

@@ -1,7 +1,7 @@
#!/usr/bin/env python2.7
# -*- coding: UTF-8 no BOM -*-
import os,re
import os,sys,re
import argparse
import damask
import vtk, numpy as np
@@ -9,23 +9,37 @@ import vtk, numpy as np
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName, damask.version])
parser = argparse.ArgumentParser(description='Convert from Marc input file format to VTK', version = scriptID)
parser.add_argument('filename', type=str, nargs='+', help='files to convert')
parser = argparse.ArgumentParser(description='Convert from Marc input file format (.dat) to VTK format (.vtu)', version = scriptID)
parser.add_argument('filename', type=str, help='file to convert')
parser.add_argument('-t', '--table', type=str, help='ASCIItable file containing nodal data to subdivide and interpolate')
args = parser.parse_args()
files = args.filename
if type(files) is str:
files = [files]
for f in files:
with open(f, 'r') as marcfile:
with open(args.filename, 'r') as marcfile:
marctext = marcfile.read();
# Load table (if any)
if args.table is not None:
try:
table = damask.ASCIItable(
name=args.table,
outname='subdivided_{}'.format(args.table),
buffered=True
)
table.head_read()
table.data_readArray()
# Python list is faster for appending
nodal_data = list(table.data)
except: args.table = None
# Extract connectivity chunk from file...
connectivity_text = re.findall(r'connectivity[\n\r]+(.*?)[\n\r]+[a-zA-Z]', marctext, flags=(re.MULTILINE | re.DOTALL))[0]
connectivity_lines = re.split(r'[\n\r]+', connectivity_text, flags=(re.MULTILINE | re.DOTALL))
connectivity_header = connectivity_lines[0]
connectivity_lines = connectivity_lines[1:]
# Construct element map
elements = dict(map(lambda line:
(
@@ -35,11 +49,13 @@ for f in files:
'verts': list(map(int, re.split(r' +', line[20:].strip())))
}
), connectivity_lines))
# Extract coordinate chunk from file
coordinates_text = re.findall(r'coordinates[\n\r]+(.*?)[\n\r]+[a-zA-Z]', marctext, flags=(re.MULTILINE | re.DOTALL))[0]
coordinates_lines = re.split(r'[\n\r]+', coordinates_text, flags=(re.MULTILINE | re.DOTALL))
coordinates_header = coordinates_lines[0]
coordinates_lines = coordinates_lines[1:]
# marc input file does not use "e" in scientific notation, this adds it and converts
fl_format = lambda string: float(re.sub(r'(\d)([\+\-])', r'\1e\2', string))
# Construct coordinate map
@@ -65,14 +81,17 @@ for f in files:
if edge in edge_to_vert:
return edge_to_vert[edge]
# Vertex does not exist, create it
newvert = len(coordinates) + 1
coordinates[newvert] = 0.5 * (coordinates[vert1] + coordinates[vert2]) # Average
edge_to_vert[edge] = newvert;
# Interpolate nodal data
if args.table is not None:
nodal_data.append(0.5 * (nodal_data[vert1 - 1] + nodal_data[vert2 - 1]))
return newvert;
for el_id in range(1, len(elements) + 1):
for el_id in range(1, len(elements) + 1): # Marc starts counting at 1
el = elements[el_id]
if el['type'] == 7:
# Hexahedron, subdivided
@@ -110,7 +129,8 @@ for f in files:
subverts[0, 1, 2] = subdivide_edge(subverts[0, 2, 2], subverts[0, 0, 2])
# then faces... The edge_to_vert addition is due to there being two ways
# to calculate a face, depending which opposite vertices are used to subdivide
# to calculate a face vertex, depending on which opposite vertices are used to subdivide.
# This way, we avoid creating duplicate vertices.
subverts[1, 1, 0] = subdivide_edge(subverts[1, 0, 0], subverts[1, 2, 0])
edge_to_vert[ordered_pair(subverts[0, 1, 0], subverts[2, 1, 0])] = subverts[1, 1, 0]
@@ -143,10 +163,10 @@ for f in files:
hex_ = vtk.vtkHexahedron()
for vert_id in range(8):
coord = order[vert_id] + (x, y, z)
hex_.GetPointIds().SetId(vert_id, subverts[coord[0], coord[1], coord[2]] - 1) # minus one, since vtk starts at zero but marc starts at one
# minus one, since vtk starts at zero but marc starts at one
hex_.GetPointIds().SetId(vert_id, subverts[coord[0], coord[1], coord[2]] - 1)
grid.InsertNextCell(hex_.GetCellType(), hex_.GetPointIds())
else:
damask.util.croak('Unsupported Marc element type: {} (skipping)'.format(el['type']))
@@ -159,9 +179,21 @@ for f in files:
# grid now contains the elements from the given marc file
writer = vtk.vtkXMLUnstructuredGridWriter()
writer.SetFileName(re.sub(r'\..+', ".vtu", f)) # *.vtk extension does not work in paraview
#writer.SetCompressorTypeToZLib()
writer.SetFileName(re.sub(r'\..+', ".vtu", args.filename)) # *.vtk extension does not work in paraview
if vtk.VTK_MAJOR_VERSION <= 5: writer.SetInput(grid)
else: writer.SetInputData(grid)
writer.Write()
if args.table is not None:
table.info_append([
scriptID + ' ' + ' '.join(sys.argv[1:]),
])
table.head_write()
table.output_flush()
table.data = np.array(nodal_data)
table.data_writeArray()
table.close()
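
The subdivision relies on caching one midpoint vertex per undirected edge, so edges (and, via the explicit edge_to_vert registrations, faces) shared between hexahedra never spawn duplicates, and the new nodal-data interpolation happens in lockstep. The bookkeeping in isolation — a minimal sketch with hypothetical coordinates and nodal values:

```python
import numpy as np

coordinates = {1: np.array([0., 0., 0.]),                  # Marc-style 1-based vertex ids
               2: np.array([1., 0., 0.])}
nodal_data = [np.array([10.]), np.array([20.])]            # hypothetical per-node values
edge_to_vert = {}                                          # midpoint cache, keyed by undirected edge

def ordered_pair(a, b):
    return (min(a, b), max(a, b))

def subdivide_edge(vert1, vert2):
    edge = ordered_pair(vert1, vert2)
    if edge in edge_to_vert:                               # midpoint already exists, reuse it
        return edge_to_vert[edge]
    newvert = len(coordinates) + 1
    coordinates[newvert] = 0.5*(coordinates[vert1] + coordinates[vert2])   # average position
    nodal_data.append(0.5*(nodal_data[vert1-1] + nodal_data[vert2-1]))     # interpolate data too
    edge_to_vert[edge] = newvert
    return newvert

assert subdivide_edge(1, 2) == subdivide_edge(2, 1) == 3   # same vertex from either direction
```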