Merge branch 'development' into Marc-use-statev-2

Martin Diehl 2020-09-15 07:41:38 +02:00
commit b83f2e5444
34 changed files with 901 additions and 487 deletions

View File

@ -141,13 +141,6 @@ Pre_General:
- release
###################################################################################################
Post_ASCIItable:
stage: postprocessing
script: ASCIItable/test.py
except:
- master
- release
Post_General:
stage: postprocessing
script: PostProcessing/test.py
@ -499,7 +492,7 @@ mergeIntoMaster:
removeData:
stage: clean
before_script:
- echo 'Do nothing'
- echo "Removing data and lock of pipeline $CI_PIPELINE_ID"
script:
- rm -rf $TESTROOT/GitLabCI_Pipeline_$CI_PIPELINE_ID
- sed -i "/$CI_PIPELINE_ID/d" $TESTROOT/GitLabCI.queue # in case pipeline was manually (web GUI) restarted and releaseLock was performed already
@ -511,7 +504,7 @@ removeData:
removeLock:
stage: releaseLock
before_script:
- echo 'Do nothing'
- echo "Removing lock of pipeline $CI_PIPELINE_ID"
when: always
script: sed -i "/$CI_PIPELINE_ID/d" $TESTROOT/GitLabCI.queue
except:

View File

@ -184,7 +184,7 @@ if (CMAKE_BUILD_TYPE STREQUAL "DEBUG")
endif ()
set (CMAKE_Fortran_FLAGS_${CMAKE_BUILD_TYPE} "${CMAKE_Fortran_FLAGS_${CMAKE_BUILD_TYPE}} ${PETSC_INCLUDES} ${BUILDCMD_POST}")
set (CMAKE_Fortran_LINK_EXECUTABLE "${CMAKE_Fortran_LINK_EXECUTABLE} <OBJECTS> -o <TARGET> <LINK_LIBRARIES> ${PETSC_EXTERNAL_LIB} ${BUILDCMD_POST}")
set (CMAKE_Fortran_LINK_EXECUTABLE "${CMAKE_Fortran_LINK_EXECUTABLE} <OBJECTS> -o <TARGET> <LINK_LIBRARIES> ${PETSC_EXTERNAL_LIB} -lz ${BUILDCMD_POST}")
message ("Fortran Compiler Flags:\n${CMAKE_Fortran_FLAGS_${CMAKE_BUILD_TYPE}}\n")
message ("C Compiler Flags:\n${CMAKE_C_FLAGS_${CMAKE_BUILD_TYPE}}\n")

@ -1 +1 @@
Subproject commit 8a35b8834010dedb453b9e5ad7bbc5791ab25e11
Subproject commit 555f3e01f2b5cf43ade1bd48423b890adca21771

View File

@ -1 +1 @@
v3.0.0-alpha-92-g751bf786
v3.0.0-alpha-160-g117f85ec

View File

@ -39,21 +39,21 @@ for filename in options.filenames:
N_digits = 5 # hack to keep test intact
for inc in damask.util.show_progress(results.iterate('increments'),len(results.increments)):
table = damask.Table(np.ones(np.product(results.grid),dtype=int)*int(inc[3:]),{'inc':(1,)})
table.add('pos',coords.reshape(-1,3))
table = table.add('pos',coords.reshape(-1,3))
results.pick('materialpoints',False)
results.pick('constituents', True)
for label in options.con:
x = results.get_dataset_location(label)
if len(x) != 0:
table.add(label,results.read_dataset(x,0,plain=True).reshape(results.grid.prod(),-1))
table = table.add(label,results.read_dataset(x,0,plain=True).reshape(results.grid.prod(),-1))
results.pick('constituents', False)
results.pick('materialpoints',True)
for label in options.mat:
x = results.get_dataset_location(label)
if len(x) != 0:
table.add(label,results.read_dataset(x,0,plain=True).reshape(results.grid.prod(),-1))
table = table.add(label,results.read_dataset(x,0,plain=True).reshape(results.grid.prod(),-1))
dirname = os.path.abspath(os.path.join(os.path.dirname(filename),options.dir))
if not os.path.isdir(dirname):
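The recurring change in this and the following script diffs: damask.Table methods such as add no longer modify the table in place but return a modified copy, so every call site now rebinds (or chains) the result. A minimal sketch of the new contract, assuming the damask Python package at this commit (data values are illustrative):

    import numpy as np
    import damask

    table = damask.Table(np.ones(4,dtype=int),{'inc':(1,)})
    table = table.add('pos',np.random.random((4,3)))  # returns a new Table; the original is left unchanged
    assert 'pos' in table.shapes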

View File

@ -181,14 +181,14 @@ for name in filenames:
if options.shape:
centers = damask.grid_filters.cell_coord(size,F)
shapeMismatch = shapeMismatch(size,F,nodes,centers)
table.add('shapeMismatch(({}))'.format(options.defgrad),
shapeMismatch.reshape(-1,1,order='F'),
scriptID+' '+' '.join(sys.argv[1:]))
table = table.add('shapeMismatch(({}))'.format(options.defgrad),
shapeMismatch.reshape(-1,1,order='F'),
scriptID+' '+' '.join(sys.argv[1:]))
if options.volume:
volumeMismatch = volumeMismatch(size,F,nodes)
table.add('volMismatch(({}))'.format(options.defgrad),
volumeMismatch.reshape(-1,1,order='F'),
scriptID+' '+' '.join(sys.argv[1:]))
table = table.add('volMismatch(({}))'.format(options.defgrad),
volumeMismatch.reshape(-1,1,order='F'),
scriptID+' '+' '.join(sys.argv[1:]))
table.to_file(sys.stdout if name is None else name)

View File

@ -51,8 +51,8 @@ for name in filenames:
shape = (3,) if np.prod(field.shape)//np.prod(grid) == 3 else (3,3) # vector or tensor
field = field.reshape(tuple(grid)+(-1,),order='F').reshape(tuple(grid)+shape)
curl = damask.grid_filters.curl(size,field)
table.add('curlFFT({})'.format(label),
curl.reshape(tuple(grid)+(-1,)).reshape(-1,np.prod(shape),order='F'),
scriptID+' '+' '.join(sys.argv[1:]))
table = table.add('curlFFT({})'.format(label),
curl.reshape(tuple(grid)+(-1,)).reshape(-1,np.prod(shape),order='F'),
scriptID+' '+' '.join(sys.argv[1:]))
table.to_file(sys.stdout if name is None else name)

View File

@ -67,8 +67,8 @@ for name in filenames:
table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
for label in options.labels:
table.add('d({})/d({})'.format(label,options.coordinates),
derivative(table.get(options.coordinates),table.get(label)),
scriptID+' '+' '.join(sys.argv[1:]))
table = table.add('d({})/d({})'.format(label,options.coordinates),
derivative(table.get(options.coordinates),table.get(label)),
scriptID+' '+' '.join(sys.argv[1:]))
table.to_file(sys.stdout if name is None else name)

View File

@ -53,19 +53,19 @@ for name in filenames:
F = table.get(options.f).reshape(tuple(grid)+(-1,),order='F').reshape(tuple(grid)+(3,3))
if options.nodal:
table = damask.Table(damask.grid_filters.node_coord0(grid,size).reshape(-1,3,order='F'),
{'pos':(3,)})
table.add('avg({}).{}'.format(options.f,options.pos),
damask.grid_filters.node_displacement_avg(size,F).reshape(-1,3,order='F'),
scriptID+' '+' '.join(sys.argv[1:]))
table.add('fluct({}).{}'.format(options.f,options.pos),
damask.grid_filters.node_displacement_fluct(size,F).reshape(-1,3,order='F'),
scriptID+' '+' '.join(sys.argv[1:]))
{'pos':(3,)})\
.add('avg({}).{}'.format(options.f,options.pos),
damask.grid_filters.node_displacement_avg(size,F).reshape(-1,3,order='F'),
scriptID+' '+' '.join(sys.argv[1:]))\
.add('fluct({}).{}'.format(options.f,options.pos),
damask.grid_filters.node_displacement_fluct(size,F).reshape(-1,3,order='F'),
scriptID+' '+' '.join(sys.argv[1:]))
table.to_file(sys.stdout if name is None else os.path.splitext(name)[0]+'_nodal.txt')
else:
table.add('avg({}).{}'.format(options.f,options.pos),
table = table.add('avg({}).{}'.format(options.f,options.pos),
damask.grid_filters.cell_displacement_avg(size,F).reshape(-1,3,order='F'),
scriptID+' '+' '.join(sys.argv[1:]))
table.add('fluct({}).{}'.format(options.f,options.pos),
scriptID+' '+' '.join(sys.argv[1:]))\
.add('fluct({}).{}'.format(options.f,options.pos),
damask.grid_filters.cell_displacement_fluct(size,F).reshape(-1,3,order='F'),
scriptID+' '+' '.join(sys.argv[1:]))
table.to_file(sys.stdout if name is None else name)

View File

@ -51,8 +51,8 @@ for name in filenames:
shape = (3,) if np.prod(field.shape)//np.prod(grid) == 3 else (3,3) # vector or tensor
field = field.reshape(tuple(grid)+(-1,),order='F').reshape(tuple(grid)+shape)
div = damask.grid_filters.divergence(size,field)
table.add('divFFT({})'.format(label),
div.reshape(tuple(grid)+(-1,)).reshape(-1,np.prod(shape)//3,order='F'),
scriptID+' '+' '.join(sys.argv[1:]))
table = table.add('divFFT({})'.format(label),
div.reshape(tuple(grid)+(-1,)).reshape(-1,np.prod(shape)//3,order='F'),
scriptID+' '+' '.join(sys.argv[1:]))
table.to_file(sys.stdout if name is None else name)

View File

@ -180,8 +180,8 @@ for name in filenames:
for i,feature in enumerate(feature_list):
table.add('ED_{}({})'.format(features[feature]['names'][0],options.id),
distance[i,:],
scriptID+' '+' '.join(sys.argv[1:]))
table = table.add('ED_{}({})'.format(features[feature]['names'][0],options.id),
distance[i,:],
scriptID+' '+' '.join(sys.argv[1:]))
table.to_file(sys.stdout if name is None else name)

View File

@ -67,10 +67,10 @@ for name in filenames:
damask.grid_filters.coord0_check(table.get(options.pos))
for label in options.labels:
table.add('Gauss{}({})'.format(options.sigma,label),
ndimage.filters.gaussian_filter(table.get(label).reshape(-1),
options.sigma,options.order,
mode = 'wrap' if options.periodic else 'nearest'),
scriptID+' '+' '.join(sys.argv[1:]))
table = table.add('Gauss{}({})'.format(options.sigma,label),
ndimage.filters.gaussian_filter(table.get(label).reshape(-1),
options.sigma,options.order,
mode = 'wrap' if options.periodic else 'nearest'),
scriptID+' '+' '.join(sys.argv[1:]))
table.to_file(sys.stdout if name is None else name)

View File

@ -51,8 +51,8 @@ for name in filenames:
shape = (1,) if np.prod(field.shape)//np.prod(grid) == 1 else (3,) # scalar or vector
field = field.reshape(tuple(grid)+(-1,),order='F')
grad = damask.grid_filters.gradient(size,field)
table.add('gradFFT({})'.format(label),
grad.reshape(tuple(grid)+(-1,)).reshape(-1,np.prod(shape)*3,order='F'),
scriptID+' '+' '.join(sys.argv[1:]))
table = table.add('gradFFT({})'.format(label),
grad.reshape(tuple(grid)+(-1,)).reshape(-1,np.prod(shape)*3,order='F'),
scriptID+' '+' '.join(sys.argv[1:]))
table.to_file(sys.stdout if name is None else name)

View File

@ -1,64 +0,0 @@
#!/usr/bin/env python3
import os
import sys
from io import StringIO
from optparse import OptionParser
import numpy as np
import damask
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
Add data in column(s) of mapped ASCIItable selected from the row indexed by the value in a mapping column.
Row numbers start at 1.
""", version = scriptID)
parser.add_option('--index',
dest = 'index',
type = 'string', metavar = 'string',
help = 'column label containing row index')
parser.add_option('-o','--offset',
dest = 'offset',
type = 'int', metavar = 'int',
help = 'constant offset for index column value [%default]')
parser.add_option('-l','--label',
dest = 'label',
action = 'extend', metavar = '<string LIST>',
help = 'column label(s) to be appended')
parser.add_option('-a','--asciitable',
dest = 'asciitable',
type = 'string', metavar = 'string',
help = 'indexed ASCIItable')
parser.set_defaults(offset = 0,
)
(options,filenames) = parser.parse_args()
if filenames == []: filenames = [None]
if options.label is None:
parser.error('no data columns specified.')
if options.index is None:
parser.error('no index column given.')
for name in filenames:
damask.util.report(scriptName,name)
table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
indexedTable = damask.Table.from_ASCII(options.asciitable)
idx = np.reshape(table.get(options.index).astype(int) + options.offset,(-1))-1
for data in options.label:
table.add(data+'_addIndexed',indexedTable.get(data)[idx],scriptID+' '+' '.join(sys.argv[1:]))
table.to_file(sys.stdout if name is None else name)

View File

@ -1,118 +0,0 @@
#!/usr/bin/env python3
import os
import sys
from optparse import OptionParser
import numpy as np
import damask
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
Add data of selected column(s) from (first) row of linked ASCIItable that shares the linking column value.
""", version = scriptID)
parser.add_option('--link',
dest = 'link', nargs = 2,
type = 'string', metavar = 'string string',
help = 'column labels of table and linked table containing linking values')
parser.add_option('-l','--label',
dest = 'label',
action = 'extend', metavar = '<string LIST>',
help = 'column label(s) to add from linked ASCIItable')
parser.add_option('-a','--asciitable',
dest = 'asciitable',
type = 'string', metavar = 'string',
help = 'linked ASCIItable')
parser.set_defaults()
(options,filenames) = parser.parse_args()
if options.label is None:
parser.error('no data columns specified.')
if options.link is None:
parser.error('no linking columns given.')
# -------------------------------------- process linked ASCIItable --------------------------------
if options.asciitable is not None and os.path.isfile(options.asciitable):
linkedTable = damask.ASCIItable(name = options.asciitable, readonly = True)
linkedTable.head_read() # read ASCII header info of linked table
linkDim = linkedTable.label_dimension(options.link[1]) # dimension of linking column
missing_labels = linkedTable.data_readArray([options.link[1]]+options.label) # try reading linked ASCII table
linkedTable.close() # close linked ASCII table
if len(missing_labels) > 0:
damask.util.croak('column{} {} not found...'.format('s' if len(missing_labels) > 1 else '',', '.join(missing_labels)))
if len(missing_labels) >= len(options.label):
damask.util.croak('aborting...')
sys.exit()
index = linkedTable.data[:,:linkDim]
data = linkedTable.data[:,linkDim:]
else:
parser.error('no linked ASCIItable given.')
# --- loop over input files -----------------------------------------------------------------------
if filenames == []: filenames = [None]
for name in filenames:
try:
table = damask.ASCIItable(name = name)
except IOError:
continue
damask.util.report(scriptName,"{} {} <== {} {}".format(name,damask.util.deemph('@ '+options.link[0]),
options.asciitable,damask.util.deemph('@ '+options.link[1])))
# ------------------------------------------ read header ------------------------------------------
table.head_read()
# ------------------------------------------ sanity checks ----------------------------------------
errors = []
myLink = table.label_index (options.link[0])
myLinkDim = table.label_dimension(options.link[0])
if myLink < 0: errors.append('linking column {} not found.'.format(options.link[0]))
if myLinkDim != linkDim: errors.append('dimension mismatch for column {}.'.format(options.link[0]))
if errors != []:
damask.util.croak(errors)
table.close(dismiss = True)
continue
# ------------------------------------------ assemble header --------------------------------------
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
table.labels_append(linkedTable.labels(raw = True)[linkDim:]) # extend with new labels (except for linked column)
table.head_write()
# ------------------------------------------ process data ------------------------------------------
outputAlive = True
while outputAlive and table.data_read(): # read next data line of ASCII table
try:
table.data_append(data[np.argwhere(np.all((list(map(float,table.data[myLink:myLink+myLinkDim])) - index)==0,
axis=1))[0]]) # add data of first matching line
except IndexError:
table.data_append(np.nan*np.ones_like(data[0])) # or add NaNs
outputAlive = table.data_write() # output processed line
# ------------------------------------------ output finalization -----------------------------------
table.close() # close ASCII tables

View File

@ -137,14 +137,14 @@ for name in filenames:
if 'rodrigues' in options.output:
table.add('ro({})'.format(label),o.as_Rodrigues(), scriptID+' '+' '.join(sys.argv[1:]))
table = table.add('ro({})'.format(label),o.as_Rodrigues(), scriptID+' '+' '.join(sys.argv[1:]))
if 'eulers' in options.output:
table.add('eu({})'.format(label),o.as_Eulers(options.degrees), scriptID+' '+' '.join(sys.argv[1:]))
table = table.add('eu({})'.format(label),o.as_Eulers(options.degrees), scriptID+' '+' '.join(sys.argv[1:]))
if 'quaternion' in options.output:
table.add('qu({})'.format(label),o.as_quaternion(), scriptID+' '+' '.join(sys.argv[1:]))
table = table.add('qu({})'.format(label),o.as_quaternion(), scriptID+' '+' '.join(sys.argv[1:]))
if 'matrix' in options.output:
table.add('om({})'.format(label),o.as_matrix(), scriptID+' '+' '.join(sys.argv[1:]))
table = table.add('om({})'.format(label),o.as_matrix(), scriptID+' '+' '.join(sys.argv[1:]))
if 'axisangle' in options.output:
table.add('om({})'.format(label),o.as_axisangle(options.degrees), scriptID+' '+' '.join(sys.argv[1:]))
table = table.add('om({})'.format(label),o.as_axisangle(options.degrees), scriptID+' '+' '.join(sys.argv[1:]))
table.to_file(sys.stdout if name is None else name)

View File

@ -187,6 +187,6 @@ for name in filenames:
np.einsum('ijk,ik->ij',slip_normal, (o@normal)))
for i,label in enumerate(labels):
table.add(label,S[:,i],scriptID+' '+' '.join(sys.argv[1:]))
table = table.add(label,S[:,i],scriptID+' '+' '.join(sys.argv[1:]))
table.to_file(sys.stdout if name is None else name)

View File

@ -1,142 +0,0 @@
#!/usr/bin/env python3
import os
import sys
from optparse import OptionParser, OptionGroup
import math # noqa
import numpy as np
import damask
def periodicAverage(coords, limits):
"""Centroid in periodic domain, see https://en.wikipedia.org/wiki/Center_of_mass#Systems_with_periodic_boundary_conditions."""
theta = 2.0*np.pi * (coords - limits[0])/(limits[1] - limits[0])
theta_avg = np.pi + np.arctan2(-np.sin(theta).mean(axis=0), -np.cos(theta).mean(axis=0))
return limits[0] + theta_avg * (limits[1] - limits[0])/2.0/np.pi
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
Apply a user-specified function to condense into a single row all those rows for which columns 'label' have identical values.
Output table will contain as many rows as there are different (unique) values in the grouping column(s).
Periodic domain averaging of coordinate values is supported.
Examples:
For grain averaged values, replace all rows of particular 'texture' with a single row containing their average.
{name} --label texture --function np.average data.txt
""".format(name = scriptName), version = scriptID)
parser.add_option('-l','--label',
dest = 'label',
action = 'extend', metavar = '<string LIST>',
help = 'column label(s) for grouping rows')
parser.add_option('-f','--function',
dest = 'function',
type = 'string', metavar = 'string',
help = 'mapping function [%default]')
parser.add_option('-a','--all',
dest = 'all',
action = 'store_true',
help = 'apply mapping function also to grouping column(s)')
group = OptionGroup(parser, "periodic averaging", "")
group.add_option ('-p','--periodic',
dest = 'periodic',
action = 'extend', metavar = '<string LIST>',
help = 'coordinate label(s) to average across periodic domain')
group.add_option ('--limits',
dest = 'boundary',
type = 'float', metavar = 'float float', nargs = 2,
help = 'min and max of periodic domain %default')
parser.add_option_group(group)
parser.set_defaults(function = 'np.average',
all = False,
label = [],
boundary = [0.0, 1.0])
(options,filenames) = parser.parse_args()
funcModule,funcName = options.function.split('.')
try:
mapFunction = getattr(locals().get(funcModule) or
globals().get(funcModule) or
__import__(funcModule),
funcName)
except Exception:
mapFunction = None
if options.label is []:
parser.error('no grouping column specified.')
if not hasattr(mapFunction,'__call__'):
parser.error('function "{}" is not callable.'.format(options.function))
# --- loop over input files -------------------------------------------------------------------------
if filenames == []: filenames = [None]
for name in filenames:
try:
table = damask.ASCIItable(name = name)
except IOError:
continue
damask.util.report(scriptName,name)
# ------------------------------------------ sanity checks ---------------------------------------
remarks = []
errors = []
table.head_read()
grpColumns = table.label_index(options.label)[::-1]
grpColumns = grpColumns[np.where(grpColumns>=0)]
if len(grpColumns) == 0: errors.append('no valid grouping column present.')
if remarks != []: damask.util.croak(remarks)
if errors != []:
damask.util.croak(errors)
table.close(dismiss=True)
continue
# ------------------------------------------ assemble info ---------------------------------------
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
table.head_write()
# ------------------------------------------ process data --------------------------------
table.data_readArray()
indexrange = table.label_indexrange(options.periodic) if options.periodic is not None else []
rows,cols = table.data.shape
table.data = table.data[np.lexsort(table.data[:,grpColumns].T)] # sort data by grpColumn(s)
values,index = np.unique(table.data[:,grpColumns], axis=0, return_index=True) # unique grpColumn values and their positions
index = sorted(np.append(index,rows)) # add termination position
grpTable = np.empty((len(values), cols)) # initialize output
for i in range(len(values)): # iterate over groups (unique values in grpColumn)
grpTable[i] = np.apply_along_axis(mapFunction,0,table.data[index[i]:index[i+1]]) # apply (general) mapping function
grpTable[i,indexrange] = \
periodicAverage(table.data[index[i]:index[i+1],indexrange],options.boundary) # apply periodicAverage mapping function
if not options.all: grpTable[i,grpColumns] = table.data[index[i],grpColumns] # restore grouping column value
table.data = grpTable
# ------------------------------------------ output result -------------------------------
table.data_writeArray()
table.close() # close ASCII table
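periodicAverage in the deleted script treats values as angles on a circle, so samples that wrap around the domain edge average to the boundary rather than to the arithmetic midpoint. A short check of this behavior (values are illustrative):

    import numpy as np

    def periodicAverage(coords, limits):
        # centroid in periodic domain, as defined in the deleted script above
        theta = 2.0*np.pi * (coords - limits[0])/(limits[1] - limits[0])
        theta_avg = np.pi + np.arctan2(-np.sin(theta).mean(axis=0), -np.cos(theta).mean(axis=0))
        return limits[0] + theta_avg * (limits[1] - limits[0])/2.0/np.pi

    print(periodicAverage(np.array([0.1,0.9]),[0.0,1.0]))  # ~0.0 (equivalently ~1.0): the boundary, not 0.5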

View File

@ -56,6 +56,6 @@ for name in filenames:
data = table.get(label)
uniques,inverse = np.unique(data,return_inverse=True,axis=0) if options.unique else (data,np.arange(len(data)))
rng.shuffle(uniques)
table.set(label,uniques[inverse], scriptID+' '+' '.join(sys.argv[1:]))
table = table.set(label,uniques[inverse], scriptID+' '+' '.join(sys.argv[1:]))
table.to_file(sys.stdout if name is None else name)

View File

@ -64,6 +64,5 @@ for name in filenames:
'homogenization\t{}'.format(geom.homogenization)]
table = damask.Table(seeds[mask],{'pos':(3,)},comments)
table.add('microstructure',microstructure[mask])
table.to_file(sys.stdout if name is None else \
os.path.splitext(name)[0]+'.seeds')
table = table.add('microstructure',microstructure[mask])
table.to_file(sys.stdout if name is None else os.path.splitext(name)[0]+'.seeds')

View File

@ -155,11 +155,11 @@ for name in filenames:
]
table = damask.Table(np.hstack((seeds,eulers)),{'pos':(3,),'euler':(3,)},comments)
table.add('microstructure',np.arange(options.microstructure,options.microstructure + options.N,dtype=int))
table = table.add('microstructure',np.arange(options.microstructure,options.microstructure + options.N,dtype=int))
if options.weights:
weights = np.random.uniform(low = 0, high = options.max, size = options.N) if options.max > 0.0 \
else np.random.normal(loc = options.mean, scale = options.sigma, size = options.N)
table.add('weight',weights)
table = table.add('weight',weights)
table.to_file(sys.stdout if name is None else name)

View File

@ -1,4 +1,5 @@
import re
import copy
import pandas as pd
import numpy as np
@ -29,6 +30,15 @@ class Table:
self._label_condensed()
def __copy__(self):
"""Copy Table."""
return copy.deepcopy(self)
def copy(self):
"""Copy Table."""
return self.__copy__()
def _label_flat(self):
"""Label data individually, e.g. v v v ==> 1_v 2_v 3_v."""
labels = []
@ -191,15 +201,16 @@ class Table:
Human-readable information about the new data.
"""
self._add_comment(label,data.shape[1:],info)
dup = self.copy()
dup._add_comment(label,data.shape[1:],info)
if re.match(r'[0-9]*?_',label):
idx,key = label.split('_',1)
iloc = self.data.columns.get_loc(key).tolist().index(True) + int(idx) -1
self.data.iloc[:,iloc] = data
iloc = dup.data.columns.get_loc(key).tolist().index(True) + int(idx) -1
dup.data.iloc[:,iloc] = data
else:
self.data[label] = data.reshape(self.data[label].shape)
dup.data[label] = data.reshape(dup.data[label].shape)
return dup
def add(self,label,data,info=None):
"""
@ -215,15 +226,17 @@ class Table:
Human-readable information about the modified data.
"""
self._add_comment(label,data.shape[1:],info)
dup = self.copy()
dup._add_comment(label,data.shape[1:],info)
self.shapes[label] = data.shape[1:] if len(data.shape) > 1 else (1,)
dup.shapes[label] = data.shape[1:] if len(data.shape) > 1 else (1,)
size = np.prod(data.shape[1:],dtype=int)
new = pd.DataFrame(data=data.reshape(-1,size),
columns=[label]*size,
)
new.index = self.data.index
self.data = pd.concat([self.data,new],axis=1)
new.index = dup.data.index
dup.data = pd.concat([dup.data,new],axis=1)
return dup
def delete(self,label):
@ -236,25 +249,31 @@ class Table:
Column label.
"""
self.data.drop(columns=label,inplace=True)
del self.shapes[label]
dup = self.copy()
dup.data.drop(columns=label,inplace=True)
del dup.shapes[label]
return dup
def rename(self,label_old,label_new,info=None):
def rename(self,old,new,info=None):
"""
Rename column data.
Parameters
----------
label_old : str
Old column label.
label_new : str
New column label.
label_old : str or iterable of str
Old column label(s).
label_new : str or iterable of str
New column label(s).
"""
self.data.rename(columns={label_old:label_new},inplace=True)
self.comments.append(f'{label_old} => {label_new}'+('' if info is None else f': {info}'))
self.shapes = {(label if label != label_old else label_new):self.shapes[label] for label in self.shapes}
dup = self.copy()
columns = dict(zip([old] if isinstance(old,str) else old,
[new] if isinstance(new,str) else new))
dup.data.rename(columns=columns,inplace=True)
dup.comments.append(f'{old} => {new}'+('' if info is None else f': {info}'))
dup.shapes = {(label if label not in columns else columns[label]):dup.shapes[label] for label in dup.shapes}
return dup
def sort_by(self,labels,ascending=True):
@ -269,10 +288,12 @@ class Table:
Set sort order.
"""
self._label_flat()
self.data.sort_values(labels,axis=0,inplace=True,ascending=ascending)
self._label_condensed()
self.comments.append(f'sorted by [{", ".join(labels)}]')
dup = self.copy()
dup._label_flat()
dup.data.sort_values(labels,axis=0,inplace=True,ascending=ascending)
dup._label_condensed()
dup.comments.append(f'sorted {"ascending" if ascending else "descending"} by {labels}')
return dup
def append(self,other):
@ -290,7 +311,9 @@ class Table:
if self.shapes != other.shapes or not self.data.columns.equals(other.data.columns):
raise KeyError('Labels or shapes or order do not match')
else:
self.data = self.data.append(other.data,ignore_index=True)
dup = self.copy()
dup.data = dup.data.append(other.data,ignore_index=True)
return dup
def join(self,other):
@ -308,9 +331,11 @@ class Table:
if set(self.shapes) & set(other.shapes) or self.data.shape[0] != other.data.shape[0]:
raise KeyError('Duplicated keys or row count mismatch')
else:
self.data = self.data.join(other.data)
dup = self.copy()
dup.data = dup.data.join(other.data)
for key in other.shapes:
self.shapes[key] = other.shapes[key]
dup.shapes[key] = other.shapes[key]
return dup
def to_file(self,fname,format='ASCII',new_style=False):
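Since every mutator of the reworked Table now returns a fresh copy, calls can be chained; the updated tests further below exercise exactly this style. A minimal sketch (data values mirror the test_sort fixture):

    import numpy as np
    from damask import Table

    t = Table(np.array([[0,1],[2,1]]),{'v':(2,)},['test data'])
    t = t.add('s',np.array(['b','a'])).sort_by('s')  # each call returns a new Table
    assert np.all(t.get('1_v') == np.array([2,0]).reshape(2,1))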

View File

@ -40,20 +40,20 @@ class VTK:
"""
Create VTK of type vtk.vtkRectilinearGrid.
This is the common type for results from the grid solver.
This is the common type for grid solver results.
Parameters
----------
grid : numpy.ndarray of shape (3) of np.dtype = int
Number of cells.
size : numpy.ndarray of shape (3)
Physical length.
origin : numpy.ndarray of shape (3), optional
Spatial origin.
grid : iterable of int, len (3)
Number of cells along each dimension.
size : iterable of float, len (3)
Physical lengths along each dimension.
origin : iterable of float, len (3), optional
Spatial origin coordinates.
"""
vtk_data = vtk.vtkRectilinearGrid()
vtk_data.SetDimensions(*(grid+1))
vtk_data.SetDimensions(*(np.array(grid)+1))
coord = [np_to_vtk(np.linspace(origin[i],origin[i]+size[i],grid[i]+1),deep=True) for i in [0,1,2]]
[coord[i].SetName(n) for i,n in enumerate(['x','y','z'])]
vtk_data.SetXCoordinates(coord[0])
@ -68,7 +68,7 @@ class VTK:
"""
Create VTK of type vtk.vtkUnstructuredGrid.
This is the common type for results from FEM solvers.
This is the common type for FEM solver results.
Parameters
----------
@ -127,7 +127,7 @@ class VTK:
fname : str or pathlib.Path
Filename for reading. Valid extensions are .vtr, .vtu, .vtp, and .vtk.
dataset_type : str, optional
Name of the vtk.vtkDataSet subclass when opening an .vtk file. Valid types are vtkRectilinearGrid,
Name of the vtk.vtkDataSet subclass when opening a .vtk file. Valid types are vtkRectilinearGrid,
vtkUnstructuredGrid, and vtkPolyData.
"""
@ -168,7 +168,7 @@ class VTK:
def _write(writer):
"""Wrapper for parallel writing."""
writer.Write()
def to_file(self,fname,parallel=True):
def to_file(self,fname,parallel=True,compress=True):
"""
Write to file.
@ -192,7 +192,10 @@ class VTK:
if ext and ext != '.'+default_ext:
raise ValueError(f'Given extension {ext} does not match default .{default_ext}')
writer.SetFileName(str(Path(fname).with_suffix('.'+default_ext)))
writer.SetCompressorTypeToZLib()
if compress:
writer.SetCompressorTypeToZLib()
else:
writer.SetCompressorTypeToNone()
writer.SetDataModeToBinary()
writer.SetInputData(self.vtk_data)
@ -215,8 +218,8 @@ class VTK:
Parameters
----------
data : numpy.ndarray
Data to add. First dimension need to match either
number of cells or number of points
Data to add. First dimension needs to match either
number of cells or number of points.
label : str
Data label.
@ -224,22 +227,21 @@ class VTK:
N_points = self.vtk_data.GetNumberOfPoints()
N_cells = self.vtk_data.GetNumberOfCells()
if isinstance(data,np.ndarray):
if isinstance(data,np.ndarray):
if label is None:
raise ValueError('No label defined for numpy.ndarray')
if data.dtype in [np.float64, np.float128]: # avoid large files
d = np_to_vtk(num_array=data.astype(np.float32).reshape(data.shape[0],-1),deep=True)
else:
d = np_to_vtk(num_array=data.reshape(data.shape[0],-1),deep=True)
N_data = data.shape[0]
d = np_to_vtk((data.astype(np.float32) if data.dtype in [np.float64, np.float128]
else data).reshape(N_data,-1),deep=True) # avoid large files
d.SetName(label)
if data.shape[0] == N_cells:
if N_data == N_cells:
self.vtk_data.GetCellData().AddArray(d)
elif data.shape[0] == N_points:
elif N_data == N_points:
self.vtk_data.GetPointData().AddArray(d)
else:
raise ValueError(f'Invalid shape {data.shape[0]}')
raise ValueError(f'Cell / point count ({N_cells} / {N_points}) differs from data ({N_data}).')
elif isinstance(data,pd.DataFrame):
raise NotImplementedError('pd.DataFrame')
elif isinstance(data,Table):
@ -272,7 +274,7 @@ class VTK:
if point_data.GetArrayName(a) == label:
return vtk_to_np(point_data.GetArray(a))
raise ValueError(f'array "{label}" not found')
raise ValueError(f'Array "{label}" not found.')
def get_comments(self):
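The rectilinear-grid constructor above builds the grid from cell counts, physical size, and origin: one more point than cells per direction, with evenly spaced coordinates. A self-contained sketch of the same construction with plain vtk (variable names are illustrative, not the damask API):

    import numpy as np
    import vtk
    from vtk.util.numpy_support import numpy_to_vtk

    grid   = np.array([2,3,4])        # number of cells per direction
    size   = np.array([1.0,1.0,1.0])  # physical lengths
    origin = np.zeros(3)

    vtk_data = vtk.vtkRectilinearGrid()
    vtk_data.SetDimensions(*(grid+1))  # grid points = cells + 1
    coord = [numpy_to_vtk(np.linspace(origin[i],origin[i]+size[i],grid[i]+1),deep=True) for i in range(3)]
    for c,n in zip(coord,['x','y','z']):
        c.SetName(n)
    vtk_data.SetXCoordinates(coord[0])
    vtk_data.SetYCoordinates(coord[1])
    vtk_data.SetZCoordinates(coord[2])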

View File

@ -105,7 +105,7 @@ class TestOrientation:
if update:
coords = np.array([(1,i+1) for i,x in enumerate(eu)])
table = Table(eu,{'Eulers':(3,)})
table.add('pos',coords)
table = table.add('pos',coords)
table.to_ASCII(reference)
assert np.allclose(eu,Table.from_ASCII(reference).get('Eulers'))

View File

@ -81,13 +81,11 @@ class TestTable:
Table.from_ASCII(f)
def test_set(self,default):
default.set('F',np.zeros((5,3,3)),'set to zero')
d=default.get('F')
d = default.set('F',np.zeros((5,3,3)),'set to zero').get('F')
assert np.allclose(d,0.0) and d.shape[1:] == (3,3)
def test_set_component(self,default):
default.set('1_F',np.zeros((5)),'set to zero')
d=default.get('F')
d = default.set('1_F',np.zeros((5)),'set to zero').get('F')
assert np.allclose(d[...,0,0],0.0) and d.shape[1:] == (3,3)
def test_labels(self,default):
@ -95,36 +93,34 @@ class TestTable:
def test_add(self,default):
d = np.random.random((5,9))
default.add('nine',d,'random data')
assert np.allclose(d,default.get('nine'))
assert np.allclose(d,default.add('nine',d,'random data').get('nine'))
def test_rename_equivalent(self):
x = np.random.random((5,13))
t = Table(x,{'F':(3,3),'v':(3,),'s':(1,)},['random test data'])
s = t.get('s')
t.rename('s','u')
u = t.get('u')
u = t.rename('s','u').get('u')
assert np.all(s == u)
def test_rename_gone(self,default):
default.rename('v','V')
assert 'v' not in default.shapes and 'v' not in default.data.columns
gone = default.rename('v','V')
assert 'v' not in gone.shapes and 'v' not in gone.data.columns
with pytest.raises(KeyError):
default.get('v')
gone.get('v')
def test_delete(self,default):
default.delete('v')
assert 'v' not in default.shapes and 'v' not in default.data.columns
delete = default.delete('v')
assert 'v' not in delete.shapes and 'v' not in delete.data.columns
with pytest.raises(KeyError):
default.get('v')
delete.get('v')
def test_join(self):
x = np.random.random((5,13))
a = Table(x,{'F':(3,3),'v':(3,),'s':(1,)},['random test data'])
y = np.random.random((5,3))
b = Table(y,{'u':(3,)},['random test data'])
a.join(b)
assert np.array_equal(a.get('u'), b.get('u'))
c = a.join(b)
assert np.array_equal(c.get('u'), b.get('u'))
def test_join_invalid(self):
x = np.random.random((5,13))
@ -135,8 +131,8 @@ class TestTable:
def test_append(self):
x = np.random.random((5,13))
a = Table(x,{'F':(3,3),'v':(3,),'s':(1,)},['random test data'])
a.append(a)
assert np.array_equal(a.data[:5].to_numpy(),a.data[5:].to_numpy())
b = a.append(a)
assert np.array_equal(b.data[:5].to_numpy(),b.data[5:].to_numpy())
def test_append_invalid(self):
x = np.random.random((5,13))
@ -163,29 +159,26 @@ class TestTable:
x = np.random.random((5,13))
t = Table(x,{'F':(3,3),'v':(3,),'s':(1,)},['random test data'])
unsort = t.get('s')
t.sort_by('s')
sort = t.get('s')
sort = t.sort_by('s').get('s')
assert np.all(np.sort(unsort,0)==sort)
def test_sort_component(self):
x = np.random.random((5,12))
t = Table(x,{'F':(3,3),'v':(3,)},['random test data'])
unsort = t.get('4_F')
t.sort_by('4_F')
sort = t.get('4_F')
sort = t.sort_by('4_F').get('4_F')
assert np.all(np.sort(unsort,0)==sort)
def test_sort_revert(self):
x = np.random.random((5,12))
t = Table(x,{'F':(3,3),'v':(3,)},['random test data'])
t.sort_by('4_F',ascending=False)
sort = t.get('4_F')
sort = t.sort_by('4_F',ascending=False).get('4_F')
assert np.all(np.sort(sort,0)==sort[::-1,:])
def test_sort(self):
t = Table(np.array([[0,1,],[2,1,]]),
{'v':(2,)},
['test data'])
t.add('s',np.array(['b','a']))
t.sort_by('s')
['test data'])\
.add('s',np.array(['b','a']))\
.sort_by('s')
assert np.all(t.get('1_v') == np.array([2,0]).reshape(2,1))
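rename likewise returns a copy and, per the reworked signature rename(old,new), also accepts iterables of labels. Sketch (fixture as in the tests above):

    import numpy as np
    from damask import Table

    t = Table(np.random.random((5,13)),{'F':(3,3),'v':(3,),'s':(1,)},['random test data'])
    t = t.rename(['v','s'],['u','r'])  # multi-column rename; returns a new Table
    assert 'u' in t.shapes and 'v' not in t.shapes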

View File

@ -14,6 +14,7 @@ module CPFEM2
use material
use lattice
use IO
use base64
use DAMASK_interface
use results
use discretization
@ -42,6 +43,7 @@ subroutine CPFEM_initAll
call DAMASK_interface_init ! Spectral and FEM interface to commandline
call prec_init
call IO_init
call base64_init
#ifdef Mesh
call FEM_quadrature_init
#endif

View File

@ -6,6 +6,7 @@
#include <signal.h>
#include <sys/types.h>
#include <sys/stat.h>
#include "zlib.h"
/* http://stackoverflow.com/questions/30279228/is-there-an-alternative-to-getcwd-in-fortran-2003-2008 */
@ -57,3 +58,17 @@ void signalusr1_c(void (*handler)(int)){
void signalusr2_c(void (*handler)(int)){
signal(SIGUSR2, handler);
}
void inflate_c(const uLong *s_deflated, const uLong *s_inflated, const Byte deflated[], Byte inflated[]){
/* make writable copy, uncompress will write to it */
uLong s_inflated_;
s_inflated_ = *s_inflated;
if(uncompress((Bytef *)inflated, &s_inflated_, (Bytef *)deflated, *s_deflated) == Z_OK)
return;
else{
for(uLong i=0;i<*s_inflated;i++){
inflated[i] = 0;
}
}
}
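inflate_c is a thin wrapper around zlib's uncompress: the caller passes the deflated buffer plus the known inflated size, and on failure the output buffer is zero-filled instead of left undefined. For reference, the same round trip in Python's zlib (illustrative):

    import zlib

    payload  = bytes(range(32))
    deflated = zlib.compress(payload)
    inflated = zlib.decompress(deflated)  # Python tracks the inflated size itself
    assert inflated == payload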

View File

@ -10,6 +10,7 @@ module IO
implicit none
private
character(len=*), parameter, public :: &
IO_WHITESPACE = achar(44)//achar(32)//achar(9)//achar(10)//achar(13) !< whitespace characters
character, parameter, public :: &
@ -50,7 +51,7 @@ contains
!--------------------------------------------------------------------------------------------------
!> @brief does nothing.
!> @brief do self test
!--------------------------------------------------------------------------------------------------
subroutine IO_init
@ -447,6 +448,9 @@ subroutine IO_error(error_ID,el,ip,g,instance,ext_msg)
msg = 'invalid character for float:'
case (113)
msg = 'invalid character for logical:'
case (114)
msg = 'cannot decode base64 string:'
!--------------------------------------------------------------------------------------------------
! lattice error messages
case (130)
@ -581,6 +585,8 @@ subroutine IO_error(error_ID,el,ip,g,instance,ext_msg)
msg = 'incomplete information in grid mesh header'
case (843)
msg = 'microstructure count mismatch'
case (844)
msg = 'invalid VTR file'
case (846)
msg = 'rotation for load case rotation ill-defined (R:RT != I)'
case (891)

src/base64.f90 Normal file
View File

@ -0,0 +1,226 @@
!--------------------------------------------------------------------------------------------------
!> @author Martin Diehl, Max-Planck-Institut für Eisenforschung GmbH
!> @brief Decode Base64 strings.
!> @details See https://en.wikipedia.org/wiki/Base64.
!--------------------------------------------------------------------------------------------------
module base64
use prec
use IO
implicit none
private
character(len=*), parameter :: &
base64_encoding='ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789+/'
public :: &
base64_init, &
base64_to_bytes, &
base64_nChar, &
base64_nByte
contains
!--------------------------------------------------------------------------------------------------
!> @brief Do self test.
!--------------------------------------------------------------------------------------------------
subroutine base64_init
write(6,'(/,a)') ' <<<+- base64 init -+>>>'; flush(6)
call selfTest
end subroutine base64_init
!--------------------------------------------------------------------------------------------------
!> @brief Calculate number of Base64 characters required for storage of N bytes.
!--------------------------------------------------------------------------------------------------
pure function base64_nChar(nByte)
integer(pI64), intent(in) :: nByte
integer(pI64) :: base64_nChar
base64_nChar = 4_pI64 * (nByte/3_pI64 + merge(1_pI64,0_pI64,mod(nByte,3_pI64) /= 0_pI64))
end function base64_nChar
!--------------------------------------------------------------------------------------------------
!> @brief Calculate number of bytes required for storage of N Base64 characters.
!--------------------------------------------------------------------------------------------------
pure function base64_nByte(nBase64)
integer(pI64), intent(in) :: nBase64
integer(pI64) :: base64_nByte
base64_nByte = 3_pI64 * (nBase64/4_pI64)
end function base64_nByte
!--------------------------------------------------------------------------------------------------
!> @brief Decode Base64 ASCII string into byte-wise binary representation.
!--------------------------------------------------------------------------------------------------
function base64_to_bytes(base64_str,s,e) result(bytes)
character(len=*), intent(in) :: base64_str !< Base64 string representation
integer(pI64), intent(in), optional :: &
s, & !< start (in bytes)
e !< end (in bytes)
integer(pI64) :: s_bytes, e_bytes, s_str, e_str
integer(C_SIGNED_CHAR), dimension(:), allocatable :: bytes
if(.not. validBase64(base64_str)) call IO_error(114,ext_msg='invalid character')
if(present(s)) then
if(s<1_pI64) call IO_error(114, ext_msg='s out of range')
s_str = ((s-1_pI64)/3_pI64)*4_pI64 + 1_pI64
s_bytes = mod(s-1_pI64,3_pI64) + 1_pI64
else
s_str = 1_pI64
s_bytes = 1_pI64
endif
if(present(e)) then
if(e>base64_nByte(len(base64_str,kind=pI64))) call IO_error(114, ext_msg='e out of range')
e_str = ((e-1_pI64)/3_pI64)*4_pI64 + 4_pI64
e_bytes = e - base64_nByte(s_str)
else
e_str = len(base64_str,kind=pI64)
e_bytes = base64_nByte(len(base64_str,kind=pI64)) - base64_nByte(s_str)
if(base64_str(e_str-0_pI64:e_str-0_pI64) == '=') e_bytes = e_bytes - 1_pI64
if(base64_str(e_str-1_pI64:e_str-1_pI64) == '=') e_bytes = e_bytes - 1_pI64
endif
bytes = decodeBase64(base64_str(s_str:e_str))
bytes = bytes(s_bytes:e_bytes)
end function base64_to_bytes
!--------------------------------------------------------------------------------------------------
!> @brief Convert a Base64 ASCII string into its byte-wise binary representation.
!--------------------------------------------------------------------------------------------------
pure function decodeBase64(base64_str) result(bytes)
character(len=*), intent(in) :: base64_str !< Base64 string representation
integer(C_SIGNED_CHAR), dimension(base64_nByte(len(base64_str,pI64))) :: bytes
integer(C_SIGNED_CHAR), dimension(0:3) :: charPos
integer(pI64) :: c, b, p
c = 1_pI64
b = 1_pI64
do while(c < len(base64_str,kind=pI64))
do p=0_pI64,3_pI64
if(c+p<=len(base64_str,kind=pI64)) then
charPos(p) = int(index(base64_encoding,base64_str(c+p:c+p))-1,C_SIGNED_CHAR)
else
charPos(p) = 0_C_SIGNED_CHAR
endif
enddo
call mvbits(charPos(0),0,6,bytes(b+0),2)
call mvbits(charPos(1),4,2,bytes(b+0),0)
call mvbits(charPos(1),0,4,bytes(b+1),4)
call mvbits(charPos(2),2,4,bytes(b+1),0)
call mvbits(charPos(2),0,2,bytes(b+2),6)
call mvbits(charPos(3),0,6,bytes(b+2),0)
b = b+3_pI64
c = c+4_pI64
enddo
end function decodeBase64
!--------------------------------------------------------------------------------------------------
!> @brief Test for valid Base64 encoded string.
!> @details Input string must be properly padded.
!--------------------------------------------------------------------------------------------------
pure logical function validBase64(base64_str)
character(len=*), intent(in) :: base64_str !< Base64 string representation
integer(pI64) :: l
l = len(base64_str,pI64)
validBase64 = .true.
if(mod(l,4_pI64)/=0_pI64 .or. l < 4_pInt) validBase64 = .false.
if(verify(base64_str(:l-2_pI64),base64_encoding, kind=pI64) /= 0_pI64) validBase64 = .false.
if(verify(base64_str(l-1_pI64:),base64_encoding//'=',kind=pI64) /= 0_pI64) validBase64 = .false.
end function validBase64
!--------------------------------------------------------------------------------------------------
!> @brief Check correctness of base64 functions.
!--------------------------------------------------------------------------------------------------
subroutine selfTest
integer(C_SIGNED_CHAR), dimension(:), allocatable :: bytes
character(len=*), parameter :: zero_to_three = 'AAECAw=='
! https://en.wikipedia.org/wiki/Base64#Output_padding
if(base64_nChar(20_pI64) /= 28_pI64) call IO_error(0,ext_msg='base64_nChar/20/28')
if(base64_nChar(19_pI64) /= 28_pI64) call IO_error(0,ext_msg='base64_nChar/19/28')
if(base64_nChar(18_pI64) /= 24_pI64) call IO_error(0,ext_msg='base64_nChar/18/24')
if(base64_nChar(17_pI64) /= 24_pI64) call IO_error(0,ext_msg='base64_nChar/17/24')
if(base64_nChar(16_pI64) /= 24_pI64) call IO_error(0,ext_msg='base64_nChar/16/24')
if(base64_nByte(4_pI64) /= 3_pI64) call IO_error(0,ext_msg='base64_nByte/4/3')
if(base64_nByte(8_pI64) /= 6_pI64) call IO_error(0,ext_msg='base64_nByte/8/6')
bytes = base64_to_bytes(zero_to_three)
if(any(bytes /= int([0,1,2,3],C_SIGNED_CHAR)) .or. size(bytes) /= 4) call IO_error(0,ext_msg='base64_to_bytes//')
bytes = base64_to_bytes(zero_to_three,e=1_pI64)
if(any(bytes /= int([0],C_SIGNED_CHAR)) .or. size(bytes) /= 1) call IO_error(0,ext_msg='base64_to_bytes//1')
bytes = base64_to_bytes(zero_to_three,e=2_pI64)
if(any(bytes /= int([0,1],C_SIGNED_CHAR)) .or. size(bytes) /= 2) call IO_error(0,ext_msg='base64_to_bytes//2')
bytes = base64_to_bytes(zero_to_three,e=3_pI64)
if(any(bytes /= int([0,1,2],C_SIGNED_CHAR)) .or. size(bytes) /= 3) call IO_error(0,ext_msg='base64_to_bytes//3')
bytes = base64_to_bytes(zero_to_three,e=4_pI64)
if(any(bytes /= int([0,1,2,3],C_SIGNED_CHAR)) .or. size(bytes) /= 4) call IO_error(0,ext_msg='base64_to_bytes//4')
bytes = base64_to_bytes(zero_to_three,s=1_pI64)
if(any(bytes /= int([0,1,2,3],C_SIGNED_CHAR)) .or. size(bytes) /= 4) call IO_error(0,ext_msg='base64_to_bytes/1/')
bytes = base64_to_bytes(zero_to_three,s=2_pI64)
if(any(bytes /= int([1,2,3],C_SIGNED_CHAR)) .or. size(bytes) /= 3) call IO_error(0,ext_msg='base64_to_bytes/2/')
bytes = base64_to_bytes(zero_to_three,s=3_pI64)
if(any(bytes /= int([2,3],C_SIGNED_CHAR)) .or. size(bytes) /= 2) call IO_error(0,ext_msg='base64_to_bytes/3/')
bytes = base64_to_bytes(zero_to_three,s=4_pI64)
if(any(bytes /= int([3],C_SIGNED_CHAR)) .or. size(bytes) /= 1) call IO_error(0,ext_msg='base64_to_bytes/4/')
bytes = base64_to_bytes(zero_to_three,s=1_pI64,e=1_pI64)
if(any(bytes /= int([0],C_SIGNED_CHAR)) .or. size(bytes) /= 1) call IO_error(0,ext_msg='base64_to_bytes/1/1')
bytes = base64_to_bytes(zero_to_three,s=2_pI64,e=2_pI64)
if(any(bytes /= int([1],C_SIGNED_CHAR)) .or. size(bytes) /= 1) call IO_error(0,ext_msg='base64_to_bytes/2/2')
bytes = base64_to_bytes(zero_to_three,s=3_pI64,e=3_pI64)
if(any(bytes /= int([2],C_SIGNED_CHAR)) .or. size(bytes) /= 1) call IO_error(0,ext_msg='base64_to_bytes/3/3')
bytes = base64_to_bytes(zero_to_three,s=4_pI64,e=4_pI64)
if(any(bytes /= int([3],C_SIGNED_CHAR)) .or. size(bytes) /= 1) call IO_error(0,ext_msg='base64_to_bytes/4/4')
bytes = base64_to_bytes(zero_to_three,s=1_pI64,e=2_pI64)
if(any(bytes /= int([0,1],C_SIGNED_CHAR)) .or. size(bytes) /= 2) call IO_error(0,ext_msg='base64_to_bytes/1/2')
bytes = base64_to_bytes(zero_to_three,s=2_pI64,e=3_pI64)
if(any(bytes /= int([1,2],C_SIGNED_CHAR)) .or. size(bytes) /= 2) call IO_error(0,ext_msg='base64_to_bytes/2/3')
bytes = base64_to_bytes(zero_to_three,s=3_pI64,e=4_pI64)
if(any(bytes /= int([2,3],C_SIGNED_CHAR)) .or. size(bytes) /= 2) call IO_error(0,ext_msg='base64_to_bytes/3/4')
bytes = base64_to_bytes(zero_to_three,s=1_pI64,e=3_pI64)
if(any(bytes /= int([0,1,2],C_SIGNED_CHAR)) .or. size(bytes) /= 3) call IO_error(0,ext_msg='base64_to_bytes/1/3')
bytes = base64_to_bytes(zero_to_three,s=2_pI64,e=4_pI64)
if(any(bytes /= int([1,2,3],C_SIGNED_CHAR)) .or. size(bytes) /= 3) call IO_error(0,ext_msg='base64_to_bytes/2/4')
bytes = base64_to_bytes(zero_to_three,s=1_pI64,e=4_pI64)
if(any(bytes /= int([0,1,2,3],C_SIGNED_CHAR)) .or. size(bytes) /= 4) call IO_error(0,ext_msg='base64_to_bytes/1/4')
end subroutine selfTest
end module base64
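The self test pins down the semantics: 'AAECAw==' decodes to the bytes 0,1,2,3, and the optional s/e arguments select a 1-based, inclusive byte range of the decoded stream. The reference values can be cross-checked with Python's standard library:

    import base64

    b = base64.b64decode('AAECAw==')
    assert list(b) == [0,1,2,3]   # matches base64_to_bytes(zero_to_three)
    assert list(b[1:3]) == [1,2]  # matches base64_to_bytes(zero_to_three,s=2_pI64,e=3_pI64)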

View File

@ -255,7 +255,7 @@ subroutine crystallite_init
do e = FEsolving_execElem(1),FEsolving_execElem(2)
myNcomponents = homogenization_Ngrains(material_homogenizationAt(e))
do i = FEsolving_execIP(1), FEsolving_execIP(2); do c = 1, myNcomponents
crystallite_Fp0(1:3,1:3,c,i,e) = material_orientation0(c,i,e)%asMatrix() ! plastic def gradient reflects init orientation
crystallite_Fp0(1:3,1:3,c,i,e) = material_orientation0(c,i,e)%asMatrix() ! Fp reflects initial orientation (see 10.1016/j.actamat.2006.01.005)
crystallite_Fp0(1:3,1:3,c,i,e) = crystallite_Fp0(1:3,1:3,c,i,e) &
/ math_det33(crystallite_Fp0(1:3,1:3,c,i,e))**(1.0_pReal/3.0_pReal)
crystallite_Fi0(1:3,1:3,c,i,e) = constitutive_initialFi(c,i,e)
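Dividing Fp0 by det(Fp0)**(1/3) rescales it to unit determinant, so the initial plastic deformation gradient is volume-preserving while still encoding the orientation. In numpy terms (values illustrative):

    import numpy as np

    Fp0 = 2.0*np.eye(3)                   # det = 8: not volume-preserving
    Fp0 /= np.linalg.det(Fp0)**(1.0/3.0)  # rescaled to det = 1 (unimodular)
    assert np.isclose(np.linalg.det(Fp0), 1.0)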

View File

@ -10,6 +10,8 @@ module discretization_grid
use prec
use system_routines
use base64
use zlib
use DAMASK_interface
use IO
use config
@ -64,7 +66,11 @@ subroutine discretization_grid_init(restart)
write(6,'(/,a)') ' <<<+- discretization_grid init -+>>>'; flush(6)
call readGeom(grid,geomSize,origin,microstructureAt)
if(index(geometryFile,'.vtr') /= 0) then
call readVTR(grid,geomSize,origin,microstructureAt)
else
call readGeom(grid,geomSize,origin,microstructureAt)
endif
!--------------------------------------------------------------------------------------------------
! grid solver specific quantities
@ -139,10 +145,10 @@ end subroutine discretization_grid_init
subroutine readGeom(grid,geomSize,origin,microstructure)
integer, dimension(3), intent(out) :: &
grid ! grid (for all processes!)
grid ! grid (across all processes!)
real(pReal), dimension(3), intent(out) :: &
geomSize, & ! size (for all processes!)
origin ! origin (for all processes!)
geomSize, & ! size (across all processes!)
origin ! origin (across all processes!)
integer, dimension(:), intent(out), allocatable :: &
microstructure
@ -150,7 +156,6 @@ subroutine readGeom(grid,geomSize,origin,microstructure)
character(len=65536) :: line
integer, allocatable, dimension(:) :: chunkPos
integer :: &
h =- 1, &
headerLength = -1, & !< length of header (in lines)
fileLength, & !< length of the geom file (in characters)
fileUnit, &
@ -189,7 +194,7 @@ subroutine readGeom(grid,geomSize,origin,microstructure)
endif
!--------------------------------------------------------------------------------------------------
! read and interprete header
! read and interpret header
origin = 0.0_pReal
l = 0
do while (l < headerLength .and. startPos < len(rawData))
@ -294,6 +299,360 @@ subroutine readGeom(grid,geomSize,origin,microstructure)
end subroutine readGeom
!--------------------------------------------------------------------------------------------------
!> @brief Parse vtk rectilinear grid (.vtr)
!> @details https://vtk.org/Wiki/VTK_XML_Formats
!--------------------------------------------------------------------------------------------------
subroutine readVTR(grid,geomSize,origin,microstructure)
integer, dimension(3), intent(out) :: &
grid ! grid (across all processes!)
real(pReal), dimension(3), intent(out) :: &
geomSize, & ! size (across all processes!)
origin ! origin (across all processes!)
integer, dimension(:), intent(out), allocatable :: &
microstructure
character(len=:), allocatable :: fileContent, dataType, headerType
logical :: inFile,inGrid,gotCoordinates,gotCellData,compressed
integer :: fileUnit, myStat, coord
integer(pI64) :: &
fileLength, & !< length of the geom file (in characters)
startPos, endPos, &
s
grid = -1
geomSize = -1.0_pReal
!--------------------------------------------------------------------------------------------------
! read raw data as stream
inquire(file = trim(geometryFile), size=fileLength)
open(newunit=fileUnit, file=trim(geometryFile), access='stream',&
status='old', position='rewind', action='read',iostat=myStat)
if(myStat /= 0) call IO_error(100,ext_msg=trim(geometryFile))
allocate(character(len=fileLength)::fileContent)
read(fileUnit) fileContent
close(fileUnit)
inFile = .false.
inGrid = .false.
gotCoordinates = .false.
gotCelldata = .false.
!--------------------------------------------------------------------------------------------------
! interpret XML file
startPos = 1_pI64
do while (startPos < len(fileContent,kind=pI64))
endPos = startPos + index(fileContent(startPos:),IO_EOL,kind=pI64) - 2_pI64
if (endPos < startPos) endPos = len(fileContent,kind=pI64) ! end of file without new line
if(.not. inFile) then
if(index(fileContent(startPos:endPos),'<VTKFile',kind=pI64) /= 0_pI64) then
inFile = .true.
if(.not. fileFormatOk(fileContent(startPos:endPos))) call IO_error(error_ID = 844, ext_msg='file format')
headerType = merge('UInt64','UInt32',getXMLValue(fileContent(startPos:endPos),'header_type')=='UInt64')
compressed = getXMLValue(fileContent(startPos:endPos),'compressor') == 'vtkZLibDataCompressor'
endif
else
if(.not. inGrid) then
if(index(fileContent(startPos:endPos),'<RectilinearGrid',kind=pI64) /= 0_pI64) inGrid = .true.
else
if(index(fileContent(startPos:endPos),'<CellData>',kind=pI64) /= 0_pI64) then
gotCellData = .true.
startPos = endPos + 2_pI64
do while (index(fileContent(startPos:endPos),'</CellData>',kind=pI64) == 0_pI64)
endPos = startPos + index(fileContent(startPos:),IO_EOL,kind=pI64) - 2_pI64
if(index(fileContent(startPos:endPos),'<DataArray',kind=pI64) /= 0_pI64 .and. &
getXMLValue(fileContent(startPos:endPos),'Name') == 'materialpoint' ) then
if(getXMLValue(fileContent(startPos:endPos),'format') /= 'binary') &
call IO_error(error_ID = 844, ext_msg='format (materialpoint)')
dataType = getXMLValue(fileContent(startPos:endPos),'type')
startPos = endPos + 2_pI64
endPos = startPos + index(fileContent(startPos:),IO_EOL,kind=pI64) - 2_pI64
s = startPos + verify(fileContent(startPos:endPos),IO_WHITESPACE,kind=pI64) -1_pI64 ! start (no leading whitespace)
microstructure = as_Int(fileContent(s:endPos),headerType,compressed,dataType)
exit
endif
startPos = endPos + 2_pI64
enddo
elseif(index(fileContent(startPos:endPos),'<Coordinates>',kind=pI64) /= 0_pI64) then
gotCoordinates = .true.
startPos = endPos + 2_pI64
coord = 0
do while (startPos<fileLength)
endPos = startPos + index(fileContent(startPos:),IO_EOL,kind=pI64) - 2_pI64
if(index(fileContent(startPos:endPos),'<DataArray',kind=pI64) /= 0_pI64) then
if(getXMLValue(fileContent(startPos:endPos),'format') /= 'binary') &
call IO_error(error_ID = 844, ext_msg='format (coordinates)')
dataType = getXMLValue(fileContent(startPos:endPos),'type')
startPos = endPos + 2_pI64
endPos = startPos + index(fileContent(startPos:),IO_EOL,kind=pI64) - 2_pI64
s = startPos + verify(fileContent(startPos:endPos),IO_WHITESPACE,kind=pI64) -1_pI64 ! start (no leading whitespace)
coord = coord + 1
call gridSizeOrigin(fileContent(s:endPos),headerType,compressed,dataType,coord)
endif
if(index(fileContent(startPos:endPos),'</Coordinates>',kind=pI64) /= 0_pI64) exit
startPos = endPos + 2_pI64
enddo
endif
endif
endif
if(gotCellData .and. gotCoordinates) exit
startPos = endPos + 2_pI64
end do
if(.not. allocated(microstructure)) call IO_error(error_ID = 844, ext_msg='materialpoint not found')
if(size(microstructure) /= product(grid)) call IO_error(error_ID = 844, ext_msg='size(materialpoint)')
if(any(geomSize<=0)) call IO_error(error_ID = 844, ext_msg='size')
if(any(grid<1)) call IO_error(error_ID = 844, ext_msg='grid')
contains
!------------------------------------------------------------------------------------------------
!> @brief determine size and origin from coordinates
!------------------------------------------------------------------------------------------------
subroutine gridSizeOrigin(base64_str,headerType,compressed,dataType,direction)
character(len=*), intent(in) :: base64_str, & ! base64 encoded string of 1D coordinates
headerType, & ! header type (UInt32 or UInt64)
dataType ! data type (Int32, Int64, Float32, Float64)
logical, intent(in) :: compressed ! indicate whether data is zlib compressed
integer, intent(in) :: direction ! direction (1=x,2=y,3=z)
real(pReal), dimension(:), allocatable :: coords,delta
coords = as_pReal(base64_str,headerType,compressed,dataType)
delta = coords(2:) - coords(:size(coords)-1)
if(any(delta<0.0_pReal) .or. dNeq(maxval(delta),minval(delta))) &
call IO_error(error_ID = 844, ext_msg = 'grid spacing')
grid(direction) = size(coords)-1
origin(direction) = coords(1)
geomSize(direction) = coords(size(coords)) - coords(1)
end subroutine
!------------------------------------------------------------------------------------------------
!> @brief Interpret Base64 string in vtk XML file as integer of default kind
!------------------------------------------------------------------------------------------------
function as_Int(base64_str,headerType,compressed,dataType)
character(len=*), intent(in) :: base64_str, & ! base64 encoded string
headerType, & ! header type (UInt32 or UInt64)
dataType ! data type (Int32, Int64, Float32, Float64)
logical, intent(in) :: compressed ! indicate whether data is zlib compressed
integer, dimension(:), allocatable :: as_Int
select case(dataType)
case('Int32')
as_Int = int(bytes_to_C_INT32_T(asBytes(base64_str,headerType,compressed)))
case('Int64')
as_Int = int(bytes_to_C_INT64_T(asBytes(base64_str,headerType,compressed)))
case('Float32')
as_Int = int(bytes_to_C_FLOAT (asBytes(base64_str,headerType,compressed)))
case('Float64')
as_Int = int(bytes_to_C_DOUBLE (asBytes(base64_str,headerType,compressed)))
case default
call IO_error(844_pInt,ext_msg='unknown data type: '//trim(dataType))
end select
end function as_Int
!------------------------------------------------------------------------------------------------
!> @brief Interpret Base64 string in vtk XML file as real of pReal kind
!------------------------------------------------------------------------------------------------
function as_pReal(base64_str,headerType,compressed,dataType)
character(len=*), intent(in) :: base64_str, & ! base64 encoded string
headerType, & ! header type (UInt32 or UInt64)
dataType ! data type (Int32, Int64, Float32, Float64)
logical, intent(in) :: compressed ! indicate whether data is zlib compressed
real(pReal), dimension(:), allocatable :: as_pReal
select case(dataType)
case('Int32')
as_pReal = real(bytes_to_C_INT32_T(asBytes(base64_str,headerType,compressed)),pReal)
case('Int64')
as_pReal = real(bytes_to_C_INT64_T(asBytes(base64_str,headerType,compressed)),pReal)
case('Float32')
as_pReal = real(bytes_to_C_FLOAT (asBytes(base64_str,headerType,compressed)),pReal)
case('Float64')
as_pReal = real(bytes_to_C_DOUBLE (asBytes(base64_str,headerType,compressed)),pReal)
case default
call IO_error(844_pInt,ext_msg='unknown data type: '//trim(dataType))
end select
end function as_pReal
!------------------------------------------------------------------------------------------------
!> @brief Interpret Base64 string in vtk XML file as bytes
!------------------------------------------------------------------------------------------------
function asBytes(base64_str,headerType,compressed) result(bytes)
character(len=*), intent(in) :: base64_str, & ! base64 encoded string
headerType   ! header type (UInt32 or UInt64)
logical, intent(in) :: compressed ! indicate whether data is zlib compressed
integer(C_SIGNED_CHAR), dimension(:), allocatable :: bytes
if(compressed) then
bytes = asBytes_compressed(base64_str,headerType)
else
bytes = asBytes_uncompressed(base64_str,headerType)
endif
end function asBytes
!------------------------------------------------------------------------------------------------
!> @brief Interpret compressed Base64 string in vtk XML file as bytes
!> @details A compressed Base64 string consists of a header block and a data block
! [#blocks/#u-size/#p-size/#c-size-1/#c-size-2/.../#c-size-#blocks][DATA-1/DATA-2...]
!  #blocks   = Number of blocks
!  #u-size   = Block size before compression
!  #p-size   = Size of last partial block (zero if not needed)
!  #c-size-i = Size in bytes of block i after compression
!  (a worked example follows the function body)
!------------------------------------------------------------------------------------------------
function asBytes_compressed(base64_str,headerType) result(bytes)
character(len=*), intent(in) :: base64_str, & ! base64 encoded string
headerType   ! header type (UInt32 or UInt64)
integer(C_SIGNED_CHAR), dimension(:), allocatable :: bytes, bytes_inflated
integer(pI64), dimension(:), allocatable :: temp, size_inflated, size_deflated
integer(pI64) :: headerLen, nBlock, b,s,e
if (headerType == 'UInt32') then
temp = int(bytes_to_C_INT32_T(base64_to_bytes(base64_str(:base64_nChar(4_pI64)))),pI64)
nBlock = int(temp(1),pI64)
headerLen = 4_pI64 * (3_pI64 + nBlock)
temp = int(bytes_to_C_INT32_T(base64_to_bytes(base64_str(:base64_nChar(headerLen)))),pI64)
elseif(headerType == 'UInt64') then
temp = int(bytes_to_C_INT64_T(base64_to_bytes(base64_str(:base64_nChar(8_pI64)))),pI64)
nBlock = int(temp(1),pI64)
headerLen = 8_pI64 * (3_pI64 + nBlock)
temp = int(bytes_to_C_INT64_T(base64_to_bytes(base64_str(:base64_nChar(headerLen)))),pI64)
endif
allocate(size_inflated(nBlock),source=temp(2))
size_inflated(nBlock) = merge(temp(3),temp(2),temp(3)/=0_pI64)
size_deflated = temp(4:)
bytes_inflated = base64_to_bytes(base64_str(base64_nChar(headerLen)+1_pI64:))
allocate(bytes(0))
e = 0_pI64
do b = 1, nBlock
s = e + 1_pI64
e = s + size_deflated(b) - 1_pI64
bytes = [bytes,zlib_inflate(bytes_inflated(s:e),size_inflated(b))]
enddo
end function asBytes_compressed
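! Worked example (header values assumed for illustration, UInt32 header): a decoded header
!   temp = [3, 32768, 1000, 300, 120, 50]
! announces 3 blocks of 32768 bytes before compression, the last one only
! partially filled (1000 bytes), stored as 300, 120, and 50 compressed bytes.
! Hence size_inflated = [32768, 32768, 1000] and size_deflated = [300, 120, 50];
! the concatenated data blocks following the header are inflated one by one
! and appended to bytes.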
!------------------------------------------------------------------------------------------------
!> @brief Interpret uncompressed Base64 string in vtk XML file as bytes
!> @details An uncompressed Base64 string consists of N header blocks, each followed by its data block
![#bytes-1/DATA-1][#bytes-2/DATA-2]...
!------------------------------------------------------------------------------------------------
function asBytes_uncompressed(base64_str,headerType) result(bytes)
character(len=*), intent(in) :: base64_str, & ! base64 encoded string
headerType   ! header type (UInt32 or UInt64)
integer(pI64) :: s
integer(pI64), dimension(1) :: nByte
integer(C_SIGNED_CHAR), dimension(:), allocatable :: bytes
allocate(bytes(0))
s=0_pI64
if (headerType == 'UInt32') then
do while(s+base64_nChar(4_pI64)<(len(base64_str,pI64)))
nByte = int(bytes_to_C_INT32_T(base64_to_bytes(base64_str(s+1_pI64:s+base64_nChar(4_pI64)))),pI64)
bytes = [bytes,base64_to_bytes(base64_str(s+1_pI64:s+base64_nChar(4_pI64+nByte(1))),5_pI64)]
s = s + base64_nChar(4_pI64+nByte(1))
enddo
elseif(headerType == 'UInt64') then
do while(s+base64_nChar(8_pI64)<(len(base64_str,pI64)))
nByte = int(bytes_to_C_INT64_T(base64_to_bytes(base64_str(s+1_pI64:s+base64_nChar(8_pI64)))),pI64)
bytes = [bytes,base64_to_bytes(base64_str(s+1_pI64:s+base64_nChar(8_pI64+nByte(1))),9_pI64)]
s = s + base64_nChar(8_pI64+nByte(1))
enddo
endif
end function asBytes_uncompressed
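! Worked example (values assumed for illustration, UInt32 header): a block whose
! 4 byte header announces nByte = 12 occupies base64_nChar(16) encoded characters
! in total; base64_to_bytes is called with start offset 5 (9 for UInt64), which
! presumably skips the header bytes so that only the payload of each block is
! appended to the result.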
!------------------------------------------------------------------------------------------------
!> @brief Get XML string value for given key
!------------------------------------------------------------------------------------------------
pure function getXMLValue(line,key)
character(len=*), intent(in) :: line, key
character(len=:), allocatable :: getXMLValue
integer :: s,e
#ifdef __INTEL_COMPILER
character :: q
#endif
s = index(line," "//key,back=.true.)
if(s==0) then
getXMLValue = ''
else
e = s + 1 + scan(line(s+1:),"'"//'"')
if(scan(line(s:e-2),'=') == 0) then
getXMLValue = ''
else
s = e
! https://community.intel.com/t5/Intel-Fortran-Compiler/ICE-for-merge-with-strings/m-p/1207204#M151657
#ifdef __INTEL_COMPILER
q = line(s-1:s-1)
e = s + index(line(s:),q) - 1
#else
e = s + index(line(s:),merge("'",'"',line(s-1:s-1)=="'")) - 1
#endif
getXMLValue = line(s:e-1)
endif
endif
end function getXMLValue
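! Usage sketch (line content assumed for illustration): for a typical vtk XML
! header line such as
!   <VTKFile type="RectilinearGrid" byte_order="LittleEndian">
! getXMLValue(line,'type') returns 'RectilinearGrid', and a key that does not
! occur in the line returns the empty string.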
!------------------------------------------------------------------------------------------------
!> @brief check for supported file format
!------------------------------------------------------------------------------------------------
pure function fileFormatOk(line)
character(len=*),intent(in) :: line
logical :: fileFormatOk
fileFormatOk = getXMLValue(line,'type') == 'RectilinearGrid' .and. &
getXMLValue(line,'byte_order') == 'LittleEndian' .and. &
getXMLValue(line,'compressor') /= 'vtkLZ4DataCompressor' .and. &
getXMLValue(line,'compressor') /= 'vtkLZMADataCompressor'
end function fileFormatOk
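! Note: a file without 'compressor' attribute yields '' from getXMLValue, which
! passes both inequality checks, i.e. uncompressed and zlib-compressed
! RectilinearGrid files in little endian byte order are both accepted.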
end subroutine readVTR
!---------------------------------------------------------------------------------------------------
!> @brief Calculate undeformed position of IPs/cell centers (pretend to be an element)
!---------------------------------------------------------------------------------------------------


@@ -8,18 +8,20 @@
!--------------------------------------------------------------------------------------------------
module prec
use, intrinsic :: IEEE_arithmetic
use, intrinsic :: ISO_C_Binding
implicit none
public
! https://software.intel.com/en-us/blogs/2017/03/27/doctor-fortran-in-it-takes-all-kinds
integer, parameter :: pReal = IEEE_selected_real_kind(15,307) !< number with 15 significant digits, up to 1e+-307 (typically 64 bit)
integer, parameter :: pI32 = selected_int_kind(9) !< number with at least up to +-1e9 (typically 32 bit)
integer, parameter :: pI64 = selected_int_kind(18) !< number with at least up to +-1e18 (typically 64 bit)
#if(INT==8)
integer, parameter :: pInt = selected_int_kind(18) !< number with at least up to +-1e18 (typically 64 bit)
integer, parameter :: pInt = pI64
#else
integer, parameter :: pInt = selected_int_kind(9) !< number with at least up to +-1e9 (typically 32 bit)
integer, parameter :: pInt = pI32
#endif
integer, parameter :: pLongInt = selected_int_kind(18) !< number with at least up to +-1e18 (typically 64 bit)
integer, parameter :: pStringLen = 256 !< default string length
integer, parameter :: pPathLen = 4096 !< maximum length of a path name on linux
@@ -81,7 +83,7 @@ contains
!--------------------------------------------------------------------------------------------------
!> @brief reporting precision
!> @brief report precision and do self test
!--------------------------------------------------------------------------------------------------
subroutine prec_init
@@ -100,7 +102,7 @@ end subroutine prec_init
!--------------------------------------------------------------------------------------------------
!> @brief equality comparison for float with double precision
!> @brief Test floating point numbers with double precision for equality.
! replaces "==" but for certain (relative) tolerance. Counterpart to dNeq
! https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/
! AlmostEqualRelative
@@ -123,7 +125,7 @@ end function dEq
!--------------------------------------------------------------------------------------------------
!> @brief inequality comparison for float with double precision
!> @brief Test floating point numbers with double precision for inequality.
! replaces "!=" but for certain (relative) tolerance. Counterpart to dEq
! https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/
! AlmostEqualRelative NOT
@@ -143,7 +145,7 @@ end function dNeq
!--------------------------------------------------------------------------------------------------
!> @brief equality to 0 comparison for float with double precision
!> @brief Test floating point number with double precision for equality to 0.
! replaces "==0" but everything not representable as a normal number is treated as 0. Counterpart to dNeq0
! https://de.mathworks.com/help/matlab/ref/realmin.html
! https://docs.oracle.com/cd/E19957-01/806-3568/ncg_math.html
@@ -166,7 +168,7 @@ end function dEq0
!--------------------------------------------------------------------------------------------------
!> @brief inequality to 0 comparison for float with double precision
!> @brief Test floating point number with double precision for inequality to 0.
! replaces "!=0" but everything not representable as a normal number is treated as 0. Counterpart to dEq0
! https://de.mathworks.com/help/matlab/ref/realmin.html
! https://docs.oracle.com/cd/E19957-01/806-3568/ncg_math.html
@@ -186,7 +188,7 @@ end function dNeq0
!--------------------------------------------------------------------------------------------------
!> @brief equality comparison for complex with double precision
!> @brief Test complex floating point numbers with double precision for equality.
! replaces "==" but for certain (relative) tolerance. Counterpart to cNeq
! https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/
! probably a component wise comparison would be more accurate than the comparsion of the absolute
@@ -210,7 +212,7 @@ end function cEq
!--------------------------------------------------------------------------------------------------
!> @brief inequality comparison for complex with double precision
!> @brief Test complex floating point numbers with double precision for inequality.
! replaces "!=" but for certain (relative) tolerance. Counterpart to cEq
! https://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/
! probably a component wise comparison would be more accurate than the comparsion of the absolute
@@ -231,23 +233,95 @@ end function cNeq
!--------------------------------------------------------------------------------------------------
!> @brief check correctness of some prec functions
!> @brief Decode byte array (C_SIGNED_CHAR) as C_FLOAT array (4 byte float).
!--------------------------------------------------------------------------------------------------
pure function bytes_to_C_FLOAT(bytes)
integer(C_SIGNED_CHAR), dimension(:), intent(in) :: bytes !< byte-wise representation of a C_FLOAT array
real(C_FLOAT), dimension(size(bytes,kind=pI64)/(storage_size(0._C_FLOAT,pI64)/8_pI64)) :: &
bytes_to_C_FLOAT
bytes_to_C_FLOAT = transfer(bytes,bytes_to_C_FLOAT,size(bytes_to_C_FLOAT))
end function bytes_to_C_FLOAT
!--------------------------------------------------------------------------------------------------
!> @brief Decode byte array (C_SIGNED_CHAR) as C_DOUBLE array (8 byte float).
!--------------------------------------------------------------------------------------------------
pure function bytes_to_C_DOUBLE(bytes)
integer(C_SIGNED_CHAR), dimension(:), intent(in) :: bytes !< byte-wise representation of a C_DOUBLE array
real(C_DOUBLE), dimension(size(bytes,kind=pI64)/(storage_size(0._C_DOUBLE,pI64)/8_pI64)) :: &
bytes_to_C_DOUBLE
bytes_to_C_DOUBLE = transfer(bytes,bytes_to_C_DOUBLE,size(bytes_to_C_DOUBLE))
end function bytes_to_C_DOUBLE
!--------------------------------------------------------------------------------------------------
!> @brief Decode byte array (C_SIGNED_CHAR) as C_INT32_T array (4 byte signed integer).
!--------------------------------------------------------------------------------------------------
pure function bytes_to_C_INT32_T(bytes)
integer(C_SIGNED_CHAR), dimension(:), intent(in) :: bytes !< byte-wise representation of a C_INT32_T array
integer(C_INT32_T), dimension(size(bytes,kind=pI64)/(storage_size(0_C_INT32_T,pI64)/8_pI64)) :: &
bytes_to_C_INT32_T
bytes_to_C_INT32_T = transfer(bytes,bytes_to_C_INT32_T,size(bytes_to_C_INT32_T))
end function bytes_to_C_INT32_T
!--------------------------------------------------------------------------------------------------
!> @brief Decode byte array (C_SIGNED_CHAR) as C_INT64_T array (8 byte signed integer).
!--------------------------------------------------------------------------------------------------
pure function bytes_to_C_INT64_T(bytes)
integer(C_SIGNED_CHAR), dimension(:), intent(in) :: bytes !< byte-wise representation of a C_INT64_T array
integer(C_INT64_T), dimension(size(bytes,kind=pI64)/(storage_size(0_C_INT64_T,pI64)/8_pI64)) :: &
bytes_to_C_INT64_T
bytes_to_C_INT64_T = transfer(bytes,bytes_to_C_INT64_T,size(bytes_to_C_INT64_T))
end function bytes_to_C_INT64_T
!--------------------------------------------------------------------------------------------------
!> @brief Check correctness of some prec functions.
!--------------------------------------------------------------------------------------------------
subroutine selfTest
integer, allocatable, dimension(:) :: realloc_lhs_test
real(pReal), dimension(2) :: r
real(pReal), dimension(1) :: f
integer(pInt), dimension(1) :: i
real(pReal), dimension(2) :: r
external :: &
quit
realloc_lhs_test = [1,2]
if (any(realloc_lhs_test/=[1,2])) call quit(9000)
call random_number(r)
r = r/minval(r)
if(.not. all(dEq(r,r+PREAL_EPSILON))) call quit(9000)
if(dEq(r(1),r(2)) .and. dNeq(r(1),r(2))) call quit(9000)
if(.not. all(dEq0(r-(r+PREAL_MIN)))) call quit(9000)
realloc_lhs_test = [1,2]
if (any(realloc_lhs_test/=[1,2])) call quit(9000)
! https://www.binaryconvert.com
! https://www.rapidtables.com/convert/number/binary-to-decimal.html
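! e.g. 20191102.0 in IEEE-754 single precision is Z'4B9A0BBF'; stored little
! endian, the byte sequence is Z'BF',Z'0B',Z'9A',Z'4B', i.e. [-65,+11,-102,+75]
! when read as signed chars (two's complement)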
f = real(bytes_to_C_FLOAT(int([-65,+11,-102,+75],C_SIGNED_CHAR)),pReal)
if(dNeq(f(1),20191102.0_pReal,0.0_pReal)) call quit(9000)
f = real(bytes_to_C_DOUBLE(int([0,0,0,-32,+119,+65,+115,65],C_SIGNED_CHAR)),pReal)
if(dNeq(f(1),20191102.0_pReal,0.0_pReal)) call quit(9000)
i = int(bytes_to_C_INT32_T(int([+126,+23,+52,+1],C_SIGNED_CHAR)),pInt)
if(i(1) /= 20191102_pInt) call quit(9000)
i = int(bytes_to_C_INT64_T(int([+126,+23,+52,+1,0,0,0,0],C_SIGNED_CHAR)),pInt)
if(i(1) /= 20191102_pInt) call quit(9000)
end subroutine selfTest


@@ -99,7 +99,7 @@ module rotations
contains
!--------------------------------------------------------------------------------------------------
!> @brief doing self test
!> @brief do self test
!--------------------------------------------------------------------------------------------------
subroutine rotations_init

src/zlib.f90 Normal file

@@ -0,0 +1,44 @@
!--------------------------------------------------------------------------------------------------
!> @author Martin Diehl, Max-Planck-Institut für Eisenforschung GmbH
!> @brief Inflate zlib compressed data
!--------------------------------------------------------------------------------------------------
module zlib
use prec
implicit none
private
public :: &
zlib_inflate
interface
subroutine inflate_C(s_deflated,s_inflated,deflated,inflated) bind(C)
use, intrinsic :: ISO_C_Binding, only: &
C_SIGNED_CHAR, C_INT64_T
integer(C_INT64_T), intent(in) :: s_deflated,s_inflated
integer(C_SIGNED_CHAR), dimension(s_deflated), intent(in) :: deflated
integer(C_SIGNED_CHAR), dimension(s_inflated), intent(out) :: inflated
end subroutine inflate_C
end interface
contains
!--------------------------------------------------------------------------------------------------
!> @brief Inflate byte-wise representation
!--------------------------------------------------------------------------------------------------
function zlib_inflate(deflated,size_inflated)
integer(C_SIGNED_CHAR), dimension(:), intent(in) :: deflated
integer(pI64), intent(in) :: size_inflated
integer(C_SIGNED_CHAR), dimension(size_inflated) :: zlib_inflate
call inflate_C(size(deflated,kind=C_INT64_T),int(size_inflated,C_INT64_T),deflated,zlib_inflate)
end function zlib_inflate
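! Usage sketch (names and sizes assumed for illustration): inflating one
! deflated block of a vtk appended data section could look like
!   bytes = zlib_inflate(bytes_deflated(s:e),size_inflated(b))
! inflate_C is presumably a thin C wrapper around zlib's uncompress()/inflate()
! that writes exactly s_inflated bytes into the output buffer.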
end module zlib