Merge remote-tracking branch 'origin/development' into Fortran-simplifications

Martin Diehl 2020-09-15 19:36:59 +02:00
commit 1ff098d004
25 changed files with 126 additions and 440 deletions

View File

@@ -141,13 +141,6 @@ Pre_General:
- release
###################################################################################################
Post_ASCIItable:
stage: postprocessing
script: ASCIItable/test.py
except:
- master
- release
Post_General:
stage: postprocessing
script: PostProcessing/test.py

@@ -1 +1 @@
Subproject commit 92ca3e83b6093c1af277cfc06a504e4bb09fe8bc
Subproject commit 65ec74c07052e77f35a4b5e80bf110aff1f5ae61

View File

@@ -1 +1 @@
v3.0.0-alpha-147-gf0806a9e
v3.0.0-alpha-164-g7cda092a

View File

@@ -39,21 +39,21 @@ for filename in options.filenames:
N_digits = 5 # hack to keep test intact
for inc in damask.util.show_progress(results.iterate('increments'),len(results.increments)):
table = damask.Table(np.ones(np.product(results.grid),dtype=int)*int(inc[3:]),{'inc':(1,)})
table.add('pos',coords.reshape(-1,3))
table = table.add('pos',coords.reshape(-1,3))
results.pick('materialpoints',False)
results.pick('constituents', True)
for label in options.con:
x = results.get_dataset_location(label)
if len(x) != 0:
table.add(label,results.read_dataset(x,0,plain=True).reshape(results.grid.prod(),-1))
table = table.add(label,results.read_dataset(x,0,plain=True).reshape(results.grid.prod(),-1))
results.pick('constituents', False)
results.pick('materialpoints',True)
for label in options.mat:
x = results.get_dataset_location(label)
if len(x) != 0:
table.add(label,results.read_dataset(x,0,plain=True).reshape(results.grid.prod(),-1))
table = table.add(label,results.read_dataset(x,0,plain=True).reshape(results.grid.prod(),-1))
dirname = os.path.abspath(os.path.join(os.path.dirname(filename),options.dir))
if not os.path.isdir(dirname):
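
The change running through this and most of the following scripts is the same one: damask.Table.add no longer modifies the table in place but returns a modified copy, so each call site must capture the return value. A minimal stdlib analogy (not the damask API) of the pitfall and its fix:

# Hedged analogy: Table.add used to mutate like list.sort(); after this
# commit it behaves like sorted(), returning a new object to be rebound.
data = [3, 1, 2]
data.sort()                  # old style: in place, returns None
data = sorted([3, 1, 2])     # new style: rebind the returned result
assert data == [1, 2, 3]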

View File

@@ -181,13 +181,13 @@ for name in filenames:
if options.shape:
centers = damask.grid_filters.cell_coord(size,F)
shapeMismatch = shapeMismatch(size,F,nodes,centers)
table.add('shapeMismatch(({}))'.format(options.defgrad),
table = table.add('shapeMismatch(({}))'.format(options.defgrad),
shapeMismatch.reshape(-1,1,order='F'),
scriptID+' '+' '.join(sys.argv[1:]))
if options.volume:
volumeMismatch = volumeMismatch(size,F,nodes)
table.add('volMismatch(({}))'.format(options.defgrad),
table = table.add('volMismatch(({}))'.format(options.defgrad),
volumeMismatch.reshape(-1,1,order='F'),
scriptID+' '+' '.join(sys.argv[1:]))

View File

@@ -51,7 +51,7 @@ for name in filenames:
shape = (3,) if np.prod(field.shape)//np.prod(grid) == 3 else (3,3) # vector or tensor
field = field.reshape(tuple(grid)+(-1,),order='F').reshape(tuple(grid)+shape)
curl = damask.grid_filters.curl(size,field)
table.add('curlFFT({})'.format(label),
table = table.add('curlFFT({})'.format(label),
curl.reshape(tuple(grid)+(-1,)).reshape(-1,np.prod(shape),order='F'),
scriptID+' '+' '.join(sys.argv[1:]))

View File

@@ -67,7 +67,7 @@ for name in filenames:
table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
for label in options.labels:
table.add('d({})/d({})'.format(label,options.coordinates),
table = table.add('d({})/d({})'.format(label,options.coordinates),
derivative(table.get(options.coordinates),table.get(label)),
scriptID+' '+' '.join(sys.argv[1:]))

View File

@@ -53,19 +53,19 @@ for name in filenames:
F = table.get(options.f).reshape(tuple(grid)+(-1,),order='F').reshape(tuple(grid)+(3,3))
if options.nodal:
table = damask.Table(damask.grid_filters.node_coord0(grid,size).reshape(-1,3,order='F'),
{'pos':(3,)})
table.add('avg({}).{}'.format(options.f,options.pos),
{'pos':(3,)})\
.add('avg({}).{}'.format(options.f,options.pos),
damask.grid_filters.node_displacement_avg(size,F).reshape(-1,3,order='F'),
scriptID+' '+' '.join(sys.argv[1:]))
table.add('fluct({}).{}'.format(options.f,options.pos),
scriptID+' '+' '.join(sys.argv[1:]))\
.add('fluct({}).{}'.format(options.f,options.pos),
damask.grid_filters.node_displacement_fluct(size,F).reshape(-1,3,order='F'),
scriptID+' '+' '.join(sys.argv[1:]))
table.to_file(sys.stdout if name is None else os.path.splitext(name)[0]+'_nodal.txt')
else:
table.add('avg({}).{}'.format(options.f,options.pos),
table = table.add('avg({}).{}'.format(options.f,options.pos),
damask.grid_filters.cell_displacement_avg(size,F).reshape(-1,3,order='F'),
scriptID+' '+' '.join(sys.argv[1:]))
table.add('fluct({}).{}'.format(options.f,options.pos),
scriptID+' '+' '.join(sys.argv[1:]))\
.add('fluct({}).{}'.format(options.f,options.pos),
damask.grid_filters.cell_displacement_fluct(size,F).reshape(-1,3,order='F'),
scriptID+' '+' '.join(sys.argv[1:]))
table.to_file(sys.stdout if name is None else name)
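
Because every call now returns a fresh Table, the nodal branch above can chain .add() straight off the constructor using line continuations. A sketch of why that composes (plain Python, not the damask implementation):

# Each add() returns a new instance, so calls compose left to right.
class T:
    def __init__(self, cols=()):
        self.cols = tuple(cols)
    def add(self, name):
        return T(self.cols + (name,))    # new object per call

t = T(['pos']).add('avg(f).pos').add('fluct(f).pos')
assert t.cols == ('pos', 'avg(f).pos', 'fluct(f).pos')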

View File

@@ -51,7 +51,7 @@ for name in filenames:
shape = (3,) if np.prod(field.shape)//np.prod(grid) == 3 else (3,3) # vector or tensor
field = field.reshape(tuple(grid)+(-1,),order='F').reshape(tuple(grid)+shape)
div = damask.grid_filters.divergence(size,field)
table.add('divFFT({})'.format(label),
table = table.add('divFFT({})'.format(label),
div.reshape(tuple(grid)+(-1,)).reshape(-1,np.prod(shape)//3,order='F'),
scriptID+' '+' '.join(sys.argv[1:]))

View File

@@ -180,7 +180,7 @@ for name in filenames:
for i,feature in enumerate(feature_list):
table.add('ED_{}({})'.format(features[feature]['names'][0],options.id),
table = table.add('ED_{}({})'.format(features[feature]['names'][0],options.id),
distance[i,:],
scriptID+' '+' '.join(sys.argv[1:]))

View File

@@ -67,7 +67,7 @@ for name in filenames:
damask.grid_filters.coord0_check(table.get(options.pos))
for label in options.labels:
table.add('Gauss{}({})'.format(options.sigma,label),
table = table.add('Gauss{}({})'.format(options.sigma,label),
ndimage.filters.gaussian_filter(table.get(label).reshape(-1),
options.sigma,options.order,
mode = 'wrap' if options.periodic else 'nearest'),

View File

@@ -51,7 +51,7 @@ for name in filenames:
shape = (1,) if np.prod(field.shape)//np.prod(grid) == 1 else (3,) # scalar or vector
field = field.reshape(tuple(grid)+(-1,),order='F')
grad = damask.grid_filters.gradient(size,field)
table.add('gradFFT({})'.format(label),
table = table.add('gradFFT({})'.format(label),
grad.reshape(tuple(grid)+(-1,)).reshape(-1,np.prod(shape)*3,order='F'),
scriptID+' '+' '.join(sys.argv[1:]))

View File

@@ -1,64 +0,0 @@
#!/usr/bin/env python3
import os
import sys
from io import StringIO
from optparse import OptionParser
import numpy as np
import damask
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
Add data in column(s) of mapped ASCIItable selected from the row indexed by the value in a mapping column.
Row numbers start at 1.
""", version = scriptID)
parser.add_option('--index',
dest = 'index',
type = 'string', metavar = 'string',
help = 'column label containing row index')
parser.add_option('-o','--offset',
dest = 'offset',
type = 'int', metavar = 'int',
help = 'constant offset for index column value [%default]')
parser.add_option('-l','--label',
dest = 'label',
action = 'extend', metavar = '<string LIST>',
help = 'column label(s) to be appended')
parser.add_option('-a','--asciitable',
dest = 'asciitable',
type = 'string', metavar = 'string',
help = 'indexed ASCIItable')
parser.set_defaults(offset = 0,
)
(options,filenames) = parser.parse_args()
if filenames == []: filenames = [None]
if options.label is None:
parser.error('no data columns specified.')
if options.index is None:
parser.error('no index column given.')
for name in filenames:
damask.util.report(scriptName,name)
table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
indexedTable = damask.Table.from_ASCII(options.asciitable)
idx = np.reshape(table.get(options.index).astype(int) + options.offset,(-1))-1
for data in options.label:
table.add(data+'_addIndexed',indexedTable.get(data)[idx],scriptID+' '+' '.join(sys.argv[1:]))
table.to_file(sys.stdout if name is None else name)
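
The core of this removed script is a single NumPy fancy-indexing step: values of the --index column (1-based, shifted by the optional offset) select rows from the indexed table. Sketched with hypothetical arrays:

import numpy as np

index_col = np.array([3, 1, 2])             # --index column; rows start at 1
linked    = np.array([10.0, 20.0, 30.0])    # one column of the indexed table
picked    = linked[index_col - 1]           # with the default offset of 0
assert np.allclose(picked, [30.0, 10.0, 20.0])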

View File

@@ -1,118 +0,0 @@
#!/usr/bin/env python3
import os
import sys
from optparse import OptionParser
import numpy as np
import damask
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
Add data of selected column(s) from (first) row of linked ASCIItable that shares the linking column value.
""", version = scriptID)
parser.add_option('--link',
dest = 'link', nargs = 2,
type = 'string', metavar = 'string string',
help = 'column labels of table and linked table containing linking values')
parser.add_option('-l','--label',
dest = 'label',
action = 'extend', metavar = '<string LIST>',
help = 'column label(s) to add from linked ASCIItable')
parser.add_option('-a','--asciitable',
dest = 'asciitable',
type = 'string', metavar = 'string',
help = 'linked ASCIItable')
parser.set_defaults()
(options,filenames) = parser.parse_args()
if options.label is None:
parser.error('no data columns specified.')
if options.link is None:
parser.error('no linking columns given.')
# -------------------------------------- process linked ASCIItable --------------------------------
if options.asciitable is not None and os.path.isfile(options.asciitable):
linkedTable = damask.ASCIItable(name = options.asciitable, readonly = True)
linkedTable.head_read() # read ASCII header info of linked table
linkDim = linkedTable.label_dimension(options.link[1]) # dimension of linking column
missing_labels = linkedTable.data_readArray([options.link[1]]+options.label) # try reading linked ASCII table
linkedTable.close() # close linked ASCII table
if len(missing_labels) > 0:
damask.util.croak('column{} {} not found...'.format('s' if len(missing_labels) > 1 else '',', '.join(missing_labels)))
if len(missing_labels) >= len(options.label):
damask.util.croak('aborting...')
sys.exit()
index = linkedTable.data[:,:linkDim]
data = linkedTable.data[:,linkDim:]
else:
parser.error('no linked ASCIItable given.')
# --- loop over input files -----------------------------------------------------------------------
if filenames == []: filenames = [None]
for name in filenames:
try:
table = damask.ASCIItable(name = name)
except IOError:
continue
damask.util.report(scriptName,"{} {} <== {} {}".format(name,damask.util.deemph('@ '+options.link[0]),
options.asciitable,damask.util.deemph('@ '+options.link[1])))
# ------------------------------------------ read header ------------------------------------------
table.head_read()
# ------------------------------------------ sanity checks ----------------------------------------
errors = []
myLink = table.label_index (options.link[0])
myLinkDim = table.label_dimension(options.link[0])
if myLink < 0: errors.append('linking column {} not found.'.format(options.link[0]))
if myLinkDim != linkDim: errors.append('dimension mismatch for column {}.'.format(options.link[0]))
if errors != []:
damask.util.croak(errors)
table.close(dismiss = True)
continue
# ------------------------------------------ assemble header --------------------------------------
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
table.labels_append(linkedTable.labels(raw = True)[linkDim:]) # extend with new labels (except for linked column)
table.head_write()
# ------------------------------------------ process data ------------------------------------------
outputAlive = True
while outputAlive and table.data_read(): # read next data line of ASCII table
try:
table.data_append(data[np.argwhere(np.all((list(map(float,table.data[myLink:myLink+myLinkDim])) - index)==0,
axis=1))[0]]) # add data of first matching line
except IndexError:
table.data_append(np.nan*np.ones_like(data[0])) # or add NaNs
outputAlive = table.data_write() # output processed line
# ------------------------------------------ output finalization -----------------------------------
table.close() # close ASCII tables
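
The matching step this removed script performed, reduced to plain NumPy with hypothetical data: take the first row of the linked table whose key column equals the current row's key, and fall back to NaN when nothing matches.

import numpy as np

index = np.array([[1.0], [2.0], [3.0]])     # linking column of the linked table
data  = np.array([[10.], [20.], [30.]])     # payload columns
key   = [2.0]                               # linking value of the current row
try:
    row = data[np.argwhere(np.all((key - index) == 0, axis=1))[0]]
except IndexError:
    row = np.nan * np.ones_like(data[0])    # no match: pad with NaNs
assert np.allclose(row, 20.0)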

View File

@@ -137,14 +137,14 @@ for name in filenames:
if 'rodrigues' in options.output:
table.add('ro({})'.format(label),o.as_Rodrigues(), scriptID+' '+' '.join(sys.argv[1:]))
table = table.add('ro({})'.format(label),o.as_Rodrigues(), scriptID+' '+' '.join(sys.argv[1:]))
if 'eulers' in options.output:
table.add('eu({})'.format(label),o.as_Eulers(options.degrees), scriptID+' '+' '.join(sys.argv[1:]))
table = table.add('eu({})'.format(label),o.as_Eulers(options.degrees), scriptID+' '+' '.join(sys.argv[1:]))
if 'quaternion' in options.output:
table.add('qu({})'.format(label),o.as_quaternion(), scriptID+' '+' '.join(sys.argv[1:]))
table = table.add('qu({})'.format(label),o.as_quaternion(), scriptID+' '+' '.join(sys.argv[1:]))
if 'matrix' in options.output:
table.add('om({})'.format(label),o.as_matrix(), scriptID+' '+' '.join(sys.argv[1:]))
table = table.add('om({})'.format(label),o.as_matrix(), scriptID+' '+' '.join(sys.argv[1:]))
if 'axisangle' in options.output:
table.add('om({})'.format(label),o.as_axisangle(options.degrees), scriptID+' '+' '.join(sys.argv[1:]))
table = table.add('ax({})'.format(label),o.as_axisangle(options.degrees), scriptID+' '+' '.join(sys.argv[1:]))
table.to_file(sys.stdout if name is None else name)
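
Each representation written by this script carries its own label prefix (qu, eu, om, ax, ro), and the conversions are expected to round-trip. A quick sanity sketch, with scipy's Rotation standing in for damask.Rotation (an assumption, not the code under change):

import numpy as np
from scipy.spatial.transform import Rotation as R

r = R.from_euler('ZXZ', [10, 20, 30], degrees=True)   # Bunge-style Euler angles
q = r.as_quat()                                       # quaternion, scipy order (x,y,z,w)
assert np.allclose(R.from_quat(q).as_matrix(), r.as_matrix())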

View File

@@ -187,6 +187,6 @@ for name in filenames:
np.einsum('ijk,ik->ij',slip_normal, (o@normal)))
for i,label in enumerate(labels):
table.add(label,S[:,i],scriptID+' '+' '.join(sys.argv[1:]))
table = table.add(label,S[:,i],scriptID+' '+' '.join(sys.argv[1:]))
table.to_file(sys.stdout if name is None else name)

View File

@@ -1,142 +0,0 @@
#!/usr/bin/env python3
import os
import sys
from optparse import OptionParser, OptionGroup
import math # noqa
import numpy as np
import damask
def periodicAverage(coords, limits):
"""Centroid in periodic domain, see https://en.wikipedia.org/wiki/Center_of_mass#Systems_with_periodic_boundary_conditions."""
theta = 2.0*np.pi * (coords - limits[0])/(limits[1] - limits[0])
theta_avg = np.pi + np.arctan2(-np.sin(theta).mean(axis=0), -np.cos(theta).mean(axis=0))
return limits[0] + theta_avg * (limits[1] - limits[0])/2.0/np.pi
scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])
# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------
parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [ASCIItable(s)]', description = """
Apply a user-specified function to condense into a single row all those rows for which columns 'label' have identical values.
Output table will contain as many rows as there are different (unique) values in the grouping column(s).
Periodic domain averaging of coordinate values is supported.
Examples:
For grain averaged values, replace all rows of particular 'texture' with a single row containing their average.
{name} --label texture --function np.average data.txt
""".format(name = scriptName), version = scriptID)
parser.add_option('-l','--label',
dest = 'label',
action = 'extend', metavar = '<string LIST>',
help = 'column label(s) for grouping rows')
parser.add_option('-f','--function',
dest = 'function',
type = 'string', metavar = 'string',
help = 'mapping function [%default]')
parser.add_option('-a','--all',
dest = 'all',
action = 'store_true',
help = 'apply mapping function also to grouping column(s)')
group = OptionGroup(parser, "periodic averaging", "")
group.add_option ('-p','--periodic',
dest = 'periodic',
action = 'extend', metavar = '<string LIST>',
help = 'coordinate label(s) to average across periodic domain')
group.add_option ('--limits',
dest = 'boundary',
type = 'float', metavar = 'float float', nargs = 2,
help = 'min and max of periodic domain %default')
parser.add_option_group(group)
parser.set_defaults(function = 'np.average',
all = False,
label = [],
boundary = [0.0, 1.0])
(options,filenames) = parser.parse_args()
funcModule,funcName = options.function.split('.')
try:
mapFunction = getattr(locals().get(funcModule) or
globals().get(funcModule) or
__import__(funcModule),
funcName)
except Exception:
mapFunction = None
if options.label == []:
parser.error('no grouping column specified.')
if not hasattr(mapFunction,'__call__'):
parser.error('function "{}" is not callable.'.format(options.function))
# --- loop over input files -------------------------------------------------------------------------
if filenames == []: filenames = [None]
for name in filenames:
try:
table = damask.ASCIItable(name = name)
except IOError:
continue
damask.util.report(scriptName,name)
# ------------------------------------------ sanity checks ---------------------------------------
remarks = []
errors = []
table.head_read()
grpColumns = table.label_index(options.label)[::-1]
grpColumns = grpColumns[np.where(grpColumns>=0)]
if len(grpColumns) == 0: errors.append('no valid grouping column present.')
if remarks != []: damask.util.croak(remarks)
if errors != []:
damask.util.croak(errors)
table.close(dismiss=True)
continue
# ------------------------------------------ assemble info ---------------------------------------
table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
table.head_write()
# ------------------------------------------ process data --------------------------------
table.data_readArray()
indexrange = table.label_indexrange(options.periodic) if options.periodic is not None else []
rows,cols = table.data.shape
table.data = table.data[np.lexsort(table.data[:,grpColumns].T)] # sort data by grpColumn(s)
values,index = np.unique(table.data[:,grpColumns], axis=0, return_index=True) # unique grpColumn values and their positions
index = sorted(np.append(index,rows)) # add termination position
grpTable = np.empty((len(values), cols)) # initialize output
for i in range(len(values)): # iterate over groups (unique values in grpColumn)
grpTable[i] = np.apply_along_axis(mapFunction,0,table.data[index[i]:index[i+1]]) # apply (general) mapping function
grpTable[i,indexrange] = \
periodicAverage(table.data[index[i]:index[i+1],indexrange],options.boundary) # apply periodicAverage mapping function
if not options.all: grpTable[i,grpColumns] = table.data[index[i],grpColumns] # restore grouping column value
table.data = grpTable
# ------------------------------------------ output result -------------------------------
table.data_writeArray()
table.close() # close ASCII table
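
periodicAverage above is a circular mean: it maps coordinates to angles, averages the corresponding unit vectors, and maps back, avoiding the naive pitfall where points near both ends of a periodic domain average to the middle. Standalone check of that behavior:

import numpy as np

def periodicAverage(coords, limits):
    theta = 2.0*np.pi * (coords - limits[0])/(limits[1] - limits[0])
    theta_avg = np.pi + np.arctan2(-np.sin(theta).mean(axis=0),
                                   -np.cos(theta).mean(axis=0))
    return limits[0] + theta_avg * (limits[1] - limits[0])/2.0/np.pi

r = periodicAverage(np.array([0.05, 0.95]), (0.0, 1.0))
assert min(r % 1.0, -r % 1.0) < 0.02        # wraps to ~0.0, not the naive 0.5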

View File

@@ -56,6 +56,6 @@ for name in filenames:
data = table.get(label)
uniques,inverse = np.unique(data,return_inverse=True,axis=0) if options.unique else (data,np.arange(len(data)))
rng.shuffle(uniques)
table.set(label,uniques[inverse], scriptID+' '+' '.join(sys.argv[1:]))
table = table.set(label,uniques[inverse], scriptID+' '+' '.join(sys.argv[1:]))
table.to_file(sys.stdout if name is None else name)

View File

@@ -64,6 +64,5 @@ for name in filenames:
'homogenization\t{}'.format(geom.homogenization)]
table = damask.Table(seeds[mask],{'pos':(3,)},comments)
table.add('microstructure',microstructure[mask])
table.to_file(sys.stdout if name is None else \
os.path.splitext(name)[0]+'.seeds')
table = table.add('microstructure',microstructure[mask])
table.to_file(sys.stdout if name is None else os.path.splitext(name)[0]+'.seeds')

View File

@@ -155,11 +155,11 @@ for name in filenames:
]
table = damask.Table(np.hstack((seeds,eulers)),{'pos':(3,),'euler':(3,)},comments)
table.add('microstructure',np.arange(options.microstructure,options.microstructure + options.N,dtype=int))
table = table.add('microstructure',np.arange(options.microstructure,options.microstructure + options.N,dtype=int))
if options.weights:
weights = np.random.uniform(low = 0, high = options.max, size = options.N) if options.max > 0.0 \
else np.random.normal(loc = options.mean, scale = options.sigma, size = options.N)
table.add('weight',weights)
table = table.add('weight',weights)
table.to_file(sys.stdout if name is None else name)

View File

@@ -1,4 +1,5 @@
import re
import copy
import pandas as pd
import numpy as np
@@ -29,6 +30,15 @@ class Table:
self._label_condensed()
def __copy__(self):
"""Copy Table."""
return copy.deepcopy(self)
def copy(self):
"""Copy Table."""
return self.__copy__()
def _label_flat(self):
"""Label data individually, e.g. v v v ==> 1_v 2_v 3_v."""
labels = []
@@ -191,15 +201,16 @@
Human-readable information about the new data.
"""
self._add_comment(label,data.shape[1:],info)
dup = self.copy()
dup._add_comment(label,data.shape[1:],info)
if re.match(r'[0-9]*?_',label):
idx,key = label.split('_',1)
iloc = self.data.columns.get_loc(key).tolist().index(True) + int(idx) -1
self.data.iloc[:,iloc] = data
iloc = dup.data.columns.get_loc(key).tolist().index(True) + int(idx) -1
dup.data.iloc[:,iloc] = data
else:
self.data[label] = data.reshape(self.data[label].shape)
dup.data[label] = data.reshape(dup.data[label].shape)
return dup
def add(self,label,data,info=None):
"""
@@ -215,15 +226,17 @@
Human-readable information about the modified data.
"""
self._add_comment(label,data.shape[1:],info)
dup = self.copy()
dup._add_comment(label,data.shape[1:],info)
self.shapes[label] = data.shape[1:] if len(data.shape) > 1 else (1,)
dup.shapes[label] = data.shape[1:] if len(data.shape) > 1 else (1,)
size = np.prod(data.shape[1:],dtype=int)
new = pd.DataFrame(data=data.reshape(-1,size),
columns=[label]*size,
)
new.index = self.data.index
self.data = pd.concat([self.data,new],axis=1)
new.index = dup.data.index
dup.data = pd.concat([dup.data,new],axis=1)
return dup
def delete(self,label):
@@ -236,25 +249,31 @@
Column label.
"""
self.data.drop(columns=label,inplace=True)
del self.shapes[label]
dup = self.copy()
dup.data.drop(columns=label,inplace=True)
del dup.shapes[label]
return dup
def rename(self,label_old,label_new,info=None):
def rename(self,old,new,info=None):
"""
Rename column data.
Parameters
----------
label_old : str
Old column label.
label_new : str
New column label.
old : str or iterable of str
Old column label(s).
new : str or iterable of str
New column label(s).
"""
self.data.rename(columns={label_old:label_new},inplace=True)
self.comments.append(f'{label_old} => {label_new}'+('' if info is None else f': {info}'))
self.shapes = {(label if label != label_old else label_new):self.shapes[label] for label in self.shapes}
dup = self.copy()
columns = dict(zip([old] if isinstance(old,str) else old,
[new] if isinstance(new,str) else new))
dup.data.rename(columns=columns,inplace=True)
dup.comments.append(f'{old} => {new}'+('' if info is None else f': {info}'))
dup.shapes = {(label if label not in columns else columns[label]):dup.shapes[label] for label in dup.shapes}
return dup
def sort_by(self,labels,ascending=True):
@@ -269,10 +288,12 @@
Set sort order.
"""
self._label_flat()
self.data.sort_values(labels,axis=0,inplace=True,ascending=ascending)
self._label_condensed()
self.comments.append(f'sorted by [{", ".join(labels)}]')
dup = self.copy()
dup._label_flat()
dup.data.sort_values(labels,axis=0,inplace=True,ascending=ascending)
dup._label_condensed()
dup.comments.append(f'sorted {"ascending" if ascending else "descending"} by {labels}')
return dup
def append(self,other):
@@ -290,7 +311,9 @@
if self.shapes != other.shapes or not self.data.columns.equals(other.data.columns):
raise KeyError('Labels or shapes or order do not match')
else:
self.data = self.data.append(other.data,ignore_index=True)
dup = self.copy()
dup.data = dup.data.append(other.data,ignore_index=True)
return dup
def join(self,other):
@@ -308,9 +331,11 @@
if set(self.shapes) & set(other.shapes) or self.data.shape[0] != other.data.shape[0]:
raise KeyError('Duplicated keys or row count mismatch')
else:
self.data = self.data.join(other.data)
dup = self.copy()
dup.data = dup.data.join(other.data)
for key in other.shapes:
self.shapes[key] = other.shapes[key]
dup.shapes[key] = other.shapes[key]
return dup
def to_file(self,fname,format='ASCII',new_style=False):
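
Every mutating Table method above now follows one template: deep-copy self into dup, modify dup, return it, and leave the receiver untouched. A condensed sketch of that pattern (simplified stand-in, no pandas):

import copy

class MiniTable:
    def __init__(self, shapes):
        self.shapes = dict(shapes)
    def copy(self):
        return copy.deepcopy(self)
    def delete(self, label):
        dup = self.copy()           # work on a copy ...
        del dup.shapes[label]
        return dup                  # ... and hand it back

t  = MiniTable({'v': (3,)})
t2 = t.delete('v')
assert 'v' in t.shapes and 'v' not in t2.shapes

This buys side-effect-free, chainable calls at the cost of one deep copy per operation; the script and test changes elsewhere in this commit are the fallout of that trade.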

View File

@@ -105,7 +105,7 @@ class TestOrientation:
if update:
coords = np.array([(1,i+1) for i,x in enumerate(eu)])
table = Table(eu,{'Eulers':(3,)})
table.add('pos',coords)
table = table.add('pos',coords)
table.to_ASCII(reference)
assert np.allclose(eu,Table.from_ASCII(reference).get('Eulers'))

View File

@@ -81,13 +81,11 @@ class TestTable:
Table.from_ASCII(f)
def test_set(self,default):
default.set('F',np.zeros((5,3,3)),'set to zero')
d=default.get('F')
d = default.set('F',np.zeros((5,3,3)),'set to zero').get('F')
assert np.allclose(d,0.0) and d.shape[1:] == (3,3)
def test_set_component(self,default):
default.set('1_F',np.zeros((5)),'set to zero')
d=default.get('F')
d = default.set('1_F',np.zeros((5)),'set to zero').get('F')
assert np.allclose(d[...,0,0],0.0) and d.shape[1:] == (3,3)
def test_labels(self,default):
@@ -95,36 +93,34 @@
def test_add(self,default):
d = np.random.random((5,9))
default.add('nine',d,'random data')
assert np.allclose(d,default.get('nine'))
assert np.allclose(d,default.add('nine',d,'random data').get('nine'))
def test_rename_equivalent(self):
x = np.random.random((5,13))
t = Table(x,{'F':(3,3),'v':(3,),'s':(1,)},['random test data'])
s = t.get('s')
t.rename('s','u')
u = t.get('u')
u = t.rename('s','u').get('u')
assert np.all(s == u)
def test_rename_gone(self,default):
default.rename('v','V')
assert 'v' not in default.shapes and 'v' not in default.data.columns
gone = default.rename('v','V')
assert 'v' not in gone.shapes and 'v' not in gone.data.columns
with pytest.raises(KeyError):
default.get('v')
gone.get('v')
def test_delete(self,default):
default.delete('v')
assert 'v' not in default.shapes and 'v' not in default.data.columns
delete = default.delete('v')
assert 'v' not in delete.shapes and 'v' not in delete.data.columns
with pytest.raises(KeyError):
default.get('v')
delete.get('v')
def test_join(self):
x = np.random.random((5,13))
a = Table(x,{'F':(3,3),'v':(3,),'s':(1,)},['random test data'])
y = np.random.random((5,3))
b = Table(y,{'u':(3,)},['random test data'])
a.join(b)
assert np.array_equal(a.get('u'), b.get('u'))
c = a.join(b)
assert np.array_equal(c.get('u'), b.get('u'))
def test_join_invalid(self):
x = np.random.random((5,13))
@@ -135,8 +131,8 @@ class TestTable:
def test_append(self):
x = np.random.random((5,13))
a = Table(x,{'F':(3,3),'v':(3,),'s':(1,)},['random test data'])
a.append(a)
assert np.array_equal(a.data[:5].to_numpy(),a.data[5:].to_numpy())
b = a.append(a)
assert np.array_equal(b.data[:5].to_numpy(),b.data[5:].to_numpy())
def test_append_invalid(self):
x = np.random.random((5,13))
@@ -163,29 +159,26 @@
x = np.random.random((5,13))
t = Table(x,{'F':(3,3),'v':(3,),'s':(1,)},['random test data'])
unsort = t.get('s')
t.sort_by('s')
sort = t.get('s')
sort = t.sort_by('s').get('s')
assert np.all(np.sort(unsort,0)==sort)
def test_sort_component(self):
x = np.random.random((5,12))
t = Table(x,{'F':(3,3),'v':(3,)},['random test data'])
unsort = t.get('4_F')
t.sort_by('4_F')
sort = t.get('4_F')
sort = t.sort_by('4_F').get('4_F')
assert np.all(np.sort(unsort,0)==sort)
def test_sort_revert(self):
x = np.random.random((5,12))
t = Table(x,{'F':(3,3),'v':(3,)},['random test data'])
t.sort_by('4_F',ascending=False)
sort = t.get('4_F')
sort = t.sort_by('4_F',ascending=False).get('4_F')
assert np.all(np.sort(sort,0)==sort[::-1,:])
def test_sort(self):
t = Table(np.array([[0,1,],[2,1,]]),
{'v':(2,)},
['test data'])
t.add('s',np.array(['b','a']))
t.sort_by('s')
['test data'])\
.add('s',np.array(['b','a']))\
.sort_by('s')
assert np.all(t.get('1_v') == np.array([2,0]).reshape(2,1))

View File

@@ -252,7 +252,7 @@ subroutine crystallite_init
do e = FEsolving_execElem(1),FEsolving_execElem(2)
myNcomponents = homogenization_Ngrains(material_homogenizationAt(e))
do i = FEsolving_execIP(1), FEsolving_execIP(2); do c = 1, myNcomponents
crystallite_Fp0(1:3,1:3,c,i,e) = material_orientation0(c,i,e)%asMatrix() ! plastic def gradient reflects init orientation
crystallite_Fp0(1:3,1:3,c,i,e) = material_orientation0(c,i,e)%asMatrix() ! Fp reflects initial orientation (see 10.1016/j.actamat.2006.01.005)
crystallite_Fp0(1:3,1:3,c,i,e) = crystallite_Fp0(1:3,1:3,c,i,e) &
/ math_det33(crystallite_Fp0(1:3,1:3,c,i,e))**(1.0_pReal/3.0_pReal)
crystallite_Fi0(1:3,1:3,c,i,e) = constitutive_initialFi(c,i,e)
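
The reworded comment refers to the normalization in the two lines that follow it: dividing Fp0 by det(Fp0)**(1/3) makes it unimodular while preserving the orientation information. An equivalent NumPy check (illustration only, not the DAMASK code path):

import numpy as np

F = np.diag([1.2, 0.9, 1.1]) @ np.array([[1., 0.1, 0.],
                                         [0., 1., 0.],
                                         [0., 0., 1.]])
F_bar = F / np.linalg.det(F)**(1.0/3.0)     # rescale to unit determinant
assert np.isclose(np.linalg.det(F_bar), 1.0)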

View File

@@ -438,7 +438,7 @@ subroutine readVTR(grid,geomSize,origin,microstructure)
coords = as_pReal(base64_str,headerType,compressed,dataType)
delta = coords(2:) - coords(:size(coords)-1)
if(any(delta<0.0_pReal) .or. dNeq(maxval(delta),minval(delta))) &
if(any(delta<0.0_pReal) .or. dNeq(maxval(delta),minval(delta),1.0e-8_pReal*maxval(abs(coords)))) &
call IO_error(error_ID = 844, ext_msg = 'grid spacing')
grid(direction) = size(coords)-1
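
The fix passes an explicit tolerance to dNeq, scaled by the largest coordinate magnitude, so a VTR file with large coordinates and tiny round-off in its spacing no longer trips error 844. The idea in NumPy terms (a sketch, not the Fortran dNeq):

import numpy as np

coords = np.array([0.0, 1000.0, 2000.0000001, 3000.0])   # nearly uniform spacing
delta  = coords[1:] - coords[:-1]
tol    = 1.0e-8 * np.max(np.abs(coords))                 # relative tolerance
assert delta.max() - delta.min() <= tol                  # uniform within round-off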