Merge branch 'development' of magit1.mpie.de:damask/DAMASK into incs-no-leading-zero
commit 96710a238d

@@ -1,6 +1,7 @@
 ---
 stages:
   - prepareAll
+  - python
   - preprocessing
   - postprocessing
   - compilePETSc

@@ -103,6 +104,16 @@ checkout:
   - master
   - release

+###################################################################################################
+Pytest:
+  stage: python
+  script:
+    - cd $DAMASKROOT/python
+    - pytest
+  except:
+    - master
+    - release
+
 ###################################################################################################
 OrientationRelationship:
   stage: preprocessing

@@ -49,7 +49,7 @@ Phase_types = {'Primary': 0} #further additions to these can be done by looking
 # --------------------------------------------------------------------
 parser = argparse.ArgumentParser(description='Creating a file for DREAM3D from DAMASK data')
 parser.add_argument('filenames',nargs='+',help='HDF5 based output file')
-parser.add_argument('--inc',nargs='+',help='Increment for which DREAM3D to be used, eg. 00025',type=int)
+parser.add_argument('--inc',nargs='+',help='Increment for which DREAM3D to be used, eg. 25',type=int)
 parser.add_argument('-d','--dir', dest='dir',default='postProc',metavar='string',
                     help='name of subdirectory to hold output')

@@ -59,15 +59,13 @@ options = parser.parse_args()
 # loop over input files
 for filename in options.filenames:
   f = damask.DADF5(filename) #DAMASK output file
-  count = 0
-  for increment in f.increments:
-    if int(increment[3:]) not in options.inc:
-      count = count + 1
+  for increment in options.inc:
+    f.set_by_increment(increment,increment)
+    if len(f.visible['increments']) == 0:
       continue

 #-------output file creation-------------------------------------
     dirname = os.path.abspath(os.path.join(os.path.dirname(filename),options.dir))
-    print(dirname)
     try:
       os.mkdir(dirname)
     except FileExistsError:

@@ -90,11 +88,10 @@ for filename in options.filenames:
 # Phase information of DREAM.3D is constituent ID in DAMASK
     o[cell_data_label + '/Phases'] = f.get_constituent_ID().reshape(tuple(f.grid)+(1,))
 # Data quaternions
-    DAMASK_quaternion = f.read_dataset(f.get_dataset_location('orientation'),0)
-    DREAM_3D_quaternion = np.empty((np.prod(f.grid),4),dtype=np.float32)
+    DAMASK_quaternion = f.read_dataset(f.get_dataset_location('orientation'))
 # Convert: DAMASK uses P = -1, DREAM.3D uses P = +1. Also change position of imagninary part
     DREAM_3D_quaternion = np.hstack((-DAMASK_quaternion['x'],-DAMASK_quaternion['y'],-DAMASK_quaternion['z'],
-                                     DAMASK_quaternion['w']))
+                                     DAMASK_quaternion['w'])).astype(np.float32)
     o[cell_data_label + '/Quats'] = DREAM_3D_quaternion.reshape(tuple(f.grid)+(4,))

 # Attributes to CellData group

@@ -109,10 +106,12 @@ for filename in options.filenames:
 # phase attributes
     o[cell_data_label + '/Phases'].attrs['ComponentDimensions'] = np.array([1],np.uint64)
     o[cell_data_label + '/Phases'].attrs['ObjectType'] = 'DataArray<int32_t>'
+    o[cell_data_label + '/Phases'].attrs['TupleDimensions'] = f.grid.astype(np.uint64)

 # Quats attributes
     o[cell_data_label + '/Quats'].attrs['ComponentDimensions'] = np.array([4],np.uint64)
     o[cell_data_label + '/Quats'].attrs['ObjectType'] = 'DataArray<float>'
+    o[cell_data_label + '/Quats'].attrs['TupleDimensions'] = f.grid.astype(np.uint64)

 # Create EnsembleAttributeMatrix
     ensemble_label = data_container_label + '/EnsembleAttributeMatrix'

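Note on the conversion above: DAMASK stores quaternions as (w,x,y,z) with the P = -1 convention, DREAM.3D as (x,y,z,w) with P = +1, so the vector part flips sign and moves to the front. A standalone numpy sketch of the same conversion, with hypothetical values (not part of the commit):

    import numpy as np

    q_damask  = {'w': np.array([[0.5]]), 'x': np.array([[0.5]]),
                 'y': np.array([[0.5]]), 'z': np.array([[0.5]])}  # hypothetical (w,x,y,z), P = -1
    q_dream3d = np.hstack((-q_damask['x'],-q_damask['y'],-q_damask['z'],
                            q_damask['w'])).astype(np.float32)    # (x,y,z,w), P = +1
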
@@ -2,10 +2,9 @@

 import os
 import sys
+from io import StringIO
 from optparse import OptionParser

-import numpy as np
-
 import damask

@@ -36,54 +35,15 @@ parser.set_defaults(defgrad = 'f',
                    )

 (options,filenames) = parser.parse_args()

-# --- loop over input files -------------------------------------------------------------------------
-
 if filenames == []: filenames = [None]

 for name in filenames:
-  try:
-    table = damask.ASCIItable(name = name, buffered = False)
-  except:
-    continue
   damask.util.report(scriptName,name)

-# ------------------------------------------ read header ------------------------------------------
+  table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
+  table.add('Cauchy',
+            damask.mechanics.Cauchy(table.get(options.defgrad).reshape(-1,3,3),
+                                    table.get(options.stress ).reshape(-1,3,3)).reshape(-1,9),
+            scriptID+' '+' '.join(sys.argv[1:]))

-  table.head_read()
+  table.to_ASCII(sys.stdout if name is None else name)

-# ------------------------------------------ sanity checks ----------------------------------------
-
-  errors  = []
-  column = {}
-
-  for tensor in [options.defgrad,options.stress]:
-    dim = table.label_dimension(tensor)
-    if   dim <  0: errors.append('column {} not found.'.format(tensor))
-    elif dim != 9: errors.append('column {} is not a tensor.'.format(tensor))
-    else:
-      column[tensor] = table.label_index(tensor)
-
-  if errors != []:
-    damask.util.croak(errors)
-    table.close(dismiss = True)
-    continue
-
-# ------------------------------------------ assemble header --------------------------------------
-
-  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
-  table.labels_append(['{}_Cauchy'.format(i+1) for i in range(9)]) # extend ASCII header with new labels
-  table.head_write()
-
-# ------------------------------------------ process data ------------------------------------------
-
-  outputAlive = True
-  while outputAlive and table.data_read(): # read next data line of ASCII table
-    F = np.array(list(map(float,table.data[column[options.defgrad]:column[options.defgrad]+9])),'d').reshape(3,3)
-    P = np.array(list(map(float,table.data[column[options.stress ]:column[options.stress ]+9])),'d').reshape(3,3)
-    table.data_append(list(1.0/np.linalg.det(F)*np.dot(P,F.T).reshape(9))) # [Cauchy] = (1/det(F)) * [P].[F_transpose]
-    outputAlive = table.data_write() # output processed line
-
-# ------------------------------------------ output finalization -----------------------------------
-
-  table.close() # close input ASCII table (works for stdin)

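The rewrite delegates the arithmetic to damask.mechanics.Cauchy, which implements the push-forward sigma = (1/det F) P F^T that the removed inline comment spelled out. A numpy-only sketch with hypothetical values (not part of the commit):

    import numpy as np

    F = np.eye(3) + 1e-3*np.random.rand(3,3)   # hypothetical deformation gradient
    P = np.random.rand(3,3)                    # hypothetical 1. Piola-Kirchhoff stress

    sigma = np.dot(P,F.T)/np.linalg.det(F)     # Cauchy stress: (1/det F) P . F^T
    sigma = 0.5*(sigma+sigma.T)                # symmetrize, as damask.mechanics.Cauchy does
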
@@ -2,22 +2,16 @@

 import os
 import sys
+from io import StringIO
 from optparse import OptionParser

+import numpy as np
+
 import damask


 scriptName = os.path.splitext(os.path.basename(__file__))[0]
 scriptID   = ' '.join([scriptName,damask.version])

-def determinant(m):
-  return +m[0]*m[4]*m[8] \
-         +m[1]*m[5]*m[6] \
-         +m[2]*m[3]*m[7] \
-         -m[2]*m[4]*m[6] \
-         -m[1]*m[3]*m[8] \
-         -m[0]*m[5]*m[7]
-
-
 # --------------------------------------------------------------------
 # MAIN

@@ -34,61 +28,18 @@ parser.add_option('-t','--tensor',
                   help = 'heading of columns containing tensor field values')

 (options,filenames) = parser.parse_args()
+if filenames == []: filenames = [None]

 if options.tensor is None:
   parser.error('no data column specified.')

-# --- loop over input files -------------------------------------------------------------------------
-
-if filenames == []: filenames = [None]
-
 for name in filenames:
-  try:
-    table = damask.ASCIItable(name = name,
-                              buffered = False)
-  except: continue
   damask.util.report(scriptName,name)

-# ------------------------------------------ read header ------------------------------------------
+  table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
+  for tensor in options.tensor:
+    table.add('det({})'.format(tensor),
+              np.linalg.det(table.get(tensor).reshape(-1,3,3)),
+              scriptID+' '+' '.join(sys.argv[1:]))

-  table.head_read()
+  table.to_ASCII(sys.stdout if name is None else name)

-# ------------------------------------------ sanity checks ----------------------------------------
-
-  items = {
-           'tensor': {'dim': 9, 'shape': [3,3], 'labels':options.tensor, 'column': []},
-          }
-  errors  = []
-  remarks = []
-
-  for type, data in items.items():
-    for what in data['labels']:
-      dim = table.label_dimension(what)
-      if dim != data['dim']: remarks.append('column {} is not a {}...'.format(what,type))
-      else:
-        items[type]['column'].append(table.label_index(what))
-        table.labels_append('det({})'.format(what)) # extend ASCII header with new labels
-
-  if remarks != []: damask.util.croak(remarks)
-  if errors  != []:
-    damask.util.croak(errors)
-    table.close(dismiss = True)
-    continue
-
-# ------------------------------------------ assemble header --------------------------------------
-
-  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
-  table.head_write()
-
-# ------------------------------------------ process data ------------------------------------------
-
-  outputAlive = True
-  while outputAlive and table.data_read(): # read next data line of ASCII table
-    for type, data in items.items():
-      for column in data['column']:
-        table.data_append(determinant(list(map(float,table.data[column: column+data['dim']]))))
-    outputAlive = table.data_write() # output processed line
-
-# ------------------------------------------ output finalization -----------------------------------
-
-  table.close() # close input ASCII table (works for stdin)

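The hand-rolled determinant() over nine flat components is replaced by np.linalg.det, which broadcasts over a stack of 3x3 matrices, so all rows are processed in one call. A minimal sketch with hypothetical values:

    import numpy as np

    T = np.random.rand(5,3,3)   # hypothetical stack of five 3x3 tensors
    d = np.linalg.det(T)        # one determinant per tensor, shape (5,)
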
@@ -2,6 +2,7 @@

 import os
 import sys
+from io import StringIO
 from optparse import OptionParser

 import damask

@@ -9,17 +10,6 @@ import damask
 scriptName = os.path.splitext(os.path.basename(__file__))[0]
 scriptID   = ' '.join([scriptName,damask.version])

-oneThird = 1.0/3.0
-
-def deviator(m,spherical = False): # Careful, do not change the value of m, its intent(inout)!
-  sph = oneThird*(m[0]+m[4]+m[8])
-  dev = [
-          m[0]-sph, m[1],     m[2],
-          m[3],     m[4]-sph, m[5],
-          m[6],     m[7],     m[8]-sph,
-        ]
-  return dev,sph if spherical else dev
-
 # --------------------------------------------------------------------
 # MAIN

@@ -40,67 +30,22 @@ parser.add_option('-s','--spherical',
                   help = 'report spherical part of tensor (hydrostatic component, pressure)')

 (options,filenames) = parser.parse_args()
+if filenames == []: filenames = [None]

 if options.tensor is None:
   parser.error('no data column specified...')

-# --- loop over input files -------------------------------------------------------------------------
-
-if filenames == []: filenames = [None]
-
 for name in filenames:
-  try:
-    table = damask.ASCIItable(name = name, buffered = False)
-  except:
-    continue
   damask.util.report(scriptName,name)

-# ------------------------------------------ read header ------------------------------------------
+  table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
+  for tensor in options.tensor:
+    table.add('dev({})'.format(tensor),
+              damask.mechanics.deviatoric_part(table.get(tensor).reshape(-1,3,3)).reshape((-1,9)),
+              scriptID+' '+' '.join(sys.argv[1:]))
+    if options.spherical:
+      table.add('sph({})'.format(tensor),
+                damask.mechanics.spherical_part(table.get(tensor).reshape(-1,3,3)),
+                scriptID+' '+' '.join(sys.argv[1:]))

-  table.head_read()
+  table.to_ASCII(sys.stdout if name is None else name)

-# ------------------------------------------ sanity checks ----------------------------------------
-
-  items = {
-           'tensor': {'dim': 9, 'shape': [3,3], 'labels':options.tensor, 'active':[], 'column': []},
-          }
-  errors  = []
-  remarks = []
-  column = {}
-
-  for type, data in items.items():
-    for what in data['labels']:
-      dim = table.label_dimension(what)
-      if dim != data['dim']: remarks.append('column {} is not a {}.'.format(what,type))
-      else:
-        items[type]['active'].append(what)
-        items[type]['column'].append(table.label_index(what))
-
-  if remarks != []: damask.util.croak(remarks)
-  if errors != []:
-    damask.util.croak(errors)
-    table.close(dismiss = True)
-    continue
-
-# ------------------------------------------ assemble header --------------------------------------
-
-  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
-  for type, data in items.items():
-    for label in data['active']:
-      table.labels_append(['{}_dev({})'.format(i+1,label) for i in range(data['dim'])] + \
-                          (['sph({})'.format(label)] if options.spherical else [])) # extend ASCII header with new labels
-  table.head_write()
-
-# ------------------------------------------ process data ------------------------------------------
-
-  outputAlive = True
-  while outputAlive and table.data_read(): # read next data line of ASCII table
-    for type, data in items.items():
-      for column in data['column']:
-        table.data_append(deviator(list(map(float,table.data[column:
-                                                             column+data['dim']])),options.spherical))
-    outputAlive = table.data_write() # output processed line
-
-# ------------------------------------------ output finalization -----------------------------------
-
-  table.close() # close input ASCII table (works for stdin)

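damask.mechanics.spherical_part and deviatoric_part implement the classical split that the removed deviator() helper computed component-wise: sph(T) = tr(T)/3 and dev(T) = T - sph(T) I, so dev(T) is trace-free. A numpy-only sketch with hypothetical values:

    import numpy as np

    T   = np.random.rand(3,3)   # hypothetical tensor
    sph = np.trace(T)/3.0       # spherical (hydrostatic) part
    dev = T - sph*np.eye(3)     # deviatoric part; np.trace(dev) is ~0
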
@@ -1,6 +1,8 @@
 #!/usr/bin/env python3

 import os
+import sys
+from io import StringIO
 from optparse import OptionParser

 import damask

@@ -24,35 +26,16 @@ parser.add_option('-i',
                   dest = 'info', action = 'extend', metavar = '<string LIST>',
                   help = 'items to add')


 (options,filenames) = parser.parse_args()
+if filenames == []: filenames = [None]

 if options.info is None:
   parser.error('no info specified.')

-# --- loop over input files ------------------------------------------------------------------------
-
-if filenames == []: filenames = [None]
-
 for name in filenames:
-  try: table = damask.ASCIItable(name = name,
-                                 buffered = False)
-  except: continue
   damask.util.report(scriptName,name)

-# ------------------------------------------ assemble header ---------------------------------------
+  table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
+  table.comments += options.info

-  table.head_read()
-  table.info_append(options.info)
-  table.head_write()
-
-# ------------------------------------------ pass through data -------------------------------------
-
-  outputAlive = True
-
-  while outputAlive and table.data_read(): # read next data line of ASCII table
-    outputAlive = table.data_write() # output processed line
-
-# ------------------------------------------ output finalization -----------------------------------
-
-  table.close() # close ASCII tables
+  table.to_ASCII(sys.stdout if name is None else name)

@@ -2,10 +2,8 @@

 import os
 import sys
+from io import StringIO
 from optparse import OptionParser
-from collections import OrderedDict

-import numpy as np
-
 import damask

@@ -13,15 +11,6 @@ import damask
 scriptName = os.path.splitext(os.path.basename(__file__))[0]
 scriptID   = ' '.join([scriptName,damask.version])

-def Mises(what,tensor):
-
-  dev = tensor - np.trace(tensor)/3.0*np.eye(3)
-  symdev = 0.5*(dev+dev.T)
-  return np.sqrt(np.sum(symdev*symdev.T)*
-                 {
-                  'stress': 3.0/2.0,
-                  'strain': 2.0/3.0,
-                 }[what.lower()])

 # --------------------------------------------------------------------
 # MAIN

@@ -49,60 +38,19 @@ parser.set_defaults(strain = [],
 if options.stress is [] and options.strain is []:
   parser.error('no data column specified...')

-# --- loop over input files -------------------------------------------------------------------------
-
 if filenames == []: filenames = [None]

 for name in filenames:
-  try:
-    table = damask.ASCIItable(name = name,
-                              buffered = False)
-  except: continue
   damask.util.report(scriptName,name)

-# ------------------------------------------ read header ------------------------------------------
+  table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
+  for strain in options.strain:
+    table.add('Mises({})'.format(strain),
+              damask.mechanics.Mises_strain(damask.mechanics.symmetric(table.get(strain).reshape(-1,3,3))),
+              scriptID+' '+' '.join(sys.argv[1:]))
+  for stress in options.stress:
+    table.add('Mises({})'.format(stress),
+              damask.mechanics.Mises_stress(damask.mechanics.symmetric(table.get(stress).reshape(-1,3,3))),
+              scriptID+' '+' '.join(sys.argv[1:]))

-  table.head_read()
+  table.to_ASCII(sys.stdout if name is None else name)

-# ------------------------------------------ sanity checks ----------------------------------------
-
-  items = OrderedDict([
-                       ('strain', {'dim': 9, 'shape': [3,3], 'labels':options.strain, 'active':[], 'column': []}),
-                       ('stress', {'dim': 9, 'shape': [3,3], 'labels':options.stress, 'active':[], 'column': []})
-                      ])
-  errors  = []
-  remarks = []
-
-  for type, data in items.items():
-    for what in data['labels']:
-      dim = table.label_dimension(what)
-      if dim != data['dim']: remarks.append('column {} is not a {}...'.format(what,type))
-      else:
-        items[type]['active'].append(what)
-        items[type]['column'].append(table.label_index(what))
-        table.labels_append('Mises({})'.format(what)) # extend ASCII header with new labels
-
-  if remarks != []: damask.util.croak(remarks)
-  if errors != []:
-    damask.util.croak(errors)
-    table.close(dismiss = True)
-    continue
-
-# ------------------------------------------ assemble header --------------------------------------
-
-  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
-  table.head_write()
-
-# ------------------------------------------ process data ------------------------------------------
-
-  outputAlive = True
-  while outputAlive and table.data_read(): # read next data line of ASCII table
-    for type, data in items.items():
-      for column in data['column']:
-        table.data_append(Mises(type,
-                                np.array(table.data[column:column+data['dim']],'d').reshape(data['shape'])))
-    outputAlive = table.data_write() # output processed line
-
-# ------------------------------------------ output finalization -----------------------------------
-
-  table.close() # close input ASCII table (works for stdin)

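The removed Mises() helper and its damask.mechanics replacements compute the same equivalent value: for a stress, sqrt(3/2 s:s) of the symmetric deviator s; for a strain, the factor is 2/3 (both factors appear in the removed dict). A worked numpy example with a hypothetical tensor:

    import numpy as np

    sigma = np.diag([2.,1.,1.])                    # hypothetical stress tensor
    s     = sigma - np.trace(sigma)/3.*np.eye(3)   # symmetric deviator
    vM    = np.sqrt(1.5*np.sum(s*s))               # sqrt(3/2 s:s) == 1.0 here
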
@@ -2,10 +2,9 @@

 import os
 import sys
+from io import StringIO
 from optparse import OptionParser

-import numpy as np
-
 import damask

@@ -36,53 +35,16 @@ parser.set_defaults(defgrad = 'f',
                    )

 (options,filenames) = parser.parse_args()

-# --- loop over input files -------------------------------------------------------------------------
-
 if filenames == []: filenames = [None]

 for name in filenames:
-  try:
-    table = damask.ASCIItable(name = name,
-                              buffered = False)
-  except: continue
   damask.util.report(scriptName,name)

-# ------------------------------------------ read header ------------------------------------------
+  table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)

-  table.head_read()
+  table.add('S',
+            damask.mechanics.PK2(table.get(options.defgrad).reshape(-1,3,3),
+                                 table.get(options.stress ).reshape(-1,3,3)).reshape(-1,9),
+            scriptID+' '+' '.join(sys.argv[1:]))

-# ------------------------------------------ sanity checks ----------------------------------------
+  table.to_ASCII(sys.stdout if name is None else name)

-  errors  = []
-  column = {}
-
-  for tensor in [options.defgrad,options.stress]:
-    dim = table.label_dimension(tensor)
-    if   dim <  0: errors.append('column {} not found.'.format(tensor))
-    elif dim != 9: errors.append('column {} is not a tensor.'.format(tensor))
-    else:
-      column[tensor] = table.label_index(tensor)
-
-  if errors != []:
-    damask.util.croak(errors)
-    table.close(dismiss = True)
-    continue
-
-# ------------------------------------------ assemble header --------------------------------------
-
-  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
-  table.labels_append(['{}_S'.format(i+1) for i in range(9)]) # extend ASCII header with new labels
-  table.head_write()
-
-# ------------------------------------------ process data ------------------------------------------
-  outputAlive = True
-  while outputAlive and table.data_read(): # read next data line of ASCII table
-    F = np.array(list(map(float,table.data[column[options.defgrad]:column[options.defgrad]+9])),'d').reshape(3,3)
-    P = np.array(list(map(float,table.data[column[options.stress ]:column[options.stress ]+9])),'d').reshape(3,3)
-    table.data_append(list(np.dot(np.linalg.inv(F),P).reshape(9))) # [S] =[P].[F-1]
-    outputAlive = table.data_write() # output processed line
-
-# ------------------------------------------ output finalization -----------------------------------
-
-  table.close() # close input ASCII table (works for stdin)

@@ -2,8 +2,8 @@

 import os
 import sys
+from io import StringIO
 from optparse import OptionParser
-import re

 import damask

@@ -35,62 +35,18 @@ parser.set_defaults(label = [],
                    )

 (options,filenames) = parser.parse_args()

-pattern = [re.compile('^()(.+)$'), # label pattern for scalar
-           re.compile('^(\d+_)?(.+)$'), # label pattern for multidimension
-          ]
-
-# --- loop over input files -------------------------------------------------------------------------
-
 if filenames == []: filenames = [None]

+if len(options.label) != len(options.substitute):
+  parser.error('number of column labels and substitutes do not match.')
+
 for name in filenames:
-  try: table = damask.ASCIItable(name = name,
-                                 buffered = False)
-  except: continue
   damask.util.report(scriptName,name)

-# ------------------------------------------ read header ------------------------------------------
+  table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
+  for i,label in enumerate(options.label):
+    table.rename(label,
+                 options.substitute[i],
+                 scriptID+' '+' '.join(sys.argv[1:]))

-  table.head_read()
+  table.to_ASCII(sys.stdout if name is None else name)

-# ------------------------------------------ process labels ---------------------------------------
-
-  errors  = []
-  remarks = []
-
-  if len(options.label) == 0:
-    errors.append('no labels specified.')
-  elif len(options.label) != len(options.substitute):
-    errors.append('mismatch between number of labels ({}) and substitutes ({}).'.format(len(options.label),
-                                                                                        len(options.substitute)))
-  else:
-    indices    = table.label_index    (options.label)
-    dimensions = table.label_dimension(options.label)
-    for i,index in enumerate(indices):
-      if index == -1: remarks.append('label "{}" not present...'.format(options.label[i]))
-      else:
-        m = pattern[int(dimensions[i]>1)].match(table.tags[index]) # isolate label name
-        for j in range(dimensions[i]):
-          table.tags[index+j] = table.tags[index+j].replace(m.group(2),options.substitute[i]) # replace name with substitute
-
-  if remarks != []: damask.util.croak(remarks)
-  if errors != []:
-    damask.util.croak(errors)
-    table.close(dismiss = True)
-    continue
-
-# ------------------------------------------ assemble header ---------------------------------------
-
-  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
-  table.head_write()
-
-# ------------------------------------------ process data ------------------------------------------
-
-  outputAlive = True
-  while outputAlive and table.data_read(): # read next data line of ASCII table
-    outputAlive = table.data_write() # output processed line
-
-# ------------------------------------------ output finalization -----------------------------------
-
-  table.close() # close ASCII tables

@@ -2,10 +2,9 @@

 import os
 import sys
+from io import StringIO
 from optparse import OptionParser

-import numpy as np
-
 import damask

@@ -23,7 +22,7 @@ Uniformly scale column values by given factor.
 """, version = scriptID)

 parser.add_option('-l','--label',
-                  dest = 'label',
+                  dest = 'labels',
                   action = 'extend', metavar = '<string LIST>',
                   help ='column(s) to scale')
 parser.add_option('-f','--factor',

@@ -32,61 +31,21 @@ parser.add_option('-f','--factor',
                   help = 'factor(s) per column')

 parser.set_defaults(label = [],
-                   )
+                    factor = [])

 (options,filenames) = parser.parse_args()

-if len(options.label) != len(options.factor):
-  parser.error('number of column labels and factors do not match.')
-
-# --- loop over input files -------------------------------------------------------------------------
-
 if filenames == []: filenames = [None]

+if len(options.labels) != len(options.factor):
+  parser.error('number of column labels and factors do not match.')
+
 for name in filenames:
-  try:
-    table = damask.ASCIItable(name = name,
-                              buffered = False)
-  except: continue
   damask.util.report(scriptName,name)

-# ------------------------------------------ read header ------------------------------------------
+  table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
+  for i,label in enumerate(options.labels):
+    table.set(label,
+              table.get(label)*float(options.factor[i]),
+              scriptID+' '+' '.join(sys.argv[1:]))

-  table.head_read()
+  table.to_ASCII(sys.stdout if name is None else name)

-  errors  = []
-  remarks = []
-  columns = []
-  dims    = []
-  factors = []
-
-  for what,factor in zip(options.label,options.factor):
-    col = table.label_index(what)
-    if col < 0: remarks.append('column {} not found...'.format(what,type))
-    else:
-      columns.append(col)
-      factors.append(float(factor))
-      dims.append(table.label_dimension(what))
-
-  if remarks != []: damask.util.croak(remarks)
-  if errors  != []:
-    damask.util.croak(errors)
-    table.close(dismiss = True)
-    continue
-
-# ------------------------------------------ assemble header ---------------------------------------
-
-  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
-  table.head_write()
-
-# ------------------------------------------ process data ------------------------------------------
-
-  outputAlive = True
-  while outputAlive and table.data_read(): # read next data line of ASCII table
-    for col,dim,factor in zip(columns,dims,factors): # loop over items
-      table.data[col:col+dim] = factor * np.array(table.data[col:col+dim],'d')
-    outputAlive = table.data_write() # output processed line
-
-# ------------------------------------------ output finalization -----------------------------------
-
-  table.close() # close ASCII tables

@@ -2,10 +2,9 @@

 import os
 import sys
+from io import StringIO
 from optparse import OptionParser

-import numpy as np
-
 import damask

@@ -23,7 +22,7 @@ Uniformly shift column values by given offset.
 """, version = scriptID)

 parser.add_option('-l','--label',
-                  dest = 'label',
+                  dest = 'labels',
                   action = 'extend', metavar = '<string LIST>',
                   help ='column(s) to shift')
 parser.add_option('-o','--offset',

@@ -32,61 +31,21 @@ parser.add_option('-o','--offset',
                   help = 'offset(s) per column')

 parser.set_defaults(label = [],
-                   )
+                    offset = [])

 (options,filenames) = parser.parse_args()

-if len(options.label) != len(options.offset):
-  parser.error('number of column labels and offsets do not match.')
-
-# --- loop over input files -------------------------------------------------------------------------
-
 if filenames == []: filenames = [None]

+if len(options.labels) != len(options.offset):
+  parser.error('number of column labels and offsets do not match.')
+
 for name in filenames:
-  try:
-    table = damask.ASCIItable(name = name,
-                              buffered = False)
-  except: continue
   damask.util.report(scriptName,name)

-# ------------------------------------------ read header ------------------------------------------
+  table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
+  for i,label in enumerate(options.labels):
+    table.set(label,
+              table.get(label)+float(options.offset[i]),
+              scriptID+' '+' '.join(sys.argv[1:]))

-  table.head_read()
+  table.to_ASCII(sys.stdout if name is None else name)

-  errors  = []
-  remarks = []
-  columns = []
-  dims    = []
-  offsets = []
-
-  for what,offset in zip(options.label,options.offset):
-    col = table.label_index(what)
-    if col < 0: remarks.append('column {} not found...'.format(what,type))
-    else:
-      columns.append(col)
-      offsets.append(float(offset))
-      dims.append(table.label_dimension(what))
-
-  if remarks != []: damask.util.croak(remarks)
-  if errors  != []:
-    damask.util.croak(errors)
-    table.close(dismiss = True)
-    continue
-
-# ------------------------------------------ assemble header ---------------------------------------
-
-  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
-  table.head_write()
-
-# ------------------------------------------ process data ------------------------------------------
-
-  outputAlive = True
-  while outputAlive and table.data_read(): # read next data line of ASCII table
-    for col,dim,offset in zip(columns,dims,offsets): # loop over items
-      table.data[col:col+dim] = offset + np.array(table.data[col:col+dim],'d')
-    outputAlive = table.data_write() # output processed line
-
-# ------------------------------------------ output finalization -----------------------------------
-
-  table.close() # close ASCII tables

@@ -2,10 +2,9 @@

 import os
 import sys
+from io import StringIO
 from optparse import OptionParser

-import numpy as np
-
 import damask

@@ -26,7 +25,7 @@ With coordinates in columns "x", "y", and "z"; sorting with x slowest and z fast

 parser.add_option('-l','--label',
-                  dest = 'keys',
+                  dest = 'labels',
                   action = 'extend', metavar = '<string LIST>',
                   help = 'list of column labels (a,b,c,...)')
 parser.add_option('-r','--reverse',

@@ -38,42 +37,14 @@ parser.set_defaults(reverse = False,
                    )

 (options,filenames) = parser.parse_args()

-# --- loop over input files -------------------------------------------------------------------------
-
 if filenames == []: filenames = [None]

+if options.labels is None:
+  parser.error('no labels specified.')
 for name in filenames:
-  try: table = damask.ASCIItable(name = name,
-                                 buffered = False)
-  except: continue
   damask.util.report(scriptName,name)

-# ------------------------------------------ assemble header ---------------------------------------
+  table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
+  table.sort_by(options.labels,not options.reverse)

-  table.head_read()
-  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
-  table.head_write()
-
-# ------------------------------------------ process data ---------------------------------------
-
-  table.data_readArray()
-
-  keys = table.labels(raw = True)[::-1] if options.keys is None else options.keys[::-1] # numpy sorts with most significant column as last
-
-  cols    = []
-  remarks = []
-  for i,column in enumerate(table.label_index(keys)):
-    if column < 0: remarks.append('label "{}" not present...'.format(keys[i]))
-    else:          cols += [table.data[:,column]]
-  if remarks != []: damask.util.croak(remarks)
-
-  ind = np.lexsort(cols) if cols != [] else np.arange(table.data.shape[0])
-  if options.reverse: ind = ind[::-1]
-
-# ------------------------------------------ output result ---------------------------------------
-
-  table.data = table.data[ind]
-  table.data_writeArray()
-  table.close() # close ASCII table
+  table.to_ASCII(sys.stdout if name is None else name)

@@ -9,6 +9,7 @@ name = 'damask'
 # classes
 from .environment import Environment # noqa
 from .asciitable  import ASCIItable  # noqa
+from .table       import Table       # noqa

 from .config import Material # noqa
 from .colormaps import Colormap, Color # noqa

@@ -18,17 +18,17 @@ class DADF5():
   """

# ------------------------------------------------------------------
-  def __init__(self,filename):
+  def __init__(self,fname):
     """
     Opens an existing DADF5 file.

     Parameters
     ----------
-    filename : str
+    fname : str
         name of the DADF5 file to be openend.

     """
-    with h5py.File(filename,'r') as f:
+    with h5py.File(fname,'r') as f:

       try:
         self.version_major = f.attrs['DADF5_version_major']

@@ -72,7 +72,7 @@ class DADF5():
                      'con_physics': self.con_physics,
                      'mat_physics': self.mat_physics}

-    self.filename = filename
+    self.fname = fname


   def __manage_visible(self,datasets,what,action):

@@ -315,7 +315,7 @@ class DADF5():

     groups = []

-    with h5py.File(self.filename,'r') as f:
+    with h5py.File(self.fname,'r') as f:
       for i in self.iter_visible('increments'):
         for o,p in zip(['constituents','materialpoints'],['con_physics','mat_physics']):
           for oo in self.iter_visible(o):

@@ -332,9 +332,9 @@ class DADF5():
   def list_data(self):
     """Return information on all active datasets in the file."""
     message = ''
-    with h5py.File(self.filename,'r') as f:
-      for i in self.iter_visible('increments'):
-        message+='\n{} ({}s)\n'.format(i,self.times[self.increments.index(i)])
+    with h5py.File(self.fname,'r') as f:
+      for s,i in enumerate(self.iter_visible('increments')):
+        message+='\n{} ({}s)\n'.format(i,self.times[s])
         for o,p in zip(['constituents','materialpoints'],['con_physics','mat_physics']):
           for oo in self.iter_visible(o):
             message+='  {}\n'.format(oo)

@@ -353,7 +353,7 @@ class DADF5():
   def get_dataset_location(self,label):
     """Return the location of all active datasets with given label."""
     path = []
-    with h5py.File(self.filename,'r') as f:
+    with h5py.File(self.fname,'r') as f:
       for i in self.iter_visible('increments'):
         k = '/'.join([i,'geometry',label])
         try:

@@ -375,14 +375,14 @@ class DADF5():

   def get_constituent_ID(self,c=0):
     """Pointwise constituent ID."""
-    with h5py.File(self.filename,'r') as f:
+    with h5py.File(self.fname,'r') as f:
       names = f['/mapping/cellResults/constituent']['Name'][:,c].astype('str')
     return np.array([int(n.split('_')[0]) for n in names.tolist()],dtype=np.int32)


   def get_crystal_structure(self): # ToDo: extension to multi constituents/phase
     """Info about the crystal structure."""
-    with h5py.File(self.filename,'r') as f:
+    with h5py.File(self.fname,'r') as f:
       return f[self.get_dataset_location('orientation')[0]].attrs['Lattice'].astype('str') # np.bytes_ to string

@@ -392,7 +392,7 @@ class DADF5():

     If more than one path is given, the dataset is composed of the individual contributions.
     """
-    with h5py.File(self.filename,'r') as f:
+    with h5py.File(self.fname,'r') as f:
       shape = (self.Nmaterialpoints,) + np.shape(f[path[0]])[1:]
       if len(shape) == 1: shape = shape +(1,)
       dataset = np.full(shape,np.nan,dtype=np.dtype(f[path[0]]))

@@ -435,7 +435,7 @@ class DADF5():
                          )
       return np.concatenate((x[:,:,:,None],y[:,:,:,None],y[:,:,:,None]),axis = 3).reshape([np.product(self.grid),3])
     else:
-      with h5py.File(self.filename,'r') as f:
+      with h5py.File(self.fname,'r') as f:
         return f['geometry/x_c'][()]

@@ -815,7 +815,7 @@ class DADF5():
     todo = []
     # ToDo: It would be more memory efficient to read only from file when required, i.e. do to it in pool.add_task
     for group in self.groups_with_datasets([d['label'] for d in datasets_requested]):
-      with h5py.File(self.filename,'r') as f:
+      with h5py.File(self.fname,'r') as f:
         datasets_in = {}
         for d in datasets_requested:
           loc = f[group+'/'+d['label']]

@@ -830,7 +830,7 @@ class DADF5():
       N_not_calculated = len(todo)
     while N_not_calculated > 0:
       result = results.get()
-      with h5py.File(self.filename,'a') as f: # write to file
+      with h5py.File(self.fname,'a') as f: # write to file
         dataset_out = f[result['group']].create_dataset(result['label'],data=result['data'])
         for k in result['meta'].keys():
           dataset_out.attrs[k] = result['meta'][k].encode()

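With the constructor keyword renamed, downstream code addresses the results file via fname throughout. A minimal usage sketch of the calls touched by this commit (hypothetical file name; assumes a DADF5 results file exists):

    import damask

    f = damask.DADF5('geom_load.hdf5')   # hypothetical DADF5/HDF5 output file
    f.set_by_increment(25,25)            # narrow the view to increment 25
    print(f.get_dataset_location('orientation'))
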
@@ -239,8 +239,8 @@ class Geom():
     header.append('homogenization {}'.format(self.get_homogenization()))
     return header

-  @classmethod
-  def from_file(cls,fname):
+  @staticmethod
+  def from_file(fname):
     """
     Reads a geom file.

@@ -300,7 +300,7 @@ class Geom():
     if not np.any(np.mod(microstructure.flatten(),1) != 0.0): # no float present
       microstructure = microstructure.astype('int')

-    return cls(microstructure.reshape(grid),size,origin,homogenization,comments)
+    return Geom(microstructure.reshape(grid),size,origin,homogenization,comments)


   def to_file(self,fname,pack=None):

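Switching from_file from a classmethod to a staticmethod pins the return type: the body now constructs Geom explicitly instead of cls, so a subclass calling from_file gets a plain Geom rather than an instance of itself. A small standalone sketch of that semantic difference (hypothetical classes, not part of the commit):

    class Base:
        @staticmethod
        def make():            # always builds Base, even when called as Sub.make()
            return Base()

    class Sub(Base):
        pass

    assert type(Sub.make()) is Base
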
@@ -21,6 +21,25 @@ def Cauchy(F,P):
   return symmetric(sigma)


+def PK2(F,P):
+  """
+  Return 2. Piola-Kirchhoff stress calculated from 1. Piola-Kirchhoff stress and deformation gradient.
+
+  Parameters
+  ----------
+  F : numpy.array of shape (:,3,3) or (3,3)
+      Deformation gradient.
+  P : numpy.array of shape (:,3,3) or (3,3)
+      1. Piola-Kirchhoff stress.
+
+  """
+  if np.shape(F) == np.shape(P) == (3,3):
+    S = np.dot(np.linalg.inv(F),P)
+  else:
+    S = np.einsum('ijk,ikl->ijl',np.linalg.inv(F),P)
+  return S
+
+
 def strain_tensor(F,t,m):
   """
   Return strain tensor calculated from deformation gradient.

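The new PK2 function computes S = F^-1 P (per row via np.einsum for stacked inputs). Its consistency with the Cauchy stress follows from sigma = (1/det F) F S F^T. A numpy-only check with hypothetical values (not part of the commit):

    import numpy as np

    F = np.eye(3) + 1e-3*np.random.rand(3,3)   # hypothetical deformation gradient
    P = np.random.rand(3,3)                    # hypothetical 1. Piola-Kirchhoff stress

    S     = np.dot(np.linalg.inv(F),P)         # 2. Piola-Kirchhoff stress
    sigma = np.dot(P,F.T)/np.linalg.det(F)     # Cauchy stress from P
    assert np.allclose(sigma, np.dot(F,np.dot(S,F.T))/np.linalg.det(F))
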
@ -0,0 +1,257 @@
|
||||||
|
import re
|
||||||
|
|
||||||
|
import pandas as pd
|
||||||
|
import numpy as np
|
||||||
|
|
||||||
|
class Table():
|
||||||
|
"""Store spreadsheet-like data."""
|
||||||
|
|
||||||
|
def __init__(self,data,shapes,comments=None):
|
||||||
|
"""
|
||||||
|
New spreadsheet.
|
||||||
|
|
||||||
|
Parameters
|
||||||
|
----------
|
||||||
|
data : numpy.ndarray
|
||||||
|
Data.
|
||||||
|
shapes : dict with str:tuple pairs
|
||||||
|
Shapes of the columns. Example 'F':(3,3) for a deformation gradient.
|
||||||
|
comments : iterable of str, optional
|
||||||
|
Additional, human-readable information.
|
||||||
|
|
||||||
|
"""
|
||||||
|
self.comments = [] if comments is None else [c for c in comments]
|
||||||
|
self.data = pd.DataFrame(data=data)
|
||||||
|
self.shapes = shapes
|
||||||
|
self.__label_condensed()
|
||||||
|
|
||||||
|
|
||||||
|
def __label_flat(self):
|
||||||
|
"""Label data individually, e.g. v v v ==> 1_v 2_v 3_v."""
|
||||||
|
labels = []
|
||||||
|
for label,shape in self.shapes.items():
|
||||||
|
size = np.prod(shape)
|
||||||
|
labels += ['{}{}'.format('' if size == 1 else '{}_'.format(i+1),label) for i in range(size)]
|
||||||
|
self.data.columns = labels
|
||||||
|
|
||||||
|
|
||||||
|
def __label_condensed(self):
|
||||||
|
"""Label data condensed, e.g. 1_v 2_v 3_v ==> v v v."""
|
||||||
|
labels = []
|
||||||
|
for label,shape in self.shapes.items():
|
||||||
|
labels += [label] * np.prod(shape)
|
||||||
|
self.data.columns = labels
|
||||||
|
|
||||||
|
|
||||||
|
def __add_comment(self,label,shape,info):
|
||||||
|
if info is not None:
|
||||||
|
self.comments.append('{}{}: {}'.format(label,
|
||||||
|
' '+str(shape) if np.prod(shape,dtype=int) > 1 else '',
|
||||||
|
info))
|
||||||
|
|
||||||
|
|
||||||
|
    @staticmethod
    def from_ASCII(fname):
        """
        Create table from ASCII file.

        The first line needs to indicate the number of subsequent header lines as 'n header'.
        Vector data column labels are indicated by '1_v, 2_v, ..., n_v'.
        Tensor data column labels are indicated by '3x3:1_T, 3x3:2_T, ..., 3x3:9_T'.

        Parameters
        ----------
        fname : file, str, or pathlib.Path
            Filename or file for reading.

        """
        try:
            f = open(fname)
        except TypeError:
            f = fname

        header,keyword = f.readline().split()
        if keyword == 'header':
            header = int(header)
        else:
            raise Exception('first line needs to be "n header"')
        comments = [f.readline()[:-1] for i in range(1,header)]
        labels = f.readline().split()

        # column shapes are encoded in the labels: 'label', 'n_label', or 'dxd:i_label'
        shapes = {}
        for label in labels:
            tensor_column = re.search(r'[0-9,x]*?:[0-9]*?_',label)
            if tensor_column:
                my_shape = tensor_column.group().split(':',1)[0].split('x')
                shapes[label.split('_',1)[1]] = tuple([int(d) for d in my_shape])
            else:
                vector_column = re.match(r'[0-9]*?_',label)
                if vector_column:
                    shapes[label.split('_',1)[1]] = (int(label.split('_',1)[0]),)
                else:
                    shapes[label] = (1,)

        data = pd.read_csv(f,names=list(range(len(labels))),sep=r'\s+').to_numpy()

        return Table(data,shapes,comments)

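    # Editor's sketch (not part of the original file): a minimal conforming
    # input, declaring a scalar 's' and a vector 'v':
    #   >>> from io import StringIO
    #   >>> t = Table.from_ASCII(StringIO('2 header\none comment\ns 1_v 2_v 3_v\n1.0 0.0 0.0 1.0\n'))
    #   >>> t.shapes
    #   {'s': (1,), 'v': (3,)}
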
    @property
    def labels(self):
        """Return the labels of all columns."""
        return list(self.shapes.keys())

    def get(self,label):
        """
        Get column data.

        Parameters
        ----------
        label : str
            Column label.

        """
        if re.match(r'[0-9]*?_',label):                  # single component of a group, e.g. '5_F'
            idx,key = label.split('_',1)
            data = self.data[key].to_numpy()[:,int(idx)-1].reshape((-1,1))
        else:                                            # whole group, restore its trailing shape
            data = self.data[label].to_numpy().reshape((-1,)+self.shapes[label])

        return data.astype(type(data.flatten()[0]))

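    # Editor's sketch: get restores the trailing shape of a column group, or
    # returns a single (n,1) component when addressed as 'i_label' (t as above):
    #   >>> t.get('F').shape
    #   (5, 3, 3)
    #   >>> t.get('5_F').shape
    #   (5, 1)
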
    def set(self,label,data,info=None):
        """
        Set column data.

        Parameters
        ----------
        label : str
            Column label.
        data : np.ndarray
            New data.
        info : str, optional
            Human-readable information about the new data.

        """
        self.__add_comment(label,data.shape[1:],info)

        if re.match(r'[0-9]*?_',label):                  # update a single component, e.g. '1_v'
            idx,key = label.split('_',1)
            iloc = self.data.columns.get_loc(key).tolist().index(True) + int(idx) -1
            self.data.iloc[:,iloc] = data
        else:
            self.data[label] = data.reshape(self.data[label].shape)

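    # Editor's sketch: set replaces a whole column group or one component:
    #   >>> t.set('F',np.zeros((5,3,3)),'reset to zero')
    #   >>> t.set('1_v',np.arange(5),'first component only')
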
    def add(self,label,data,info=None):
        """
        Add column data.

        Parameters
        ----------
        label : str
            Column label.
        data : np.ndarray
            New data.
        info : str, optional
            Human-readable information about the new data.

        """
        self.__add_comment(label,data.shape[1:],info)

        self.shapes[label] = data.shape[1:] if len(data.shape) > 1 else (1,)
        size = np.prod(data.shape[1:],dtype=int)
        new = pd.DataFrame(data=data.reshape(-1,size),
                           columns=[label]*size,
                          )
        new.index = self.data.index
        self.data = pd.concat([self.data,new],axis=1)

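    # Editor's sketch: add appends a new column group and records its shape
    # ('u' is a hypothetical label):
    #   >>> t.add('u',np.random.rand(5,3),'displacement')
    #   >>> t.shapes['u']
    #   (3,)
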
    def delete(self,label):
        """
        Delete column data.

        Parameters
        ----------
        label : str
            Column label.

        """
        self.data.drop(columns=label,inplace=True)

        del self.shapes[label]


    def rename(self,label_old,label_new,info=None):
        """
        Rename column data.

        Parameters
        ----------
        label_old : str
            Old column label.
        label_new : str
            New column label.
        info : str, optional
            Human-readable information about the rename.

        """
        self.data.rename(columns={label_old:label_new},inplace=True)

        self.comments.append('{} => {}{}'.format(label_old,
                                                 label_new,
                                                 '' if info is None else ': {}'.format(info),
                                                 ))

        self.shapes[label_new] = self.shapes.pop(label_old)

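    # Editor's sketch: delete and rename keep self.shapes in sync with the
    # underlying pandas columns:
    #   >>> t.rename('u','w')
    #   >>> t.get('u')   # now raises KeyError
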
    def sort_by(self,labels,ascending=True):
        """
        Sort table by values of given labels.

        Parameters
        ----------
        labels : str or list
            Column labels for sorting.
        ascending : bool or list, optional
            Set sort order.

        """
        if isinstance(labels,str): labels = [labels]     # allow a single label
        self.__label_flat()
        self.data.sort_values(labels,axis=0,inplace=True,ascending=ascending)
        self.__label_condensed()
        self.comments.append('sorted by [{}]'.format(', '.join(labels)))

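    # Editor's sketch: sorting by one component reorders whole rows and leaves
    # a note in the comments:
    #   >>> t.sort_by('1_v',ascending=False)
    #   >>> t.comments[-1]
    #   'sorted by [1_v]'
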
    def to_ASCII(self,fname):
        """
        Store as plain text file.

        Parameters
        ----------
        fname : file, str, or pathlib.Path
            Filename or file for writing.

        """
        labels = []
        for l in self.shapes:
            if self.shapes[l] == (1,):                   # scalar: plain label
                labels.append('{}'.format(l))
            elif len(self.shapes[l]) == 1:               # vector: '1_v ... n_v'
                labels += ['{}_{}'.format(i+1,l) \
                           for i in range(self.shapes[l][0])]
            else:                                        # tensor: 'dxd:i_T'
                labels += ['{}:{}_{}'.format('x'.join([str(d) for d in self.shapes[l]]),i+1,l) \
                           for i in range(np.prod(self.shapes[l],dtype=int))]

        header = ['{} header'.format(len(self.comments)+1)] \
               + self.comments \
               + [' '.join(labels)]

        try:
            f = open(fname,'w')
        except TypeError:
            f = fname
        for line in header: f.write(line+'\n')
        self.data.to_csv(f,sep=' ',index=False,header=False)
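
# Editor's sketch (not part of the original file): to_ASCII and from_ASCII are
# designed to round-trip, as the tests below exercise:
#   >>> t = Table(np.ones((2,4)),{'v':(3,),'s':(1,)})
#   >>> t.to_ASCII('example.txt')
#   >>> Table.from_ASCII('example.txt').shapes
#   {'v': (3,), 's': (1,)}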
@ -0,0 +1,4 @@
1 header
a b
1.0 hallo
0.1 "hallo test"
@ -0,0 +1,6 @@
1 header
a b 1_c 2_c
1 2 3 4
5 6 7 8
9 10. 12. 12
@ -0,0 +1,128 @@
import os

import pytest
import numpy as np

from damask import Table


@pytest.fixture
def default():
    """Simple Table."""
    x = np.ones((5,13),dtype=float)
    return Table(x,{'F':(3,3),'v':(3,),'s':(1,)},['test data','contains only ones'])

@pytest.fixture
def reference_dir(reference_dir_base):
    """Directory containing reference results."""
    return os.path.join(reference_dir_base,'Table')


class TestTable:

    def test_get_scalar(self,default):
        d = default.get('s')
        assert np.allclose(d,1.0) and d.shape[1:] == (1,)

    def test_get_vector(self,default):
        d = default.get('v')
        assert np.allclose(d,1.0) and d.shape[1:] == (3,)

    def test_get_tensor(self,default):
        d = default.get('F')
        assert np.allclose(d,1.0) and d.shape[1:] == (3,3)

    def test_get_component(self,default):
        d = default.get('5_F')
        assert np.allclose(d,1.0) and d.shape[1:] == (1,)

    def test_write_read_str(self,default,tmpdir):
        default.to_ASCII(str(tmpdir.join('default.txt')))
        new = Table.from_ASCII(str(tmpdir.join('default.txt')))
        assert all(default.data==new.data)

    def test_write_read_file(self,default,tmpdir):
        with open(tmpdir.join('default.txt'),'w') as f:
            default.to_ASCII(f)
        with open(tmpdir.join('default.txt')) as f:
            new = Table.from_ASCII(f)
        assert all(default.data==new.data)

    @pytest.mark.parametrize('fname',['datatype-mix.txt','whitespace-mix.txt'])
    def test_read_strange(self,reference_dir,fname):
        with open(os.path.join(reference_dir,fname)) as f:
            Table.from_ASCII(f)

    def test_set(self,default):
        default.set('F',np.zeros((5,3,3)),'set to zero')
        d = default.get('F')
        assert np.allclose(d,0.0) and d.shape[1:] == (3,3)

    def test_labels(self,default):
        assert default.labels == ['F','v','s']

    def test_add(self,default):
        d = np.random.random((5,9))
        default.add('nine',d,'random data')
        assert np.allclose(d,default.get('nine'))

    def test_rename_equivalent(self,default):
        v = default.get('v')
        default.rename('v','u')
        u = default.get('u')
        assert np.all(v == u)

    def test_rename_gone(self,default):
        default.rename('v','V')
        with pytest.raises(KeyError):
            default.get('v')

    def test_delete(self,default):
        default.delete('v')
        with pytest.raises(KeyError):
            default.get('v')


    def test_invalid_initialization(self):
        x = np.random.random((5,10))
        with pytest.raises(ValueError):
            Table(x,{'F':(3,3)})

    def test_invalid_set(self,default):
        x = default.get('v')
        with pytest.raises(ValueError):
            default.set('F',x,'does not work')

    def test_invalid_get(self,default):
        with pytest.raises(KeyError):
            default.get('n')

    def test_sort_scalar(self):
        x = np.random.random((5,13))
        t = Table(x,{'F':(3,3),'v':(3,),'s':(1,)},['random test data'])
        unsort = t.get('s')
        t.sort_by('s')
        sort = t.get('s')
        assert np.all(np.sort(unsort,0)==sort)

    def test_sort_component(self):
        x = np.random.random((5,12))
        t = Table(x,{'F':(3,3),'v':(3,)},['random test data'])
        unsort = t.get('4_F')
        t.sort_by('4_F')
        sort = t.get('4_F')
        assert np.all(np.sort(unsort,0)==sort)

    def test_sort_revert(self):
        x = np.random.random((5,12))
        t = Table(x,{'F':(3,3),'v':(3,)},['random test data'])
        t.sort_by('4_F',ascending=False)
        sort = t.get('4_F')
        assert np.all(np.sort(sort,0)==sort[::-1,:])

    def test_sort(self):
        t = Table(np.array([[0,1,],[2,1,]]),
                  {'v':(2,)},
                  ['test data'])
        t.add('s',np.array(['b','a']))
        t.sort_by('s')
        assert np.all(t.get('1_v') == np.array([2,0]).reshape((2,1)))