diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml
index 47cb2810c..6e82561c5 100644
--- a/.gitlab-ci.yml
+++ b/.gitlab-ci.yml
@@ -1,6 +1,7 @@
 ---
 stages:
   - prepareAll
+  - python
   - preprocessing
   - postprocessing
   - compilePETSc
@@ -103,6 +104,16 @@ checkout:
     - master
     - release

+###################################################################################################
+Pytest:
+  stage: python
+  script:
+    - cd $DAMASKROOT/python
+    - pytest
+  except:
+    - master
+    - release
+
 ###################################################################################################
 OrientationRelationship:
   stage: preprocessing
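The new `Pytest` job only changes into `$DAMASKROOT/python` and runs `pytest`, so the test suite added at the bottom of this patch gates every non-release branch. Roughly the same run can be reproduced locally; using pytest's Python entry point here is purely illustrative (the CI job invokes the CLI):

```python
# Local equivalent of the new CI stage, run from a repository checkout.
import pytest

pytest.main(['python/tests'])   # what `cd $DAMASKROOT/python && pytest` exercises
```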
diff --git a/VERSION b/VERSION
index 1fe12d401..63973c11b 100644
--- a/VERSION
+++ b/VERSION
@@ -1 +1 @@
-v2.0.3-1133-gfede8225
+v2.0.3-1136-gcc67f0e1
diff --git a/processing/post/DADF5toDREAM3D.py b/processing/post/DADF5toDREAM3D.py
index 885545297..7ab04b934 100755
--- a/processing/post/DADF5toDREAM3D.py
+++ b/processing/post/DADF5toDREAM3D.py
@@ -49,7 +49,7 @@ Phase_types = {'Primary': 0} #further additions to these can be done by looking
 # --------------------------------------------------------------------
 parser = argparse.ArgumentParser(description='Creating a file for DREAM3D from DAMASK data')
 parser.add_argument('filenames',nargs='+',help='HDF5 based output file')
-parser.add_argument('--inc',nargs='+',help='Increment for which DREAM3D to be used, eg. 00025',type=int)
+parser.add_argument('--inc',nargs='+',help='Increment for which DREAM3D to be used, e.g. 25',type=int)
 parser.add_argument('-d','--dir', dest='dir',default='postProc',metavar='string',
                     help='name of subdirectory to hold output')

@@ -59,15 +59,13 @@ options = parser.parse_args()
 # loop over input files
 for filename in options.filenames:
   f = damask.DADF5(filename) #DAMASK output file
-  count = 0
-  for increment in f.increments:
-    if int(increment[3:]) not in options.inc:
-      count = count + 1
+  for increment in options.inc:
+    f.set_by_increment(increment,increment)
+    if len(f.visible['increments']) == 0:
       continue

   #-------output file creation-------------------------------------
   dirname = os.path.abspath(os.path.join(os.path.dirname(filename),options.dir))
-  print(dirname)
   try:
     os.mkdir(dirname)
   except FileExistsError:
@@ -90,11 +88,10 @@ for filename in options.filenames:
   # Phase information of DREAM.3D is constituent ID in DAMASK
   o[cell_data_label + '/Phases'] = f.get_constituent_ID().reshape(tuple(f.grid)+(1,))
   # Data quaternions
-  DAMASK_quaternion = f.read_dataset(f.get_dataset_location('orientation'),0)
-  DREAM_3D_quaternion = np.empty((np.prod(f.grid),4),dtype=np.float32)
+  DAMASK_quaternion = f.read_dataset(f.get_dataset_location('orientation'))
   # Convert: DAMASK uses P = -1, DREAM.3D uses P = +1. Also change position of imaginary part
   DREAM_3D_quaternion = np.hstack((-DAMASK_quaternion['x'],-DAMASK_quaternion['y'],-DAMASK_quaternion['z'],
-                                    DAMASK_quaternion['w']))
+                                    DAMASK_quaternion['w'])).astype(np.float32)
   o[cell_data_label + '/Quats'] = DREAM_3D_quaternion.reshape(tuple(f.grid)+(4,))

   # Attributes to CellData group
@@ -109,12 +106,14 @@ for filename in options.filenames:
   # phase attributes
   o[cell_data_label + '/Phases'].attrs['ComponentDimensions'] = np.array([1],np.uint64)
   o[cell_data_label + '/Phases'].attrs['ObjectType'] = 'DataArray'
+  o[cell_data_label + '/Phases'].attrs['TupleDimensions'] = f.grid.astype(np.uint64)

   # Quats attributes
   o[cell_data_label + '/Quats'].attrs['ComponentDimensions'] = np.array([4],np.uint64)
   o[cell_data_label + '/Quats'].attrs['ObjectType'] = 'DataArray'
-
-  # Create EnsembleAttributeMatrix
+  o[cell_data_label + '/Quats'].attrs['TupleDimensions'] = f.grid.astype(np.uint64)
+
+  # Create EnsembleAttributeMatrix
   ensemble_label = data_container_label + '/EnsembleAttributeMatrix'

   # Data CrystalStructures
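The quaternion block is the substantive change in this file: DAMASK stores orientations as (w,x,y,z) with convention P = -1, DREAM.3D expects (x,y,z,w) with P = +1, so the imaginary part flips sign and w moves to the end. A standalone sketch of that conversion; the structured field names mirror what `read_dataset` returns for 'orientation' and are otherwise an assumption:

```python
import numpy as np

# one quaternion in DAMASK layout (w,x,y,z), P = -1
q = np.array([(0.7071, 0.7071, 0.0, 0.0)],
             dtype=[('w','f8'),('x','f8'),('y','f8'),('z','f8')])

# P = -1 -> P = +1: negate the imaginary part; DREAM.3D wants (x,y,z,w)
q_d3d = np.hstack([-q['x'].reshape(-1,1),
                   -q['y'].reshape(-1,1),
                   -q['z'].reshape(-1,1),
                    q['w'].reshape(-1,1)]).astype(np.float32)
```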
diff --git a/processing/post/addCauchy.py b/processing/post/addCauchy.py
index 18c4ec215..afc5a57be 100755
--- a/processing/post/addCauchy.py
+++ b/processing/post/addCauchy.py
@@ -2,10 +2,9 @@
 import os
 import sys
+from io import StringIO
 from optparse import OptionParser

-import numpy as np
-
 import damask

@@ -36,54 +35,15 @@ parser.set_defaults(defgrad = 'f',
                    )

 (options,filenames) = parser.parse_args()
-
-# --- loop over input files -------------------------------------------------------------------------
-
 if filenames == []: filenames = [None]

 for name in filenames:
-  try:
-    table = damask.ASCIItable(name = name, buffered = False)
-  except:
-    continue
-  damask.util.report(scriptName,name)
+  damask.util.report(scriptName,name)

-# ------------------------------------------ read header ------------------------------------------
+  table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
+  table.add('Cauchy',
+            damask.mechanics.Cauchy(table.get(options.defgrad).reshape(-1,3,3),
+                                    table.get(options.stress ).reshape(-1,3,3)).reshape(-1,9),
+            scriptID+' '+' '.join(sys.argv[1:]))

-  table.head_read()
-
-# ------------------------------------------ sanity checks ----------------------------------------
-
-  errors = []
-  column = {}
-
-  for tensor in [options.defgrad,options.stress]:
-    dim = table.label_dimension(tensor)
-    if dim < 0: errors.append('column {} not found.'.format(tensor))
-    elif dim != 9: errors.append('column {} is not a tensor.'.format(tensor))
-    else:
-      column[tensor] = table.label_index(tensor)
-
-  if errors != []:
-    damask.util.croak(errors)
-    table.close(dismiss = True)
-    continue
-
-# ------------------------------------------ assemble header --------------------------------------
-
-  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
-  table.labels_append(['{}_Cauchy'.format(i+1) for i in range(9)]) # extend ASCII header with new labels
-  table.head_write()
-
-# ------------------------------------------ process data ------------------------------------------
-
-  outputAlive = True
-  while outputAlive and table.data_read(): # read next data line of ASCII table
-    F = np.array(list(map(float,table.data[column[options.defgrad]:column[options.defgrad]+9])),'d').reshape(3,3)
-    P = np.array(list(map(float,table.data[column[options.stress ]:column[options.stress ]+9])),'d').reshape(3,3)
-    table.data_append(list(1.0/np.linalg.det(F)*np.dot(P,F.T).reshape(9))) # [Cauchy] = (1/det(F)) * [P].[F_transpose]
-    outputAlive = table.data_write() # output processed line
-
-# ------------------------------------------ output finalization -----------------------------------
-
-  table.close() # close input ASCII table (works for stdin)
+  table.to_ASCII(sys.stdout if name is None else name)
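This rewrite is the template for all the filter scripts that follow: read the whole table via `damask.Table.from_ASCII`, compute with a vectorized `damask.mechanics` routine, append the result with provenance information, write back. The conversion itself is sigma = det(F)^-1 P F^T, symmetrized; a minimal single-tensor sketch in plain numpy:

```python
import numpy as np

def cauchy(F, P):
    """Cauchy stress from deformation gradient F and 1st Piola-Kirchhoff stress P."""
    sigma = np.dot(P, F.T) / np.linalg.det(F)
    return 0.5 * (sigma + sigma.T)              # report the symmetric part

F = np.eye(3) + 1e-3 * np.random.rand(3, 3)     # slightly deformed configuration
P = np.random.rand(3, 3)
print(cauchy(F, P))
```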
diff --git a/processing/post/addDeterminant.py b/processing/post/addDeterminant.py
index 14f0321be..f2368559d 100755
--- a/processing/post/addDeterminant.py
+++ b/processing/post/addDeterminant.py
@@ -2,22 +2,16 @@
 import os
 import sys
+from io import StringIO
 from optparse import OptionParser

+import numpy as np
+
 import damask

-
 scriptName = os.path.splitext(os.path.basename(__file__))[0]
 scriptID   = ' '.join([scriptName,damask.version])

-def determinant(m):
-  return +m[0]*m[4]*m[8] \
-         +m[1]*m[5]*m[6] \
-         +m[2]*m[3]*m[7] \
-         -m[2]*m[4]*m[6] \
-         -m[1]*m[3]*m[8] \
-         -m[0]*m[5]*m[7]
-
 
 # --------------------------------------------------------------------
 #                                MAIN
@@ -34,61 +28,18 @@ parser.add_option('-t','--tensor',
                   help = 'heading of columns containing tensor field values')

 (options,filenames) = parser.parse_args()
+if filenames == []: filenames = [None]

 if options.tensor is None:
   parser.error('no data column specified.')

-# --- loop over input files -------------------------------------------------------------------------
-
-if filenames == []: filenames = [None]
-
 for name in filenames:
-  try:
-    table = damask.ASCIItable(name = name,
-                              buffered = False)
-  except: continue
-  damask.util.report(scriptName,name)
+  damask.util.report(scriptName,name)

-# ------------------------------------------ read header ------------------------------------------
+  table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
+  for tensor in options.tensor:
+    table.add('det({})'.format(tensor),
+              np.linalg.det(table.get(tensor).reshape(-1,3,3)),
+              scriptID+' '+' '.join(sys.argv[1:]))

-  table.head_read()
-
-# ------------------------------------------ sanity checks ----------------------------------------
-
-  items = {
-            'tensor': {'dim': 9, 'shape': [3,3], 'labels':options.tensor, 'column': []},
-          }
-  errors = []
-  remarks = []
-
-  for type, data in items.items():
-    for what in data['labels']:
-      dim = table.label_dimension(what)
-      if dim != data['dim']: remarks.append('column {} is not a {}...'.format(what,type))
-      else:
-        items[type]['column'].append(table.label_index(what))
-        table.labels_append('det({})'.format(what)) # extend ASCII header with new labels
-
-  if remarks != []: damask.util.croak(remarks)
-  if errors != []:
-    damask.util.croak(errors)
-    table.close(dismiss = True)
-    continue
-
-# ------------------------------------------ assemble header --------------------------------------
-
-  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
-  table.head_write()
-
-# ------------------------------------------ process data ------------------------------------------
-
-  outputAlive = True
-  while outputAlive and table.data_read(): # read next data line of ASCII table
-    for type, data in items.items():
-      for column in data['column']:
-        table.data_append(determinant(list(map(float,table.data[column: column+data['dim']]))))
-    outputAlive = table.data_write() # output processed line
-
-# ------------------------------------------ output finalization -----------------------------------
-
-  table.close() # close input ASCII table (works for stdin)
+  table.to_ASCII(sys.stdout if name is None else name)
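The hand-rolled cofactor expansion is retired in favor of `np.linalg.det`, which also handles an entire column of tensors in one call:

```python
import numpy as np

T = np.random.rand(5, 3, 3)        # five 3x3 tensors, a Table column reshaped
print(np.linalg.det(T))            # one determinant per tensor, shape (5,)
```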
diff --git a/processing/post/addDeviator.py b/processing/post/addDeviator.py
index c9aeaacfd..ca06034b3 100755
--- a/processing/post/addDeviator.py
+++ b/processing/post/addDeviator.py
@@ -2,6 +2,7 @@
 import os
 import sys
+from io import StringIO
 from optparse import OptionParser

 import damask
@@ -9,17 +10,6 @@ import damask
 scriptName = os.path.splitext(os.path.basename(__file__))[0]
 scriptID   = ' '.join([scriptName,damask.version])

-oneThird = 1.0/3.0
-
-def deviator(m,spherical = False): # Careful, do not change the value of m, it's intent(inout)!
-  sph = oneThird*(m[0]+m[4]+m[8])
-  dev = [
-          m[0]-sph, m[1],     m[2],
-          m[3],     m[4]-sph, m[5],
-          m[6],     m[7],     m[8]-sph,
-        ]
-  return dev,sph if spherical else dev
-
 
 # --------------------------------------------------------------------
 #                                MAIN
@@ -40,67 +30,22 @@ parser.add_option('-s','--spherical',
                   help = 'report spherical part of tensor (hydrostatic component, pressure)')

 (options,filenames) = parser.parse_args()
-
-if options.tensor is None:
-  parser.error('no data column specified...')
-
-# --- loop over input files -------------------------------------------------------------------------
-
 if filenames == []: filenames = [None]

+if options.tensor is None:
+  parser.error('no data column specified...')
+
 for name in filenames:
-  try:
-    table = damask.ASCIItable(name = name, buffered = False)
-  except:
-    continue
-  damask.util.report(scriptName,name)
+  damask.util.report(scriptName,name)
+
+  table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
+  for tensor in options.tensor:
+    table.add('dev({})'.format(tensor),
+              damask.mechanics.deviatoric_part(table.get(tensor).reshape(-1,3,3)).reshape((-1,9)),
+              scriptID+' '+' '.join(sys.argv[1:]))
+    if options.spherical:
+      table.add('sph({})'.format(tensor),
+                damask.mechanics.spherical_part(table.get(tensor).reshape(-1,3,3)),
+                scriptID+' '+' '.join(sys.argv[1:]))

-# ------------------------------------------ read header ------------------------------------------
-
-  table.head_read()
-
-# ------------------------------------------ sanity checks ----------------------------------------
-
-  items = {
-            'tensor': {'dim': 9, 'shape': [3,3], 'labels':options.tensor, 'active':[], 'column': []},
-          }
-  errors = []
-  remarks = []
-  column = {}
-
-  for type, data in items.items():
-    for what in data['labels']:
-      dim = table.label_dimension(what)
-      if dim != data['dim']: remarks.append('column {} is not a {}.'.format(what,type))
-      else:
-        items[type]['active'].append(what)
-        items[type]['column'].append(table.label_index(what))
-
-  if remarks != []: damask.util.croak(remarks)
-  if errors != []:
-    damask.util.croak(errors)
-    table.close(dismiss = True)
-    continue
-
-# ------------------------------------------ assemble header --------------------------------------
-
-  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
-  for type, data in items.items():
-    for label in data['active']:
-      table.labels_append(['{}_dev({})'.format(i+1,label) for i in range(data['dim'])] + \
-                          (['sph({})'.format(label)] if options.spherical else [])) # extend ASCII header with new labels
-  table.head_write()
-
-# ------------------------------------------ process data ------------------------------------------
-
-  outputAlive = True
-  while outputAlive and table.data_read(): # read next data line of ASCII table
-    for type, data in items.items():
-      for column in data['column']:
-        table.data_append(deviator(list(map(float,table.data[column: column+data['dim']])),options.spherical))
-    outputAlive = table.data_write() # output processed line
-
-# ------------------------------------------ output finalization -----------------------------------
-
-  table.close() # close input ASCII table (works for stdin)
+  table.to_ASCII(sys.stdout if name is None else name)
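`damask.mechanics.deviatoric_part`/`spherical_part` replace the local `deviator` helper. The decomposition is p = tr(T)/3 and T' = T - p*I, sketched for a single tensor:

```python
import numpy as np

def spherical_part(T):
    return np.trace(T) / 3.0

def deviatoric_part(T):
    return T - spherical_part(T) * np.eye(3)

T = np.random.rand(3, 3)
assert np.isclose(np.trace(deviatoric_part(T)), 0.0)   # deviator is trace-free
```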
diff --git a/processing/post/addInfo.py b/processing/post/addInfo.py
index 2d8192cc1..5e32510db 100755
--- a/processing/post/addInfo.py
+++ b/processing/post/addInfo.py
@@ -1,6 +1,8 @@
 #!/usr/bin/env python3

 import os
+import sys
+from io import StringIO
 from optparse import OptionParser

 import damask
@@ -24,35 +26,16 @@ parser.add_option('-i',
                   dest = 'info', action = 'extend', metavar = '<string LIST>',
                   help = 'items to add')
-
 (options,filenames) = parser.parse_args()
+if filenames == []: filenames = [None]

 if options.info is None:
   parser.error('no info specified.')

-# --- loop over input files ------------------------------------------------------------------------
-
-if filenames == []: filenames = [None]
-
 for name in filenames:
-  try:    table = damask.ASCIItable(name = name,
-                                    buffered = False)
-  except: continue
-  damask.util.report(scriptName,name)
+  damask.util.report(scriptName,name)

-# ------------------------------------------ assemble header ---------------------------------------
+  table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
+  table.comments += options.info

-  table.head_read()
-  table.info_append(options.info)
-  table.head_write()
-
-# ------------------------------------------ pass through data -------------------------------------
-
-  outputAlive = True
-
-  while outputAlive and table.data_read(): # read next data line of ASCII table
-    outputAlive = table.data_write() # output processed line
-
-# ------------------------------------------ output finalization -----------------------------------
-
-  table.close() # close ASCII tables
+  table.to_ASCII(sys.stdout if name is None else name)
diff --git a/processing/post/addMises.py b/processing/post/addMises.py
index be11b0f1c..0c2a6db50 100755
--- a/processing/post/addMises.py
+++ b/processing/post/addMises.py
@@ -2,10 +2,8 @@
 import os
 import sys
+from io import StringIO
 from optparse import OptionParser
-from collections import OrderedDict
-
-import numpy as np

 import damask

@@ -13,15 +11,6 @@ import damask
 scriptName = os.path.splitext(os.path.basename(__file__))[0]
 scriptID   = ' '.join([scriptName,damask.version])

-def Mises(what,tensor):
-
-  dev = tensor - np.trace(tensor)/3.0*np.eye(3)
-  symdev = 0.5*(dev+dev.T)
-  return np.sqrt(np.sum(symdev*symdev.T)*
-        {
-         'stress': 3.0/2.0,
-         'strain': 2.0/3.0,
-        }[what.lower()])
 
 # --------------------------------------------------------------------
 #                                MAIN
@@ -47,62 +36,21 @@ parser.set_defaults(strain = [],

 (options,filenames) = parser.parse_args()

 if options.stress is [] and options.strain is []:
-  parser.error('no data column specified...')
-
-# --- loop over input files -------------------------------------------------------------------------
+    parser.error('no data column specified...')

 if filenames == []: filenames = [None]

 for name in filenames:
-  try:
-    table = damask.ASCIItable(name = name,
-                              buffered = False)
-  except: continue
-  damask.util.report(scriptName,name)
+  damask.util.report(scriptName,name)

-# ------------------------------------------ read header ------------------------------------------
-
-  table.head_read()
-
-# ------------------------------------------ sanity checks ----------------------------------------
-
-  items = OrderedDict([
-          ('strain', {'dim': 9, 'shape': [3,3], 'labels':options.strain, 'active':[], 'column': []}),
-          ('stress', {'dim': 9, 'shape': [3,3], 'labels':options.stress, 'active':[], 'column': []})
-         ])
-  errors = []
-  remarks = []
-
-  for type, data in items.items():
-    for what in data['labels']:
-      dim = table.label_dimension(what)
-      if dim != data['dim']: remarks.append('column {} is not a {}...'.format(what,type))
-      else:
-        items[type]['active'].append(what)
-        items[type]['column'].append(table.label_index(what))
-        table.labels_append('Mises({})'.format(what)) # extend ASCII header with new labels
-
-  if remarks != []: damask.util.croak(remarks)
-  if errors != []:
-    damask.util.croak(errors)
-    table.close(dismiss = True)
-    continue
-
-# ------------------------------------------ assemble header --------------------------------------
-
-  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
-  table.head_write()
-
-# ------------------------------------------ process data ------------------------------------------
-
-  outputAlive = True
-  while outputAlive and table.data_read(): # read next data line of ASCII table
-    for type, data in items.items():
-      for column in data['column']:
-        table.data_append(Mises(type,
-                                np.array(table.data[column:column+data['dim']],'d').reshape(data['shape'])))
-    outputAlive = table.data_write() # output processed line
-
-# ------------------------------------------ output finalization -----------------------------------
-
-  table.close() # close input ASCII table (works for stdin)
+  table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
+  for strain in options.strain:
+    table.add('Mises({})'.format(strain),
+              damask.mechanics.Mises_strain(damask.mechanics.symmetric(table.get(strain).reshape(-1,3,3))),
+              scriptID+' '+' '.join(sys.argv[1:]))
+  for stress in options.stress:
+    table.add('Mises({})'.format(stress),
+              damask.mechanics.Mises_stress(damask.mechanics.symmetric(table.get(stress).reshape(-1,3,3))),
+              scriptID+' '+' '.join(sys.argv[1:]))
+
+  table.to_ASCII(sys.stdout if name is None else name)
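`addInfo.py` shrinks to a one-line append to `Table.comments`; `addMises.py` delegates to `damask.mechanics.Mises_stress`/`Mises_strain`, which differ only in the scale factor under the root (3/2 for stress, 2/3 for strain) applied to the symmetrized deviator — the same arithmetic the deleted local `Mises` helper did. A sketch with a uniaxial check:

```python
import numpy as np

def mises(T, kind):
    sym = 0.5 * (T + T.T)
    dev = sym - np.trace(sym) / 3.0 * np.eye(3)
    scale = {'stress': 3.0/2.0, 'strain': 2.0/3.0}[kind]
    return np.sqrt(scale * np.tensordot(dev, dev))

sigma = np.diag([100.0, 0.0, 0.0])     # uniaxial stress state
print(mises(sigma, 'stress'))          # -> 100.0
```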
diff --git a/processing/post/addPK2.py b/processing/post/addPK2.py
index f38753619..185160d79 100755
--- a/processing/post/addPK2.py
+++ b/processing/post/addPK2.py
@@ -2,10 +2,9 @@
 import os
 import sys
+from io import StringIO
 from optparse import OptionParser

-import numpy as np
-
 import damask

@@ -36,53 +35,16 @@ parser.set_defaults(defgrad = 'f',
                    )

 (options,filenames) = parser.parse_args()
-
-# --- loop over input files -------------------------------------------------------------------------
-
 if filenames == []: filenames = [None]

 for name in filenames:
-  try:
-    table = damask.ASCIItable(name = name,
-                              buffered = False)
-  except: continue
-  damask.util.report(scriptName,name)
+  damask.util.report(scriptName,name)

-# ------------------------------------------ read header ------------------------------------------
+  table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)

-  table.head_read()
+  table.add('S',
+            damask.mechanics.PK2(table.get(options.defgrad).reshape(-1,3,3),
+                                 table.get(options.stress ).reshape(-1,3,3)).reshape(-1,9),
+            scriptID+' '+' '.join(sys.argv[1:]))

-# ------------------------------------------ sanity checks ----------------------------------------
-
-  errors = []
-  column = {}
-
-  for tensor in [options.defgrad,options.stress]:
-    dim = table.label_dimension(tensor)
-    if dim < 0: errors.append('column {} not found.'.format(tensor))
-    elif dim != 9: errors.append('column {} is not a tensor.'.format(tensor))
-    else:
-      column[tensor] = table.label_index(tensor)
-
-  if errors != []:
-    damask.util.croak(errors)
-    table.close(dismiss = True)
-    continue
-
-# ------------------------------------------ assemble header --------------------------------------
-
-  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
-  table.labels_append(['{}_S'.format(i+1) for i in range(9)]) # extend ASCII header with new labels
-  table.head_write()
-
-# ------------------------------------------ process data ------------------------------------------
-  outputAlive = True
-  while outputAlive and table.data_read(): # read next data line of ASCII table
-    F = np.array(list(map(float,table.data[column[options.defgrad]:column[options.defgrad]+9])),'d').reshape(3,3)
-    P = np.array(list(map(float,table.data[column[options.stress ]:column[options.stress ]+9])),'d').reshape(3,3)
-    table.data_append(list(np.dot(np.linalg.inv(F),P).reshape(9))) # [S] =[P].[F-1]
-    outputAlive = table.data_write() # output processed line
-
-# ------------------------------------------ output finalization -----------------------------------
-
-  table.close() # close input ASCII table (works for stdin)
+  table.to_ASCII(sys.stdout if name is None else name)
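`addPK2.py` now calls the `damask.mechanics.PK2` function introduced further down in this patch; the relation is S = F^-1 P. Single-tensor sketch:

```python
import numpy as np

def pk2(F, P):
    return np.dot(np.linalg.inv(F), P)   # S = F^-1 . P

F = np.eye(3)                            # undeformed: S and P coincide
P = np.random.rand(3, 3)
assert np.allclose(pk2(F, P), P)
```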
diff --git a/processing/post/reLabel.py b/processing/post/reLabel.py
index e7ad1f1e9..85d16acea 100755
--- a/processing/post/reLabel.py
+++ b/processing/post/reLabel.py
@@ -2,8 +2,8 @@
 import os
 import sys
+from io import StringIO
 from optparse import OptionParser
-import re

 import damask

@@ -35,62 +35,18 @@ parser.set_defaults(label = [],
                    )

 (options,filenames) = parser.parse_args()
-
-pattern = [re.compile('^()(.+)$'),      # label pattern for scalar
-           re.compile('^(\d+_)?(.+)$'), # label pattern for multidimension
-          ]
-
-# --- loop over input files -------------------------------------------------------------------------
-
 if filenames == []: filenames = [None]

+if len(options.label) != len(options.substitute):
+  parser.error('number of column labels and substitutes do not match.')
+
 for name in filenames:
-  try:    table = damask.ASCIItable(name = name,
-                                    buffered = False)
-  except: continue
-  damask.util.report(scriptName,name)
+  damask.util.report(scriptName,name)

-# ------------------------------------------ read header ------------------------------------------
+  table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
+  for i,label in enumerate(options.label):
+    table.rename(label,
+                 options.substitute[i],
+                 scriptID+' '+' '.join(sys.argv[1:]))

-  table.head_read()
-
-# ------------------------------------------ process labels ---------------------------------------
-
-  errors  = []
-  remarks = []
-
-  if len(options.label) == 0:
-    errors.append('no labels specified.')
-  elif len(options.label) != len(options.substitute):
-    errors.append('mismatch between number of labels ({}) and substitutes ({}).'.format(len(options.label),
-                                                                                        len(options.substitute)))
-  else:
-    indices    = table.label_index    (options.label)
-    dimensions = table.label_dimension(options.label)
-    for i,index in enumerate(indices):
-      if index == -1: remarks.append('label "{}" not present...'.format(options.label[i]))
-      else:
-        m = pattern[int(dimensions[i]>1)].match(table.tags[index]) # isolate label name
-        for j in range(dimensions[i]):
-          table.tags[index+j] = table.tags[index+j].replace(m.group(2),options.substitute[i]) # replace name with substitute
-
-  if remarks != []: damask.util.croak(remarks)
-  if errors != []:
-    damask.util.croak(errors)
-    table.close(dismiss = True)
-    continue
-
-# ------------------------------------------ assemble header ---------------------------------------
-
-  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
-  table.head_write()
-
-# ------------------------------------------ process data ------------------------------------------
-
-  outputAlive = True
-  while outputAlive and table.data_read(): # read next data line of ASCII table
-    outputAlive = table.data_write() # output processed line
-
-# ------------------------------------------ output finalization -----------------------------------
-
-  table.close() # close ASCII tables
+  table.to_ASCII(sys.stdout if name is None else name)
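Relabelling now goes through `Table.rename`, which also records the substitution in the header comments — the regex juggling on raw column tags is gone. Sketch with made-up labels:

```python
import numpy as np
from damask import Table

t = Table(np.ones((2, 3)), {'v': (3,)}, ['demo'])
t.rename('v', 'velocity', 'relabel for export')
print(t.labels)                        # ['velocity']
```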
diff --git a/processing/post/scaleData.py b/processing/post/scaleData.py
index 5b03f8e07..58f853251 100755
--- a/processing/post/scaleData.py
+++ b/processing/post/scaleData.py
@@ -2,10 +2,9 @@
 import os
 import sys
+from io import StringIO
 from optparse import OptionParser

-import numpy as np
-
 import damask

@@ -23,7 +22,7 @@ Uniformly scale column values by given factor.
 """, version = scriptID)

 parser.add_option('-l','--label',
-                  dest = 'label',
+                  dest = 'labels',
                   action = 'extend', metavar = '<string LIST>',
                   help ='column(s) to scale')
 parser.add_option('-f','--factor',
@@ -32,61 +31,21 @@ parser.add_option('-f','--factor',
                   help = 'factor(s) per column')

-parser.set_defaults(label = [],
-                   )
+parser.set_defaults(labels = [],
+                    factor = [])

 (options,filenames) = parser.parse_args()
-
-if len(options.label) != len(options.factor):
-  parser.error('number of column labels and factors do not match.')
-
-# --- loop over input files -------------------------------------------------------------------------
-
 if filenames == []: filenames = [None]

+if len(options.labels) != len(options.factor):
+  parser.error('number of column labels and factors do not match.')
+
 for name in filenames:
-  try:
-    table = damask.ASCIItable(name = name,
-                              buffered = False)
-  except: continue
-  damask.util.report(scriptName,name)
+  damask.util.report(scriptName,name)

-# ------------------------------------------ read header ------------------------------------------
+  table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
+  for i,label in enumerate(options.labels):
+    table.set(label,
+              table.get(label)*float(options.factor[i]),
+              scriptID+' '+' '.join(sys.argv[1:]))

-  table.head_read()
-
-  errors  = []
-  remarks = []
-  columns = []
-  dims    = []
-  factors = []
-
-  for what,factor in zip(options.label,options.factor):
-    col = table.label_index(what)
-    if col < 0: remarks.append('column {} not found...'.format(what,type))
-    else:
-      columns.append(col)
-      factors.append(float(factor))
-      dims.append(table.label_dimension(what))
-
-  if remarks != []: damask.util.croak(remarks)
-  if errors != []:
-    damask.util.croak(errors)
-    table.close(dismiss = True)
-    continue
-
-# ------------------------------------------ assemble header ---------------------------------------
-
-  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
-  table.head_write()
-
-# ------------------------------------------ process data ------------------------------------------
-
-  outputAlive = True
-  while outputAlive and table.data_read(): # read next data line of ASCII table
-    for col,dim,factor in zip(columns,dims,factors): # loop over items
-      table.data[col:col+dim] = factor * np.array(table.data[col:col+dim],'d')
-    outputAlive = table.data_write() # output processed line
-
-# ------------------------------------------ output finalization -----------------------------------
-
-  table.close() # close ASCII tables
+  table.to_ASCII(sys.stdout if name is None else name)
diff --git a/processing/post/shiftData.py b/processing/post/shiftData.py
index 69a9696fa..57b20fbd0 100755
--- a/processing/post/shiftData.py
+++ b/processing/post/shiftData.py
@@ -2,10 +2,9 @@
 import os
 import sys
+from io import StringIO
 from optparse import OptionParser

-import numpy as np
-
 import damask

@@ -23,7 +22,7 @@ Uniformly shift column values by given offset.
 """, version = scriptID)

 parser.add_option('-l','--label',
-                  dest = 'label',
+                  dest = 'labels',
                   action = 'extend', metavar = '<string LIST>',
                   help ='column(s) to shift')
 parser.add_option('-o','--offset',
@@ -32,61 +31,21 @@ parser.add_option('-o','--offset',
                   help = 'offset(s) per column')

-parser.set_defaults(label = [],
-                   )
+parser.set_defaults(labels = [],
+                    offset = [])

 (options,filenames) = parser.parse_args()
-
-if len(options.label) != len(options.offset):
-  parser.error('number of column labels and offsets do not match.')
-
-# --- loop over input files -------------------------------------------------------------------------
-
 if filenames == []: filenames = [None]

+if len(options.labels) != len(options.offset):
+  parser.error('number of column labels and offsets do not match.')
+
 for name in filenames:
-  try:
-    table = damask.ASCIItable(name = name,
-                              buffered = False)
-  except: continue
-  damask.util.report(scriptName,name)
+  damask.util.report(scriptName,name)

-# ------------------------------------------ read header ------------------------------------------
+  table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
+  for i,label in enumerate(options.labels):
+    table.set(label,
+              table.get(label)+float(options.offset[i]),
+              scriptID+' '+' '.join(sys.argv[1:]))

-  table.head_read()
-
-  errors  = []
-  remarks = []
-  columns = []
-  dims    = []
-  offsets = []
-
-  for what,offset in zip(options.label,options.offset):
-    col = table.label_index(what)
-    if col < 0: remarks.append('column {} not found...'.format(what,type))
-    else:
-      columns.append(col)
-      offsets.append(float(offset))
-      dims.append(table.label_dimension(what))
-
-  if remarks != []: damask.util.croak(remarks)
-  if errors != []:
-    damask.util.croak(errors)
-    table.close(dismiss = True)
-    continue
-
-# ------------------------------------------ assemble header ---------------------------------------
-
-  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
-  table.head_write()
-
-# ------------------------------------------ process data ------------------------------------------
-
-  outputAlive = True
-  while outputAlive and table.data_read(): # read next data line of ASCII table
-    for col,dim,offset in zip(columns,dims,offsets): # loop over items
-      table.data[col:col+dim] = offset + np.array(table.data[col:col+dim],'d')
-    outputAlive = table.data_write() # output processed line
-
-# ------------------------------------------ output finalization -----------------------------------
-
-  table.close() # close ASCII tables
+  table.to_ASCII(sys.stdout if name is None else name)
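Both filters reduce to `Table.get`/`Table.set` round-trips. Note the `set_defaults` keys above are adjusted to the renamed `labels` destination; otherwise `options.labels` would be `None` when `-l` is omitted and `len(None)` would raise before the friendly error message. Combined sketch:

```python
import numpy as np
from damask import Table

t = Table(np.ones((4, 3)), {'v': (3,)}, ['demo'])
t.set('v', t.get('v') * 2.0, 'scaled by 2.0')    # what scaleData.py does
t.set('v', t.get('v') + 1.0, 'shifted by 1.0')   # what shiftData.py does
print(t.get('v')[0])                             # -> [3. 3. 3.]
```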
diff --git a/processing/post/sortTable.py b/processing/post/sortTable.py
index 53a357226..3a3738d18 100755
--- a/processing/post/sortTable.py
+++ b/processing/post/sortTable.py
@@ -2,10 +2,9 @@
 import os
 import sys
+from io import StringIO
 from optparse import OptionParser

-import numpy as np
-
 import damask

@@ -26,7 +25,7 @@ With coordinates in columns "x", "y", and "z"; sorting with x slowest and z fastest.

 parser.add_option('-l','--label',
-                  dest = 'keys',
+                  dest = 'labels',
                   action = 'extend', metavar = '<string LIST>',
                   help = 'list of column labels (a,b,c,...)')
 parser.add_option('-r','--reverse',
@@ -38,42 +37,14 @@ parser.set_defaults(reverse = False,
                    )

 (options,filenames) = parser.parse_args()
-
-
-# --- loop over input files -------------------------------------------------------------------------
-
 if filenames == []: filenames = [None]
+if options.labels is None:
+  parser.error('no labels specified.')

 for name in filenames:
-  try:    table = damask.ASCIItable(name = name,
-                                    buffered = False)
-  except: continue
-  damask.util.report(scriptName,name)
+  damask.util.report(scriptName,name)

-# ------------------------------------------ assemble header ---------------------------------------
+  table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
+  table.sort_by(options.labels,not options.reverse)

-  table.head_read()
-  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
-  table.head_write()
-
-# ------------------------------------------ process data ---------------------------------------
-
-  table.data_readArray()
-
-  keys = table.labels(raw = True)[::-1] if options.keys is None else options.keys[::-1] # numpy sorts with most significant column as last
-
-  cols = []
-  remarks = []
-  for i,column in enumerate(table.label_index(keys)):
-    if column < 0: remarks.append('label "{}" not present...'.format(keys[i]))
-    else: cols += [table.data[:,column]]
-  if remarks != []: damask.util.croak(remarks)
-
-  ind = np.lexsort(cols) if cols != [] else np.arange(table.data.shape[0])
-  if options.reverse: ind = ind[::-1]
-
-# ------------------------------------------ output result ---------------------------------------
-
-  table.data = table.data[ind]
-  table.data_writeArray()
-  table.close() # close ASCII table
+  table.to_ASCII(sys.stdout if name is None else name)
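`sort_by` temporarily flattens the condensed column labels so pandas can sort on individual components, then restores them (see the `Table` implementation below). Sketch:

```python
import numpy as np
from damask import Table

t = Table(np.array([[3., 0.], [1., 1.], [2., 2.]]), {'a': (1,), 'b': (1,)}, ['demo'])
t.sort_by('a')                            # ascending, the default
print(t.get('a').flatten())               # -> [1. 2. 3.]
t.sort_by('a', ascending=False)           # what sortTable.py -r maps to
```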
""" - with h5py.File(filename,'r') as f: + with h5py.File(fname,'r') as f: try: self.version_major = f.attrs['DADF5_version_major'] @@ -72,7 +72,7 @@ class DADF5(): 'con_physics': self.con_physics, 'mat_physics': self.mat_physics} - self.filename = filename + self.fname = fname def __manage_visible(self,datasets,what,action): @@ -315,7 +315,7 @@ class DADF5(): groups = [] - with h5py.File(self.filename,'r') as f: + with h5py.File(self.fname,'r') as f: for i in self.iter_visible('increments'): for o,p in zip(['constituents','materialpoints'],['con_physics','mat_physics']): for oo in self.iter_visible(o): @@ -332,9 +332,9 @@ class DADF5(): def list_data(self): """Return information on all active datasets in the file.""" message = '' - with h5py.File(self.filename,'r') as f: - for i in self.iter_visible('increments'): - message+='\n{} ({}s)\n'.format(i,self.times[self.increments.index(i)]) + with h5py.File(self.fname,'r') as f: + for s,i in enumerate(self.iter_visible('increments')): + message+='\n{} ({}s)\n'.format(i,self.times[s]) for o,p in zip(['constituents','materialpoints'],['con_physics','mat_physics']): for oo in self.iter_visible(o): message+=' {}\n'.format(oo) @@ -353,7 +353,7 @@ class DADF5(): def get_dataset_location(self,label): """Return the location of all active datasets with given label.""" path = [] - with h5py.File(self.filename,'r') as f: + with h5py.File(self.fname,'r') as f: for i in self.iter_visible('increments'): k = '/'.join([i,'geometry',label]) try: @@ -375,14 +375,14 @@ class DADF5(): def get_constituent_ID(self,c=0): """Pointwise constituent ID.""" - with h5py.File(self.filename,'r') as f: + with h5py.File(self.fname,'r') as f: names = f['/mapping/cellResults/constituent']['Name'][:,c].astype('str') return np.array([int(n.split('_')[0]) for n in names.tolist()],dtype=np.int32) def get_crystal_structure(self): # ToDo: extension to multi constituents/phase """Info about the crystal structure.""" - with h5py.File(self.filename,'r') as f: + with h5py.File(self.fname,'r') as f: return f[self.get_dataset_location('orientation')[0]].attrs['Lattice'].astype('str') # np.bytes_ to string @@ -392,7 +392,7 @@ class DADF5(): If more than one path is given, the dataset is composed of the individual contributions. """ - with h5py.File(self.filename,'r') as f: + with h5py.File(self.fname,'r') as f: shape = (self.Nmaterialpoints,) + np.shape(f[path[0]])[1:] if len(shape) == 1: shape = shape +(1,) dataset = np.full(shape,np.nan,dtype=np.dtype(f[path[0]])) @@ -435,7 +435,7 @@ class DADF5(): ) return np.concatenate((x[:,:,:,None],y[:,:,:,None],y[:,:,:,None]),axis = 3).reshape([np.product(self.grid),3]) else: - with h5py.File(self.filename,'r') as f: + with h5py.File(self.fname,'r') as f: return f['geometry/x_c'][()] @@ -815,7 +815,7 @@ class DADF5(): todo = [] # ToDo: It would be more memory efficient to read only from file when required, i.e. 
diff --git a/python/damask/geom.py b/python/damask/geom.py
index 1c9e10cd1..32ea2ed89 100644
--- a/python/damask/geom.py
+++ b/python/damask/geom.py
@@ -239,8 +239,8 @@ class Geom():
       header.append('homogenization {}'.format(self.get_homogenization()))
     return header

-  @classmethod
-  def from_file(cls,fname):
+  @staticmethod
+  def from_file(fname):
     """
     Reads a geom file.

@@ -300,7 +300,7 @@ class Geom():
     if not np.any(np.mod(microstructure.flatten(),1) != 0.0): # no float present
       microstructure = microstructure.astype('int')

-    return cls(microstructure.reshape(grid),size,origin,homogenization,comments)
+    return Geom(microstructure.reshape(grid),size,origin,homogenization,comments)


   def to_file(self,fname,pack=None):
diff --git a/python/damask/mechanics.py b/python/damask/mechanics.py
index 476682380..5503d7048 100644
--- a/python/damask/mechanics.py
+++ b/python/damask/mechanics.py
@@ -19,7 +19,26 @@ def Cauchy(F,P):
   else:
     sigma = np.einsum('i,ijk,ilk->ijl',1.0/np.linalg.det(F),P,F)
   return symmetric(sigma)
-
+
+
+def PK2(F,P):
+  """
+  Return 2. Piola-Kirchhoff stress calculated from 1. Piola-Kirchhoff stress and deformation gradient.
+
+  Parameters
+  ----------
+  F : numpy.array of shape (:,3,3) or (3,3)
+      Deformation gradient.
+  P : numpy.array of shape (:,3,3) or (3,3)
+      1. Piola-Kirchhoff stress.
+
+  """
+  if np.shape(F) == np.shape(P) == (3,3):
+    S = np.dot(np.linalg.inv(F),P)
+  else:
+    S = np.einsum('ijk,ikl->ijl',np.linalg.inv(F),P)
+  return S
+
 
 def strain_tensor(F,t,m):
   """
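`PK2` mirrors `Cauchy`: a `np.dot` branch for a single (3,3) tensor and an `einsum` branch for stacks. A quick check of the batched branch (random data, assuming `damask` from this commit is importable):

```python
import numpy as np
import damask

F = np.broadcast_to(np.eye(3), (10, 3, 3))          # ten undeformed states
P = np.random.rand(10, 3, 3)
assert np.allclose(damask.mechanics.PK2(F, P), P)   # F = I, so S equals P
```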
diff --git a/python/damask/table.py b/python/damask/table.py
new file mode 100644
index 000000000..6181fdb1f
--- /dev/null
+++ b/python/damask/table.py
@@ -0,0 +1,257 @@
+import re
+
+import pandas as pd
+import numpy as np
+
+class Table():
+    """Store spreadsheet-like data."""
+
+    def __init__(self,data,shapes,comments=None):
+        """
+        New spreadsheet.
+
+        Parameters
+        ----------
+        data : numpy.ndarray
+            Data.
+        shapes : dict with str:tuple pairs
+            Shapes of the columns. Example 'F':(3,3) for a deformation gradient.
+        comments : iterable of str, optional
+            Additional, human-readable information.
+
+        """
+        self.comments = [] if comments is None else [c for c in comments]
+        self.data = pd.DataFrame(data=data)
+        self.shapes = shapes
+        self.__label_condensed()
+
+
+    def __label_flat(self):
+        """Label data individually, e.g. v v v ==> 1_v 2_v 3_v."""
+        labels = []
+        for label,shape in self.shapes.items():
+            size = np.prod(shape)
+            labels += ['{}{}'.format('' if size == 1 else '{}_'.format(i+1),label) for i in range(size)]
+        self.data.columns = labels
+
+
+    def __label_condensed(self):
+        """Label data condensed, e.g. 1_v 2_v 3_v ==> v v v."""
+        labels = []
+        for label,shape in self.shapes.items():
+            labels += [label] * np.prod(shape)
+        self.data.columns = labels
+
+
+    def __add_comment(self,label,shape,info):
+        if info is not None:
+            self.comments.append('{}{}: {}'.format(label,
+                                                   ' '+str(shape) if np.prod(shape,dtype=int) > 1 else '',
+                                                   info))
+
+
+    @staticmethod
+    def from_ASCII(fname):
+        """
+        Create table from ASCII file.
+
+        The first line needs to indicate the number of subsequent header lines as 'n header'.
+        Vector data column labels are indicated by '1_v, 2_v, ..., n_v'.
+        Tensor data column labels are indicated by '3x3:1_T, 3x3:2_T, ..., 3x3:9_T'.
+
+        Parameters
+        ----------
+        fname : file, str, or pathlib.Path
+            Filename or file for reading.
+
+        """
+        try:
+            f = open(fname)
+        except TypeError:
+            f = fname
+
+        header,keyword = f.readline().split()
+        if keyword == 'header':
+            header = int(header)
+        else:
+            raise Exception
+        comments = [f.readline()[:-1] for i in range(1,header)]
+        labels = f.readline().split()
+
+        shapes = {}
+        for label in labels:
+            tensor_column = re.search(r'[0-9,x]*?:[0-9]*?_',label)
+            if tensor_column:
+                my_shape = tensor_column.group().split(':',1)[0].split('x')
+                shapes[label.split('_',1)[1]] = tuple([int(d) for d in my_shape])
+            else:
+                vector_column = re.match(r'[0-9]*?_',label)
+                if vector_column:
+                    shapes[label.split('_',1)[1]] = (int(label.split('_',1)[0]),)
+                else:
+                    shapes[label] = (1,)
+
+        data = pd.read_csv(f,names=list(range(len(labels))),sep=r'\s+').to_numpy()
+
+        return Table(data,shapes,comments)
+
+    @property
+    def labels(self):
+        """Return the labels of all columns."""
+        return list(self.shapes.keys())
+
+
+    def get(self,label):
+        """
+        Get column data.
+
+        Parameters
+        ----------
+        label : str
+            Column label.
+
+        """
+        if re.match(r'[0-9]*?_',label):
+            idx,key = label.split('_',1)
+            data = self.data[key].to_numpy()[:,int(idx)-1].reshape((-1,1))
+        else:
+            data = self.data[label].to_numpy().reshape((-1,)+self.shapes[label])
+
+        return data.astype(type(data.flatten()[0]))
+
+
+    def set(self,label,data,info=None):
+        """
+        Set column data.
+
+        Parameters
+        ----------
+        label : str
+            Column label.
+        data : np.ndarray
+            New data.
+        info : str, optional
+            Human-readable information about the new data.
+
+        """
+        self.__add_comment(label,data.shape[1:],info)
+
+        if re.match(r'[0-9]*?_',label):
+            idx,key = label.split('_',1)
+            iloc = self.data.columns.get_loc(key).tolist().index(True) + int(idx) -1
+            self.data.iloc[:,iloc] = data
+        else:
+            self.data[label] = data.reshape(self.data[label].shape)
+
+
+    def add(self,label,data,info=None):
+        """
+        Add column data.
+
+        Parameters
+        ----------
+        label : str
+            Column label.
+        data : np.ndarray
+            Modified data.
+        info : str, optional
+            Human-readable information about the modified data.
+
+        """
+        self.__add_comment(label,data.shape[1:],info)
+
+        self.shapes[label] = data.shape[1:] if len(data.shape) > 1 else (1,)
+        size = np.prod(data.shape[1:],dtype=int)
+        new = pd.DataFrame(data=data.reshape(-1,size),
+                           columns=[label]*size,
+                          )
+        new.index = self.data.index
+        self.data = pd.concat([self.data,new],axis=1)
+
+
+    def delete(self,label):
+        """
+        Delete column data.
+
+        Parameters
+        ----------
+        label : str
+            Column label.
+
+        """
+        self.data.drop(columns=label,inplace=True)
+
+        del self.shapes[label]
+ + """ + self.data.rename(columns={label_old:label_new},inplace=True) + + self.comments.append('{} => {}{}'.format(label_old, + label_new, + '' if info is None else ': {}'.format(info), + )) + + self.shapes[label_new] = self.shapes.pop(label_old) + + + def sort_by(self,labels,ascending=True): + """ + Get column data. + + Parameters + ---------- + label : str or list + Column labels. + ascending : bool or list, optional + Set sort order. + + """ + self.__label_flat() + self.data.sort_values(labels,axis=0,inplace=True,ascending=ascending) + self.__label_condensed() + self.comments.append('sorted by [{}]'.format(', '.join(labels))) + + + def to_ASCII(self,fname): + """ + Store as plain text file. + + Parameters + ---------- + fname : file, str, or pathlib.Path + Filename or file for reading. + + """ + labels = [] + for l in self.shapes: + if(self.shapes[l] == (1,)): + labels.append('{}'.format(l)) + elif(len(self.shapes[l]) == 1): + labels += ['{}_{}'.format(i+1,l) \ + for i in range(self.shapes[l][0])] + else: + labels += ['{}:{}_{}'.format('x'.join([str(d) for d in self.shapes[l]]),i+1,l) \ + for i in range(np.prod(self.shapes[l],dtype=int))] + + header = ['{} header'.format(len(self.comments)+1)] \ + + self.comments \ + + [' '.join(labels)] + + try: + f = open(fname,'w') + except TypeError: + f = fname + for line in header: f.write(line+'\n') + self.data.to_csv(f,sep=' ',index=False,header=False) diff --git a/python/tests/reference/Table/datatype-mix.txt b/python/tests/reference/Table/datatype-mix.txt new file mode 100644 index 000000000..2f6baa852 --- /dev/null +++ b/python/tests/reference/Table/datatype-mix.txt @@ -0,0 +1,4 @@ +1 header +a b +1.0 hallo +0.1 "hallo test" diff --git a/python/tests/reference/Table/whitespace-mix.txt b/python/tests/reference/Table/whitespace-mix.txt new file mode 100644 index 000000000..933a16e77 --- /dev/null +++ b/python/tests/reference/Table/whitespace-mix.txt @@ -0,0 +1,6 @@ +1 header +a b 1_c 2_c +1 2 3 4 +5 6 7 8 +9 10. 12. 
diff --git a/python/tests/reference/Table/datatype-mix.txt b/python/tests/reference/Table/datatype-mix.txt
new file mode 100644
index 000000000..2f6baa852
--- /dev/null
+++ b/python/tests/reference/Table/datatype-mix.txt
@@ -0,0 +1,4 @@
+1 header
+a b
+1.0 hallo
+0.1 "hallo test"
diff --git a/python/tests/reference/Table/whitespace-mix.txt b/python/tests/reference/Table/whitespace-mix.txt
new file mode 100644
index 000000000..933a16e77
--- /dev/null
+++ b/python/tests/reference/Table/whitespace-mix.txt
@@ -0,0 +1,6 @@
+1 header
+a  b  1_c  2_c
+1   2   3   4
+5 6 7 8
+9 10. 12. 12
+
diff --git a/python/tests/test_Table.py b/python/tests/test_Table.py
new file mode 100644
index 000000000..a0dc31975
--- /dev/null
+++ b/python/tests/test_Table.py
@@ -0,0 +1,128 @@
+import os
+
+import pytest
+import numpy as np
+
+from damask import Table
+
+
+@pytest.fixture
+def default():
+    """Simple Table."""
+    x = np.ones((5,13),dtype=float)
+    return Table(x,{'F':(3,3),'v':(3,),'s':(1,)},['test data','contains only ones'])
+
+@pytest.fixture
+def reference_dir(reference_dir_base):
+    """Directory containing reference results."""
+    return os.path.join(reference_dir_base,'Table')
+
+class TestTable:
+
+    def test_get_scalar(self,default):
+        d = default.get('s')
+        assert np.allclose(d,1.0) and d.shape[1:] == (1,)
+
+    def test_get_vector(self,default):
+        d = default.get('v')
+        assert np.allclose(d,1.0) and d.shape[1:] == (3,)
+
+    def test_get_tensor(self,default):
+        d = default.get('F')
+        assert np.allclose(d,1.0) and d.shape[1:] == (3,3)
+
+    def test_get_component(self,default):
+        d = default.get('5_F')
+        assert np.allclose(d,1.0) and d.shape[1:] == (1,)
+
+    def test_write_read_str(self,default,tmpdir):
+        default.to_ASCII(str(tmpdir.join('default.txt')))
+        new = Table.from_ASCII(str(tmpdir.join('default.txt')))
+        assert all(default.data==new.data)
+
+    def test_write_read_file(self,default,tmpdir):
+        with open(tmpdir.join('default.txt'),'w') as f:
+            default.to_ASCII(f)
+        with open(tmpdir.join('default.txt')) as f:
+            new = Table.from_ASCII(f)
+        assert all(default.data==new.data)
+
+    @pytest.mark.parametrize('fname',['datatype-mix.txt','whitespace-mix.txt'])
+    def test_read_strange(self,reference_dir,fname):
+        with open(os.path.join(reference_dir,fname)) as f:
+            Table.from_ASCII(f)
+
+    def test_set(self,default):
+        default.set('F',np.zeros((5,3,3)),'set to zero')
+        d=default.get('F')
+        assert np.allclose(d,0.0) and d.shape[1:] == (3,3)
+
+    def test_labels(self,default):
+        assert default.labels == ['F','v','s']
+
+    def test_add(self,default):
+        d = np.random.random((5,9))
+        default.add('nine',d,'random data')
+        assert np.allclose(d,default.get('nine'))
+
+    def test_rename_equivalent(self,default):
+        v = default.get('v')
+        default.rename('v','u')
+        u = default.get('u')
+        assert np.all(v == u)
+
+    def test_rename_gone(self,default):
+        default.rename('v','V')
+        with pytest.raises(KeyError):
+            default.get('v')
+
+    def test_delete(self,default):
+        default.delete('v')
+        with pytest.raises(KeyError):
+            default.get('v')
+
+
+    def test_invalid_initialization(self):
+        x = np.random.random((5,10))
+        with pytest.raises(ValueError):
+            Table(x,{'F':(3,3)})
+
+    def test_invalid_set(self,default):
+        x = default.get('v')
+        with pytest.raises(ValueError):
+            default.set('F',x,'does not work')
+
+    def test_invalid_get(self,default):
+        with pytest.raises(KeyError):
+            default.get('n')
+
+    def test_sort_scalar(self):
+        x = np.random.random((5,13))
+        t = Table(x,{'F':(3,3),'v':(3,),'s':(1,)},['random test data'])
+        unsort = t.get('s')
+        t.sort_by('s')
+        sort = t.get('s')
+        assert np.all(np.sort(unsort,0)==sort)
+
+    def test_sort_component(self):
+        x = np.random.random((5,12))
+        t = Table(x,{'F':(3,3),'v':(3,)},['random test data'])
+        unsort = t.get('4_F')
+        t.sort_by('4_F')
+        sort = t.get('4_F')
+        assert np.all(np.sort(unsort,0)==sort)
+
+    def test_sort_revert(self):
+        x = np.random.random((5,12))
+        t = Table(x,{'F':(3,3),'v':(3,)},['random test data'])
+        t.sort_by('4_F',ascending=False)
+        sort = t.get('4_F')
+        assert np.all(np.sort(sort,0)==sort[::-1,:])
+
+    def test_sort(self):
+        t = Table(np.array([[0,1,],[2,1,]]),
+                  {'v':(2,)},
+                  ['test data'])
+        t.add('s',np.array(['b','a']))
+        t.sort_by('s')
+        assert np.all(t.get('1_v') == np.array([2,0]).reshape((2,1)))
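Taken together, every rewritten filter in this patch is an instance of the same four-line pattern. An end-to-end sketch, with file and column names as assumptions ('f' and 'p' match addCauchy.py's defaults):

```python
import damask

t = damask.Table.from_ASCII('postProc/data.txt')          # hypothetical input file
t.add('Cauchy',
      damask.mechanics.Cauchy(t.get('f').reshape(-1,3,3),
                              t.get('p').reshape(-1,3,3)).reshape(-1,9),
      'example of what addCauchy.py records as provenance')
t.to_ASCII('postProc/data_Cauchy.txt')
```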