Merge branch 'new-ASCII' into grid-filters

Martin Diehl 2019-11-28 05:12:36 +01:00
commit 98d5738fe6
13 changed files with 253 additions and 413 deletions

View File

@@ -1 +1 @@
-v2.0.3-1073-g6f3cb071
+v2.0.3-1097-ga7fca4df

View File

@@ -2,10 +2,9 @@
 import os
 import sys
+from io import StringIO
 from optparse import OptionParser
-import numpy as np
 import damask
@@ -36,54 +35,15 @@ parser.set_defaults(defgrad = 'f',
                    )
 (options,filenames) = parser.parse_args()
-# --- loop over input files -------------------------------------------------------------------------
 if filenames == []: filenames = [None]
 for name in filenames:
-  try:
-    table = damask.ASCIItable(name = name, buffered = False)
-  except:
-    continue
-  damask.util.report(scriptName,name)
-# ------------------------------------------ read header ------------------------------------------
-  table.head_read()
-# ------------------------------------------ sanity checks ----------------------------------------
-  errors  = []
-  column = {}
-  for tensor in [options.defgrad,options.stress]:
-    dim = table.label_dimension(tensor)
-    if   dim <  0: errors.append('column {} not found.'.format(tensor))
-    elif dim != 9: errors.append('column {} is not a tensor.'.format(tensor))
-    else:
-      column[tensor] = table.label_index(tensor)
-  if errors != []:
-    damask.util.croak(errors)
-    table.close(dismiss = True)
-    continue
-# ------------------------------------------ assemble header --------------------------------------
-  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
-  table.labels_append(['{}_Cauchy'.format(i+1) for i in range(9)])          # extend ASCII header with new labels
-  table.head_write()
-# ------------------------------------------ process data ------------------------------------------
-  outputAlive = True
-  while outputAlive and table.data_read():                                  # read next data line of ASCII table
-    F = np.array(list(map(float,table.data[column[options.defgrad]:column[options.defgrad]+9])),'d').reshape(3,3)
-    P = np.array(list(map(float,table.data[column[options.stress ]:column[options.stress ]+9])),'d').reshape(3,3)
-    table.data_append(list(1.0/np.linalg.det(F)*np.dot(P,F.T).reshape(9)))  # [Cauchy] = (1/det(F)) * [P].[F_transpose]
-    outputAlive = table.data_write()                                        # output processed line
-# ------------------------------------------ output finalization -----------------------------------
-  table.close()                                                             # close input ASCII table (works for stdin)
+    damask.util.report(scriptName,name)
+
+    table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
+    table.add_array('Cauchy',
+                    damask.mechanics.Cauchy(table.get_array(options.defgrad).reshape(-1,3,3),
+                                            table.get_array(options.stress).reshape(-1,3,3)).reshape(-1,9),
+                    scriptID+' '+' '.join(sys.argv[1:]))
+
+    table.to_ASCII(sys.stdout if name is None else name)
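For reference, both the removed inline loop and the new damask.mechanics.Cauchy call compute the same quantity, sigma = (1/det(F)) P F^T, symmetrized. A minimal standalone numpy sketch of that formula (test data made up here, not part of the commit):

import numpy as np

F = np.eye(3) + 1e-3*np.random.rand(3,3)   # deformation gradient close to identity (made-up test data)
P = np.random.rand(3,3)                    # 1. Piola-Kirchhoff stress (made-up test data)

sigma = np.dot(P,F.T)/np.linalg.det(F)     # [Cauchy] = (1/det(F)) * [P].[F^T]
sigma = 0.5*(sigma+sigma.T)                # symmetrize, as damask.mechanics.Cauchy does via symmetric()
print(sigma)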

View File

@@ -2,22 +2,16 @@
 import os
 import sys
+from io import StringIO
 from optparse import OptionParser
+import numpy as np
 import damask
 scriptName = os.path.splitext(os.path.basename(__file__))[0]
 scriptID   = ' '.join([scriptName,damask.version])
-def determinant(m):
-  return +m[0]*m[4]*m[8] \
-         +m[1]*m[5]*m[6] \
-         +m[2]*m[3]*m[7] \
-         -m[2]*m[4]*m[6] \
-         -m[1]*m[3]*m[8] \
-         -m[0]*m[5]*m[7]
 # --------------------------------------------------------------------
 #                                MAIN
@@ -34,61 +28,18 @@ parser.add_option('-t','--tensor',
                   help = 'heading of columns containing tensor field values')
 (options,filenames) = parser.parse_args()
+if filenames == []: filenames = [None]
 if options.tensor is None:
   parser.error('no data column specified.')
-# --- loop over input files -------------------------------------------------------------------------
-if filenames == []: filenames = [None]
 for name in filenames:
-  try:
-    table = damask.ASCIItable(name = name,
-                              buffered = False)
-  except: continue
-  damask.util.report(scriptName,name)
-# ------------------------------------------ read header ------------------------------------------
-  table.head_read()
-# ------------------------------------------ sanity checks ----------------------------------------
-  items = {
-            'tensor': {'dim': 9, 'shape': [3,3], 'labels':options.tensor, 'column': []},
-          }
-  errors  = []
-  remarks = []
-  for type, data in items.items():
-    for what in data['labels']:
-      dim = table.label_dimension(what)
-      if dim != data['dim']: remarks.append('column {} is not a {}...'.format(what,type))
-      else:
-        items[type]['column'].append(table.label_index(what))
-        table.labels_append('det({})'.format(what))                         # extend ASCII header with new labels
-  if remarks != []: damask.util.croak(remarks)
-  if errors  != []:
-    damask.util.croak(errors)
-    table.close(dismiss = True)
-    continue
-# ------------------------------------------ assemble header --------------------------------------
-  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
-  table.head_write()
-# ------------------------------------------ process data ------------------------------------------
-  outputAlive = True
-  while outputAlive and table.data_read():                                  # read next data line of ASCII table
-    for type, data in items.items():
-      for column in data['column']:
-        table.data_append(determinant(list(map(float,table.data[column: column+data['dim']]))))
-    outputAlive = table.data_write()                                        # output processed line
-# ------------------------------------------ output finalization -----------------------------------
-  table.close()                                                             # close input ASCII table (works for stdin)
+    damask.util.report(scriptName,name)
+
+    table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
+    for tensor in options.tensor:
+        table.add_array('det({})'.format(tensor),
+                        np.linalg.det(table.get_array(tensor).reshape(-1,3,3)),
+                        scriptID+' '+' '.join(sys.argv[1:]))
+
+    table.to_ASCII(sys.stdout if name is None else name)
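The removed determinant() computed one flattened 3x3 tensor at a time; np.linalg.det accepts a whole (n,3,3) stack and evaluates all determinants in a single call, which is what the new add_array one-liner relies on. A small equivalence check (made-up data, not from the commit):

import numpy as np

m = np.random.rand(5,9)                                  # five tensors, flattened row-major as in the ASCII table
det_loop = [ m[i,0]*m[i,4]*m[i,8] + m[i,1]*m[i,5]*m[i,6] + m[i,2]*m[i,3]*m[i,7]
            -m[i,2]*m[i,4]*m[i,6] - m[i,1]*m[i,3]*m[i,8] - m[i,0]*m[i,5]*m[i,7] for i in range(5)]
det_vec  = np.linalg.det(m.reshape(-1,3,3))              # vectorized replacement
print(np.allclose(det_loop,det_vec))                     # True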

View File

@@ -2,6 +2,7 @@
 import os
 import sys
+from io import StringIO
 from optparse import OptionParser
 import damask
@@ -9,17 +10,6 @@ import damask
 scriptName = os.path.splitext(os.path.basename(__file__))[0]
 scriptID   = ' '.join([scriptName,damask.version])
-oneThird = 1.0/3.0
-def deviator(m,spherical = False):            # Careful, do not change the value of m, its intent(inout)!
-  sph = oneThird*(m[0]+m[4]+m[8])
-  dev = [
-    m[0]-sph,  m[1],      m[2],
-    m[3],      m[4]-sph,  m[5],
-    m[6],      m[7],      m[8]-sph,
-  ]
-  return dev,sph if spherical else dev
 # --------------------------------------------------------------------
 #                                MAIN
@@ -40,67 +30,22 @@ parser.add_option('-s','--spherical',
                   help = 'report spherical part of tensor (hydrostatic component, pressure)')
 (options,filenames) = parser.parse_args()
+if options.tensor is None:
+    parser.error('no data column specified...')
-# --- loop over input files -------------------------------------------------------------------------
 if filenames == []: filenames = [None]
-if options.tensor is None:
-  parser.error('no data column specified...')
 for name in filenames:
-  try:
-    table = damask.ASCIItable(name = name, buffered = False)
-  except:
-    continue
-  damask.util.report(scriptName,name)
-# ------------------------------------------ read header ------------------------------------------
-  table.head_read()
-# ------------------------------------------ sanity checks ----------------------------------------
-  items = {
-            'tensor': {'dim': 9, 'shape': [3,3], 'labels':options.tensor, 'active':[], 'column': []},
-          }
-  errors  = []
-  remarks = []
-  column = {}
-  for type, data in items.items():
-    for what in data['labels']:
-      dim = table.label_dimension(what)
-      if dim != data['dim']: remarks.append('column {} is not a {}.'.format(what,type))
-      else:
-        items[type]['active'].append(what)
-        items[type]['column'].append(table.label_index(what))
-  if remarks != []: damask.util.croak(remarks)
-  if errors  != []:
-    damask.util.croak(errors)
-    table.close(dismiss = True)
-    continue
-# ------------------------------------------ assemble header --------------------------------------
-  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
-  for type, data in items.items():
-    for label in data['active']:
-      table.labels_append(['{}_dev({})'.format(i+1,label) for i in range(data['dim'])] + \
-                          (['sph({})'.format(label)] if options.spherical else []))   # extend ASCII header with new labels
-  table.head_write()
-# ------------------------------------------ process data ------------------------------------------
-  outputAlive = True
-  while outputAlive and table.data_read():                                  # read next data line of ASCII table
-    for type, data in items.items():
-      for column in data['column']:
-        table.data_append(deviator(list(map(float,table.data[column:
-                                                             column+data['dim']])),options.spherical))
-    outputAlive = table.data_write()                                        # output processed line
-# ------------------------------------------ output finalization -----------------------------------
-  table.close()                                                             # close input ASCII table (works for stdin)
+    damask.util.report(scriptName,name)
+
+    table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
+    for tensor in options.tensor:
+        table.add_array('dev({})'.format(tensor),
+                        damask.mechanics.deviatoric_part(table.get_array(tensor).reshape(-1,3,3)).reshape((-1,9)),
+                        scriptID+' '+' '.join(sys.argv[1:]))
+        if options.spherical:
+            table.add_array('sph({})'.format(tensor),
+                            damask.mechanics.spherical_part(table.get_array(tensor).reshape(-1,3,3)),
+                            scriptID+' '+' '.join(sys.argv[1:]))
+
+    table.to_ASCII(sys.stdout if name is None else name)
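damask.mechanics.deviatoric_part and spherical_part replace the hand-rolled deviator(): the split is sph = tr(m)/3 and dev = m - sph*I. A standalone numpy sketch of that decomposition (made-up data, not from the commit):

import numpy as np

m   = np.random.rand(3,3)                  # made-up test tensor
sph = np.trace(m)/3.0                      # spherical (hydrostatic) part
dev = m - sph*np.eye(3)                    # deviatoric part
print(np.isclose(np.trace(dev),0.0))       # the deviator is trace-free: True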

View File

@@ -2,10 +2,8 @@
 import os
 import sys
+from io import StringIO
 from optparse import OptionParser
-from collections import OrderedDict
-import numpy as np
 import damask
@@ -13,15 +11,6 @@ import damask
 scriptName = os.path.splitext(os.path.basename(__file__))[0]
 scriptID   = ' '.join([scriptName,damask.version])
-def Mises(what,tensor):
-  dev = tensor - np.trace(tensor)/3.0*np.eye(3)
-  symdev = 0.5*(dev+dev.T)
-  return np.sqrt(np.sum(symdev*symdev.T)*
-                 {
-                  'stress': 3.0/2.0,
-                  'strain': 2.0/3.0,
-                 }[what.lower()])
 # --------------------------------------------------------------------
 #                                MAIN
@@ -47,62 +36,21 @@ parser.set_defaults(strain = [],
 (options,filenames) = parser.parse_args()
 if options.stress is [] and options.strain is []:
   parser.error('no data column specified...')
-# --- loop over input files -------------------------------------------------------------------------
 if filenames == []: filenames = [None]
 for name in filenames:
-  try:
-    table = damask.ASCIItable(name = name,
-                              buffered = False)
-  except: continue
-  damask.util.report(scriptName,name)
-# ------------------------------------------ read header ------------------------------------------
-  table.head_read()
-# ------------------------------------------ sanity checks ----------------------------------------
-  items = OrderedDict([
-                ('strain', {'dim': 9, 'shape': [3,3], 'labels':options.strain, 'active':[], 'column': []}),
-                ('stress', {'dim': 9, 'shape': [3,3], 'labels':options.stress, 'active':[], 'column': []})
-                      ])
-  errors  = []
-  remarks = []
-  for type, data in items.items():
-    for what in data['labels']:
-      dim = table.label_dimension(what)
-      if dim != data['dim']: remarks.append('column {} is not a {}...'.format(what,type))
-      else:
-        items[type]['active'].append(what)
-        items[type]['column'].append(table.label_index(what))
-        table.labels_append('Mises({})'.format(what))                       # extend ASCII header with new labels
-  if remarks != []: damask.util.croak(remarks)
-  if errors  != []:
-    damask.util.croak(errors)
-    table.close(dismiss = True)
-    continue
-# ------------------------------------------ assemble header --------------------------------------
-  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
-  table.head_write()
-# ------------------------------------------ process data ------------------------------------------
-  outputAlive = True
-  while outputAlive and table.data_read():                                  # read next data line of ASCII table
-    for type, data in items.items():
-      for column in data['column']:
-        table.data_append(Mises(type,
-                                np.array(table.data[column:column+data['dim']],'d').reshape(data['shape'])))
-    outputAlive = table.data_write()                                        # output processed line
-# ------------------------------------------ output finalization -----------------------------------
-  table.close()                                                             # close input ASCII table (works for stdin)
+    damask.util.report(scriptName,name)
+
+    table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
+    for strain in options.strain:
+        table.add_array('Mises({})'.format(strain),
+                        damask.mechanics.Mises_strain(damask.mechanics.symmetric(table.get_array(strain).reshape(-1,3,3))),
+                        scriptID+' '+' '.join(sys.argv[1:]))
+    for stress in options.stress:
+        table.add_array('Mises({})'.format(stress),
+                        damask.mechanics.Mises_stress(damask.mechanics.symmetric(table.get_array(stress).reshape(-1,3,3))),
+                        scriptID+' '+' '.join(sys.argv[1:]))
+
+    table.to_ASCII(sys.stdout if name is None else name)
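Mises_stress and Mises_strain apply the usual equivalent measures, sqrt(3/2 s:s) for stress and sqrt(2/3 e:e) for strain, to the symmetrized deviator, matching the removed Mises() helper. A standalone sketch of the stress case (made-up data):

import numpy as np

sigma  = np.random.rand(3,3)                       # made-up test stress
dev    = sigma - np.trace(sigma)/3.0*np.eye(3)     # deviatoric part
symdev = 0.5*(dev+dev.T)                           # symmetrized deviator
print(np.sqrt(3.0/2.0*np.sum(symdev*symdev)))      # von Mises equivalent stress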

View File

@@ -2,10 +2,9 @@
 import os
 import sys
+from io import StringIO
 from optparse import OptionParser
-import numpy as np
 import damask
@@ -36,53 +35,16 @@ parser.set_defaults(defgrad = 'f',
                    )
 (options,filenames) = parser.parse_args()
-# --- loop over input files -------------------------------------------------------------------------
 if filenames == []: filenames = [None]
 for name in filenames:
-  try:
-    table = damask.ASCIItable(name = name,
-                              buffered = False)
-  except: continue
-  damask.util.report(scriptName,name)
-# ------------------------------------------ read header ------------------------------------------
-  table.head_read()
-# ------------------------------------------ sanity checks ----------------------------------------
-  errors  = []
-  column = {}
-  for tensor in [options.defgrad,options.stress]:
-    dim = table.label_dimension(tensor)
-    if   dim <  0: errors.append('column {} not found.'.format(tensor))
-    elif dim != 9: errors.append('column {} is not a tensor.'.format(tensor))
-    else:
-      column[tensor] = table.label_index(tensor)
-  if errors != []:
-    damask.util.croak(errors)
-    table.close(dismiss = True)
-    continue
-# ------------------------------------------ assemble header --------------------------------------
-  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
-  table.labels_append(['{}_S'.format(i+1) for i in range(9)])               # extend ASCII header with new labels
-  table.head_write()
-# ------------------------------------------ process data ------------------------------------------
-  outputAlive = True
-  while outputAlive and table.data_read():                                  # read next data line of ASCII table
-    F = np.array(list(map(float,table.data[column[options.defgrad]:column[options.defgrad]+9])),'d').reshape(3,3)
-    P = np.array(list(map(float,table.data[column[options.stress ]:column[options.stress ]+9])),'d').reshape(3,3)
-    table.data_append(list(np.dot(np.linalg.inv(F),P).reshape(9)))          # [S] =[P].[F-1]
-    outputAlive = table.data_write()                                        # output processed line
-# ------------------------------------------ output finalization -----------------------------------
-  table.close()                                                             # close input ASCII table (works for stdin)
+    damask.util.report(scriptName,name)
+
+    table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
+    table.add_array('S',
+                    damask.mechanics.PK2(table.get_array(options.defgrad).reshape(-1,3,3),
+                                         table.get_array(options.stress).reshape(-1,3,3)).reshape(-1,9),
+                    scriptID+' '+' '.join(sys.argv[1:]))
+
+    table.to_ASCII(sys.stdout if name is None else name)

View File

@@ -2,10 +2,9 @@
 import os
 import sys
+from io import StringIO
 from optparse import OptionParser
-import numpy as np
 import damask
@@ -35,58 +34,17 @@ parser.set_defaults(label = [],
                    )
 (options,filenames) = parser.parse_args()
+if len(options.label) != len(options.factor):
+    parser.error('number of column labels and factors do not match.')
-# --- loop over input files -------------------------------------------------------------------------
 if filenames == []: filenames = [None]
-if len(options.label) != len(options.factor):
-  parser.error('number of column labels and factors do not match.')
 for name in filenames:
-  try:
-    table = damask.ASCIItable(name = name,
-                              buffered = False)
-  except: continue
-  damask.util.report(scriptName,name)
-# ------------------------------------------ read header ------------------------------------------
-  table.head_read()
-  errors  = []
-  remarks = []
-  columns = []
-  dims    = []
-  factors = []
-  for what,factor in zip(options.label,options.factor):
-    col = table.label_index(what)
-    if col < 0: remarks.append('column {} not found...'.format(what,type))
-    else:
-      columns.append(col)
-      factors.append(float(factor))
-      dims.append(table.label_dimension(what))
-  if remarks != []: damask.util.croak(remarks)
-  if errors  != []:
-    damask.util.croak(errors)
-    table.close(dismiss = True)
-    continue
-# ------------------------------------------ assemble header ---------------------------------------
-  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
-  table.head_write()
-# ------------------------------------------ process data ------------------------------------------
-  outputAlive = True
-  while outputAlive and table.data_read():                                  # read next data line of ASCII table
-    for col,dim,factor in zip(columns,dims,factors):                        # loop over items
-      table.data[col:col+dim] = factor * np.array(table.data[col:col+dim],'d')
-    outputAlive = table.data_write()                                        # output processed line
-# ------------------------------------------ output finalization -----------------------------------
-  table.close()                                                             # close ASCII tables
+    damask.util.report(scriptName,name)
+
+    table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
+    for i,label in enumerate(options.label):
+        table.set_array(label,table.get_array(label)*float(options.factor[i]),
+                        scriptID+' '+' '.join(sys.argv[1:]))
+
+    table.to_ASCII(sys.stdout if name is None else name)

View File

@@ -2,10 +2,9 @@
 import os
 import sys
+from io import StringIO
 from optparse import OptionParser
-import numpy as np
 import damask
@@ -35,58 +34,17 @@ parser.set_defaults(label = [],
                    )
 (options,filenames) = parser.parse_args()
+if len(options.label) != len(options.offset):
+    parser.error('number of column labels and offsets do not match.')
-# --- loop over input files -------------------------------------------------------------------------
 if filenames == []: filenames = [None]
-if len(options.label) != len(options.offset):
-  parser.error('number of column labels and offsets do not match.')
 for name in filenames:
-  try:
-    table = damask.ASCIItable(name = name,
-                              buffered = False)
-  except: continue
-  damask.util.report(scriptName,name)
-# ------------------------------------------ read header ------------------------------------------
-  table.head_read()
-  errors  = []
-  remarks = []
-  columns = []
-  dims    = []
-  offsets = []
-  for what,offset in zip(options.label,options.offset):
-    col = table.label_index(what)
-    if col < 0: remarks.append('column {} not found...'.format(what,type))
-    else:
-      columns.append(col)
-      offsets.append(float(offset))
-      dims.append(table.label_dimension(what))
-  if remarks != []: damask.util.croak(remarks)
-  if errors  != []:
-    damask.util.croak(errors)
-    table.close(dismiss = True)
-    continue
-# ------------------------------------------ assemble header ---------------------------------------
-  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
-  table.head_write()
-# ------------------------------------------ process data ------------------------------------------
-  outputAlive = True
-  while outputAlive and table.data_read():                                  # read next data line of ASCII table
-    for col,dim,offset in zip(columns,dims,offsets):                        # loop over items
-      table.data[col:col+dim] = offset + np.array(table.data[col:col+dim],'d')
-    outputAlive = table.data_write()                                        # output processed line
-# ------------------------------------------ output finalization -----------------------------------
-  table.close()                                                             # close ASCII tables
+    damask.util.report(scriptName,name)
+
+    table = damask.Table.from_ASCII(StringIO(''.join(sys.stdin.read())) if name is None else name)
+    for i,label in enumerate(options.label):
+        table.set_array(label,table.get_array(label)+float(options.offset[i]),
+                        scriptID+' '+' '.join(sys.argv[1:]))
+
+    table.to_ASCII(sys.stdout if name is None else name)
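The scaling and shifting scripts (this file and the previous one) now share the same idiom: read a column with get_array, transform it, write it back with set_array. A minimal file-less round-trip sketch with made-up data (Table is the class added below in python/damask/table.py):

import numpy as np
from damask import Table

table = Table(np.arange(5.0).reshape(-1,1),{'x':(1,)},['made-up example'])
table.set_array('x',table.get_array('x')*2.0,'scaled by 2')     # what the scaling script does per label
table.set_array('x',table.get_array('x')+1.0,'shifted by 1')    # what the shifting script does per label
print(table.get_array('x').flatten())                           # [1. 3. 5. 7. 9.]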

View File

@@ -9,6 +9,7 @@ name = 'damask'
 # classes
 from .environment import Environment # noqa
 from .asciitable  import ASCIItable  # noqa
+from .table       import Table       # noqa
 from .config      import Material    # noqa
 from .colormaps   import Colormap, Color # noqa

View File

@@ -18,17 +18,17 @@ class DADF5():
   """
   # ------------------------------------------------------------------
-  def __init__(self,filename):
+  def __init__(self,fname):
     """
     Opens an existing DADF5 file.
     Parameters
     ----------
-    filename : str
+    fname : str
       name of the DADF5 file to be openend.
     """
-    with h5py.File(filename,'r') as f:
+    with h5py.File(fname,'r') as f:
       if f.attrs['DADF5-major'] != 0 or not 2 <= f.attrs['DADF5-minor'] <= 3:
         raise TypeError('Unsupported DADF5 version {} '.format(f.attrs['DADF5-version']))
@@ -64,7 +64,7 @@ class DADF5():
                    'con_physics': self.con_physics,
                    'mat_physics': self.mat_physics}
-    self.filename = filename
+    self.fname = fname
   def __manage_visible(self,datasets,what,action):
@@ -298,7 +298,7 @@ class DADF5():
     groups = []
-    with h5py.File(self.filename,'r') as f:
+    with h5py.File(self.fname,'r') as f:
       for i in self.iter_visible('increments'):
         for o,p in zip(['constituents','materialpoints'],['con_physics','mat_physics']):
           for oo in self.iter_visible(o):
@@ -315,7 +315,7 @@ class DADF5():
   def list_data(self):
     """Return information on all active datasets in the file."""
     message = ''
-    with h5py.File(self.filename,'r') as f:
+    with h5py.File(self.fname,'r') as f:
       for s,i in enumerate(self.iter_visible('increments')):
         message+='\n{} ({}s)\n'.format(i,self.times[s])
         for o,p in zip(['constituents','materialpoints'],['con_physics','mat_physics']):
@@ -336,7 +336,7 @@ class DADF5():
   def get_dataset_location(self,label):
     """Return the location of all active datasets with given label."""
     path = []
-    with h5py.File(self.filename,'r') as f:
+    with h5py.File(self.fname,'r') as f:
       for i in self.iter_visible('increments'):
         k = '/'.join([i,'geometry',label])
         try:
@@ -358,14 +358,14 @@ class DADF5():
   def get_constituent_ID(self,c=0):
     """Pointwise constituent ID."""
-    with h5py.File(self.filename,'r') as f:
+    with h5py.File(self.fname,'r') as f:
       names = f['/mapping/cellResults/constituent']['Name'][:,c].astype('str')
     return np.array([int(n.split('_')[0]) for n in names.tolist()],dtype=np.int32)
   def get_crystal_structure(self):            # ToDo: extension to multi constituents/phase
     """Info about the crystal structure."""
-    with h5py.File(self.filename,'r') as f:
+    with h5py.File(self.fname,'r') as f:
       return f[self.get_dataset_location('orientation')[0]].attrs['Lattice'].astype('str')   # np.bytes_ to string
@@ -375,7 +375,7 @@ class DADF5():
     If more than one path is given, the dataset is composed of the individual contributions.
     """
-    with h5py.File(self.filename,'r') as f:
+    with h5py.File(self.fname,'r') as f:
       shape = (self.Nmaterialpoints,) + np.shape(f[path[0]])[1:]
       if len(shape) == 1: shape = shape +(1,)
       dataset = np.full(shape,np.nan,dtype=np.dtype(f[path[0]]))
@@ -418,7 +418,7 @@ class DADF5():
                          )
       return np.concatenate((x[:,:,:,None],y[:,:,:,None],y[:,:,:,None]),axis = 3).reshape([np.product(self.grid),3])
     else:
-      with h5py.File(self.filename,'r') as f:
+      with h5py.File(self.fname,'r') as f:
         return f['geometry/x_c'][()]
@@ -798,7 +798,7 @@ class DADF5():
     todo = []
     # ToDo: It would be more memory efficient to read only from file when required, i.e. do to it in pool.add_task
     for group in self.groups_with_datasets([d['label'] for d in datasets_requested]):
-      with h5py.File(self.filename,'r') as f:
+      with h5py.File(self.fname,'r') as f:
         datasets_in = {}
         for d in datasets_requested:
          loc = f[group+'/'+d['label']]
@@ -813,7 +813,7 @@ class DADF5():
     N_not_calculated = len(todo)
     while N_not_calculated > 0:
       result = results.get()
-      with h5py.File(self.filename,'a') as f:                               # write to file
+      with h5py.File(self.fname,'a') as f:                                  # write to file
        dataset_out = f[result['group']].create_dataset(result['label'],data=result['data'])
        for k in result['meta'].keys():
          dataset_out.attrs[k] = result['meta'][k].encode()

View File

@@ -239,8 +239,8 @@ class Geom():
     header.append('homogenization {}'.format(self.get_homogenization()))
     return header
-  @classmethod
-  def from_file(cls,fname):
+  @staticmethod
+  def from_file(fname):
     """
     Reads a geom file.
@@ -300,7 +300,7 @@
     if not np.any(np.mod(microstructure.flatten(),1) != 0.0):               # no float present
       microstructure = microstructure.astype('int')
-    return cls(microstructure.reshape(grid),size,origin,homogenization,comments)
+    return Geom(microstructure.reshape(grid),size,origin,homogenization,comments)
   def to_file(self,fname,pack=None):
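The decorator change matches the changed return statement: from_file no longer refers to cls but constructs Geom explicitly, so the implicit class argument of a classmethod is unused and a staticmethod suffices. A stripped-down sketch of the two idioms (hypothetical stand-in class, not DAMASK code):

class Example:
    def __init__(self,arg):
        self.arg = arg

    @classmethod
    def old_style(cls,arg):          # receives the class implicitly
        return cls(arg)

    @staticmethod
    def new_style(arg):              # no implicit argument; names the class explicitly
        return Example(arg)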

View File

@@ -19,7 +19,26 @@ def Cauchy(F,P):
   else:
     sigma = np.einsum('i,ijk,ilk->ijl',1.0/np.linalg.det(F),P,F)
   return symmetric(sigma)
 
+
+def PK2(F,P):
+  """
+  Return 2. Piola-Kirchhoff stress calculated from 1. Piola-Kirchhoff stress and deformation gradient.
+
+  Parameters
+  ----------
+  F : numpy.array of shape (:,3,3) or (3,3)
+    Deformation gradient.
+  P : numpy.array of shape (:,3,3) or (3,3)
+    1. Piola-Kirchhoff stress.
+
+  """
+  if np.shape(F) == np.shape(P) == (3,3):
+    S = np.dot(np.linalg.inv(F),P)
+  else:
+    S = np.einsum('ijk,ikl->ijl',np.linalg.inv(F),P)
+  return S
+
 def strain_tensor(F,t,m):
   """

python/damask/table.py (new file, 138 additions)

View File

@@ -0,0 +1,138 @@
+import re
+
+import pandas as pd
+import numpy as np
+
+class Table():
+    """Store spreadsheet-like data."""
+
+    def __init__(self,array,headings,comments=None):
+        """
+        New spreadsheet data.
+
+        Parameters
+        ----------
+        array : numpy.ndarray
+            Data.
+        headings : dict
+            Column headings. Labels as keys and shape as tuple. Example 'F':(3,3) for a deformation gradient.
+        comments : iterable of str, optional
+            Additional, human-readable information.
+
+        """
+        self.data = pd.DataFrame(data=array)
+
+        d = {}
+        i = 0
+        for label in headings:
+            for components in range(np.prod(headings[label])):
+                d[i] = label
+                i+=1
+        self.data.rename(columns=d,inplace=True)
+
+        if comments is None:
+            self.comments = []
+        else:
+            self.comments = [c for c in comments]
+
+        self.headings = headings
+
+    @staticmethod
+    def from_ASCII(fname):
+        """
+        Create table from ASCII file.
+
+        The first line needs to indicate the number of subsequent header lines as 'n header'.
+        Vector data labels are indicated by '1_x, 2_x, ..., n_x'.
+        Tensor data labels are indicated by '3x3:1_x, 3x3:2_x, ..., 3x3:9_x'.
+
+        """
+        try:
+            f = open(fname)
+        except TypeError:
+            f = fname
+
+        header,keyword = f.readline().split()
+        if keyword == 'header':
+            header = int(header)
+        else:
+            raise Exception
+        comments = [f.readline()[:-1] for i in range(header-1)]
+        labels   = f.readline().split()
+
+        headings = {}
+        for label in labels:
+            tensor_column = re.search(r'[0-9,x]*?:[0-9]*?_',label)
+            if tensor_column:
+                my_shape = tensor_column.group().split(':',1)[0].split('x')
+                headings[label.split('_',1)[1]] = tuple([int(d) for d in my_shape])
+            else:
+                vector_column = re.match(r'[0-9]*?_',label)
+                if vector_column:
+                    headings[label.split('_',1)[1]] = (int(label.split('_',1)[0]),)
+                else:
+                    headings[label]=(1,)
+
+        return Table(np.loadtxt(f),headings,comments)
+
+    def get_array(self,label):
+        """Return data as array."""
+        if re.match(r'[0-9]*?_',label):
+            idx,key = label.split('_',1)
+            return self.data[key].to_numpy()[:,int(idx)-1]
+        else:
+            return self.data[label].to_numpy().reshape((-1,)+self.headings[label])
+
+    def set_array(self,label,array,info):
+        """Set data."""
+        if np.prod(array.shape[1:],dtype=int) == 1:
+            self.comments.append('{}: {}'.format(label,info))
+        else:
+            self.comments.append('{} {}: {}'.format(label,array.shape[1:],info))
+
+        if re.match(r'[0-9]*?_',label):
+            idx,key = label.split('_',1)
+            iloc = self.data.columns.get_loc(key).tolist().index(True) + int(idx) -1
+            self.data.iloc[:,iloc] = array
+        else:
+            self.data[label] = array.reshape(self.data[label].shape)
+
+    def get_labels(self):
+        """Return the labels of all columns."""
+        return [label for label in self.headings]
+
+    def add_array(self,label,array,info):
+        """Add data with new label."""
+        if np.prod(array.shape[1:],dtype=int) == 1:
+            self.comments.append('{}: {}'.format(label,info))
+        else:
+            self.comments.append('{} {}: {}'.format(label,array.shape[1:],info))
+
+        self.headings[label] = array.shape[1:] if len(array.shape) > 1 else (1,)
+        size = np.prod(array.shape[1:],dtype=int)
+        new_data = pd.DataFrame(data=array.reshape(-1,size),
+                                columns=[label for l in range(size)])
+        self.data = pd.concat([self.data,new_data],axis=1)
+
+    def to_ASCII(self,fname):
+        """Store as plain text file."""
+        labels = []
+        for l in self.headings:
+            if(self.headings[l] == (1,)):
+                labels.append('{}'.format(l))
+            elif(len(self.headings[l]) == 1):
+                labels+=['{}_{}'.format(i+1,l)\
+                         for i in range(self.headings[l][0])]
+            else:
+                labels+=['{}:{}_{}'.format(i+1,'x'.join([str(d) for d in self.headings[l]]),l)\
+                         for i in range(np.prod(self.headings[l],dtype=int))]
+
+        header = ['{} header'.format(len(self.comments)+1)]\
+               + self.comments\
+               + [' '.join(labels)]
+
+        try:
+            f = open(fname,'w')
+        except TypeError:
+            f = fname
+        for line in header: f.write(line+'\n')
+        self.data.to_csv(f,sep=' ',index=False,header=False)
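A round-trip sketch of the new class, mirroring how the rewritten scripts use it (file name and data are made up):

import numpy as np
from damask import Table

table = Table(np.random.rand(5,3),{'v':(3,)},['made-up example'])       # one 3-component vector column
table.add_array('norm(v)',np.linalg.norm(table.get_array('v'),axis=1),
                'added for illustration')                               # derived scalar column, provenance logged

table.to_ASCII('example.txt')             # writes '3 header', two comment lines, then '1_v 2_v 3_v norm(v)'
restored = Table.from_ASCII('example.txt')
print(restored.get_labels())              # ['v', 'norm(v)']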