DAMASK_EICMD/python/damask/_table.py

import re
import copy
import pandas as pd
import numpy as np
from . import util


class Table:
"""Store spreadsheet-like data."""
    def __init__(self,data,shapes,comments=None):
        """
        New spreadsheet.

        Parameters
        ----------
        data : numpy.ndarray or pandas.DataFrame
            Data. Column labels from a pandas.DataFrame will be replaced.
        shapes : dict with str:tuple pairs
            Shapes of the columns. Example 'F':(3,3) for a deformation gradient.
        comments : str or iterable of str, optional
            Additional, human-readable information.

        """
        comments_ = [comments] if isinstance(comments,str) else comments
        self.comments = [] if comments_ is None else [c for c in comments_]
        self.data = pd.DataFrame(data=data)
        self.shapes = { k:(v,) if isinstance(v,(int,np.integer)) else v for k,v in shapes.items() }
self._label_condensed()
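    # Usage sketch (hypothetical data, not part of the original file):
    #   >>> import numpy as np
    #   >>> t = Table(np.zeros((5,10)),{'F':(3,3),'x':(1,)},comments='example')
    #   >>> t.labels
    #   ['F', 'x']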
    def __copy__(self):
        """Copy Table."""
        return copy.deepcopy(self)

    def copy(self):
        """Copy Table."""
        return self.__copy__()
def _label_flat(self):
"""Label data individually, e.g. v v v ==> 1_v 2_v 3_v."""
labels = []
for label,shape in self.shapes.items():
size = int(np.prod(shape))
labels += [('' if size == 1 else f'{i+1}_')+label for i in range(size)]
self.data.columns = labels
def _label_condensed(self):
"""Label data condensed, e.g. 1_v 2_v 3_v ==> v v v."""
labels = []
for label,shape in self.shapes.items():
labels += [label] * int(np.prod(shape))
self.data.columns = labels
def _add_comment(self,label,shape,info):
if info is not None:
specific = f'{label}{" "+str(shape) if np.prod(shape,dtype=int) > 1 else ""}: {info}'
general = util.execution_stamp('Table')
self.comments.append(f'{specific} / {general}')
@staticmethod
def from_ASCII(fname):
"""
Create table from ASCII file.
2020-03-18 18:19:53 +05:30
The first line can indicate the number of subsequent header lines as 'n header',
alternatively first line is the header and comments are marked by '#' ('new style').
2019-12-05 09:30:26 +05:30
Vector data column labels are indicated by '1_v, 2_v, ..., n_v'.
Tensor data column labels are indicated by '3x3:1_T, 3x3:2_T, ..., 3x3:9_T'.
Parameters
----------
fname : file, str, or pathlib.Path
Filename or file for reading.
"""
try:
f = open(fname)
except TypeError:
f = fname
f.seek(0)
try:
N_comment_lines,keyword = f.readline().strip().split(maxsplit=1)
if keyword != 'header':
raise ValueError
else:
comments = [f.readline().strip() for i in range(1,int(N_comment_lines))]
labels = f.readline().split()
except ValueError:
f.seek(0)
comments = []
line = f.readline().strip()
while line.startswith('#'):
comments.append(line.lstrip('#').strip())
line = f.readline().strip()
labels = line.split()
shapes = {}
for label in labels:
tensor_column = re.search(r'[0-9,x]*?:[0-9]*?_',label)
if tensor_column:
my_shape = tensor_column.group().split(':',1)[0].split('x')
shapes[label.split('_',1)[1]] = tuple([int(d) for d in my_shape])
else:
vector_column = re.match(r'[0-9]*?_',label)
if vector_column:
shapes[label.split('_',1)[1]] = (int(label.split('_',1)[0]),)
else:
shapes[label] = (1,)
data = pd.read_csv(f,names=list(range(len(labels))),sep=r'\s+')
return Table(data,shapes,comments)
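    # Sketch of an 'old style' file accepted by from_ASCII (hypothetical content and path):
    #   1 header
    #   a 1_v 2_v 3_v
    #   1.0 0.1 0.2 0.3
    #   >>> t = Table.from_ASCII('data.txt')
    #   >>> t.shapes
    #   {'a': (1,), 'v': (3,)}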
@staticmethod
def from_ang(fname):
"""
Create table from TSL ang file.
A valid TSL ang file needs to contains the following columns:
* Euler angles (Bunge notation) in radians, 3 floats, label 'eu'.
* Spatial position in meters, 2 floats, label 'pos'.
* Image quality, 1 float, label 'IQ'.
* Confidence index, 1 float, label 'CI'.
* Phase ID, 1 int, label 'ID'.
* SEM signal, 1 float, label 'intensity'.
* Fit, 1 float, label 'fit'.
Parameters
----------
fname : file, str, or pathlib.Path
Filename or file for reading.
"""
try:
f = open(fname)
except TypeError:
f = fname
f.seek(0)
content = f.readlines()
comments = [util.execution_stamp('Table','from_ang')]
for line in content:
if line.startswith('#'):
comments.append(line.strip())
else:
break
data = np.loadtxt(content)
shapes = {'eu':3, 'pos':2, 'IQ':1, 'CI':1, 'ID':1, 'intensity':1, 'fit':1}
remainder = data.shape[1]-sum(shapes.values())
if remainder > 0: # 3.8 can do: if (remainder := data.shape[1]-sum(shapes.values())) > 0
shapes['unknown'] = remainder
return Table(data,shapes,comments)
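    # Usage sketch (hypothetical file path):
    #   >>> t = Table.from_ang('scan.ang')
    #   >>> t.labels[:2]
    #   ['eu', 'pos']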
    @property
    def labels(self):
        """Return the labels of all columns."""
        return list(self.shapes.keys())
def get(self,label):
"""
2019-12-05 10:40:27 +05:30
Get column data.
Parameters
----------
label : str
2019-12-05 10:40:27 +05:30
Column label.
"""
if re.match(r'[0-9]*?_',label):
idx,key = label.split('_',1)
data = self.data[key].to_numpy()[:,int(idx)-1].reshape(-1,1)
else:
data = self.data[label].to_numpy().reshape((-1,)+self.shapes[label])
return data.astype(type(data.flatten()[0]))
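    # Usage sketch (hypothetical data): a 3x3 column is returned with its full shape,
    # while an indexed component such as '1_F' stays two-dimensional.
    #   >>> import numpy as np
    #   >>> t = Table(np.zeros((5,9)),{'F':(3,3)})
    #   >>> t.get('F').shape
    #   (5, 3, 3)
    #   >>> t.get('1_F').shape
    #   (5, 1)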
def set(self,label,data,info=None):
"""
2019-12-05 10:40:27 +05:30
Set column data.
Parameters
----------
label : str
2019-12-05 10:40:27 +05:30
Column label.
data : np.ndarray
New data.
2019-12-05 10:40:27 +05:30
info : str, optional
Human-readable information about the new data.
"""
dup = self.copy()
dup._add_comment(label,data.shape[1:],info)
if re.match(r'[0-9]*?_',label):
idx,key = label.split('_',1)
iloc = dup.data.columns.get_loc(key).tolist().index(True) + int(idx) -1
dup.data.iloc[:,iloc] = data
else:
dup.data[label] = data.reshape(dup.data[label].shape)
return dup
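    # Usage sketch (hypothetical data; set returns a modified copy):
    #   >>> import numpy as np
    #   >>> t = Table(np.zeros((5,4)),{'v':(3,),'a':(1,)})
    #   >>> t = t.set('v',np.ones((5,3)),info='reset to one')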
def add(self,label,data,info=None):
"""
2019-12-05 10:40:27 +05:30
Add column data.
Parameters
----------
label : str
2019-12-05 10:40:27 +05:30
Column label.
data : np.ndarray
Modified data.
info : str, optional
Human-readable information about the modified data.
"""
dup = self.copy()
dup._add_comment(label,data.shape[1:],info)
dup.shapes[label] = data.shape[1:] if len(data.shape) > 1 else (1,)
size = np.prod(data.shape[1:],dtype=int)
new = pd.DataFrame(data=data.reshape(-1,size),
columns=[label]*size,
)
new.index = dup.data.index
dup.data = pd.concat([dup.data,new],axis=1)
return dup
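    # Usage sketch (hypothetical data; add returns a copy with the new column):
    #   >>> import numpy as np
    #   >>> t = Table(np.zeros((5,3)),{'v':(3,)})
    #   >>> t.add('id',np.arange(5),info='row index').labels
    #   ['v', 'id']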
def delete(self,label):
"""
Delete column data.
Parameters
----------
label : str
Column label.
"""
dup = self.copy()
dup.data.drop(columns=label,inplace=True)
del dup.shapes[label]
return dup
def rename(self,old,new,info=None):
        """
        Rename column data.

        Parameters
        ----------
        old : str or iterable of str
            Old column label(s).
        new : str or iterable of str
            New column label(s).
        info : str, optional
            Human-readable information about the modification.

        """
dup = self.copy()
columns = dict(zip([old] if isinstance(old,str) else old,
[new] if isinstance(new,str) else new))
dup.data.rename(columns=columns,inplace=True)
dup.comments.append(f'{old} => {new}'+('' if info is None else f': {info}'))
dup.shapes = {(label if label not in columns else columns[label]):dup.shapes[label] for label in dup.shapes}
return dup
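    # Usage sketch (hypothetical labels):
    #   >>> import numpy as np
    #   >>> t = Table(np.zeros((5,3)),{'v':(3,)})
    #   >>> t.rename('v','u',info='renamed for illustration').labels
    #   ['u']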
def sort_by(self,labels,ascending=True):
"""
Sort table by values of given labels.
2019-12-05 15:17:36 +05:30
Parameters
----------
label : str or list
Column labels for sorting.
ascending : bool or list, optional
2019-12-05 15:17:36 +05:30
Set sort order.
"""
dup = self.copy()
dup._label_flat()
dup.data.sort_values(labels,axis=0,inplace=True,ascending=ascending)
dup._label_condensed()
dup.comments.append(f'sorted {"ascending" if ascending else "descending"} by {labels}')
return dup
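    # Usage sketch (hypothetical scalar column 'a'):
    #   >>> import numpy as np
    #   >>> t = Table(np.array([[3.],[1.],[2.]]),{'a':(1,)})
    #   >>> t.sort_by('a').get('a').flatten()
    #   array([1., 2., 3.])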
def append(self,other):
"""
2020-01-12 04:44:35 +05:30
Append other table vertically (similar to numpy.vstack).
Requires matching labels/shapes and order.
Parameters
----------
other : Table
Table to append.
"""
        if self.shapes != other.shapes or not self.data.columns.equals(other.data.columns):
            raise KeyError('Labels or shapes or order do not match')
        else:
            dup = self.copy()
            dup.data = pd.concat([dup.data,other.data],ignore_index=True)
            return dup
def join(self,other):
"""
2020-01-12 04:44:35 +05:30
Append other table horizontally (similar to numpy.hstack).
Requires matching number of rows and no common labels.
Parameters
----------
other : Table
Table to join.
"""
        if set(self.shapes) & set(other.shapes) or self.data.shape[0] != other.data.shape[0]:
            raise KeyError('Duplicated keys or row count mismatch')
else:
dup = self.copy()
dup.data = dup.data.join(other.data)
for key in other.shapes:
dup.shapes[key] = other.shapes[key]
return dup
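    # Usage sketch (hypothetical tables; join needs disjoint labels, append an identical layout):
    #   >>> import numpy as np
    #   >>> t1 = Table(np.zeros((5,3)),{'v':(3,)})
    #   >>> t2 = Table(np.ones((5,1)),{'a':(1,)})
    #   >>> t1.join(t2).labels
    #   ['v', 'a']
    #   >>> t1.append(t1).data.shape
    #   (10, 3)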
def to_file(self,fname,format='ASCII',new_style=False):
"""
Store as plain text file.
Parameters
----------
fname : file, str, or pathlib.Path
2020-03-18 18:19:53 +05:30
Filename or file for writing.
format : {ASCII'}, optional
File format, defaults to 'ASCII'. Available formats are:
- ASCII: Plain text file, extension '.txt'.
2020-03-18 18:19:53 +05:30
new_style : Boolean, optional
Write table in new style, indicating header lines by comment sign ('#') only.
"""
def _to_ASCII(table,fname,new_style=False):
"""
Store as plain text file.
Parameters
----------
table : Table object
Table to write.
fname : file, str, or pathlib.Path
Filename or file for writing.
new_style : Boolean, optional
Write table in new style, indicating header lines by comment sign ('#') only.
"""
seen = set()
labels = []
for l in [x for x in table.data.columns if not (x in seen or seen.add(x))]:
if table.shapes[l] == (1,):
labels.append(f'{l}')
elif len(table.shapes[l]) == 1:
labels += [f'{i+1}_{l}' \
for i in range(table.shapes[l][0])]
else:
labels += [f'{util.srepr(table.shapes[l],"x")}:{i+1}_{l}' \
for i in range(np.prod(table.shapes[l]))]
header = [f'# {comment}' for comment in table.comments] if new_style else \
[f'{len(table.comments)+1} header'] + table.comments
try:
f = open(fname,'w')
except TypeError:
f = fname
for line in header + [' '.join(labels)]: f.write(line+'\n')
table.data.to_csv(f,sep=' ',na_rep='nan',index=False,header=False)
if format.lower() == 'ascii':
return _to_ASCII(self,fname,new_style)
else:
raise TypeError(f'Unknown format {format}.')
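    # Usage sketch (hypothetical path; round trip through the default 'ASCII' format):
    #   >>> import numpy as np
    #   >>> t = Table(np.zeros((5,9)),{'F':(3,3)},comments='example')
    #   >>> t.to_file('out.txt')
    #   >>> Table.from_ASCII('out.txt').shapes
    #   {'F': (3, 3)}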