import re
import copy
from pathlib import Path
from typing import Union, Tuple, List

import pandas as pd
import numpy as np

from ._typehints import FileHandle
from . import util


class Table:
    """Manipulate multi-dimensional spreadsheet-like data."""

    def __init__(self,
                 data: Union[np.ndarray, pd.DataFrame],
                 shapes: dict,
                 comments: Union[str, list] = None):
        """
        New spreadsheet.

        Parameters
        ----------
        data : numpy.ndarray or pandas.DataFrame
            Data. Column labels from a pandas.DataFrame will be replaced.
        shapes : dict with str:tuple pairs
            Shapes of the columns. Example 'F':(3,3) for a deformation gradient.
        comments : str or iterable of str, optional
            Additional, human-readable information.

        """
        comments_ = [comments] if isinstance(comments,str) else comments
        self.comments = [] if comments_ is None else [c for c in comments_]
        self.data = pd.DataFrame(data=data)
        self.shapes = { k:(v,) if isinstance(v,(np.int64,np.int32,int)) else v for k,v in shapes.items() }
        self._relabel('uniform')
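
    # Illustrative usage (a minimal sketch, not part of the original source):
    # a 4x3 integer array with three scalar columns; integer shape values are
    # promoted to 1-tuples by __init__.
    #
    #   >>> tbl = Table(np.arange(12).reshape(4,3),
    #   ...             {'colA': 1, 'colB': 1, 'colC': 1},
    #   ...             comments='an example table')
    #   >>> len(tbl)
    #   4
    #   >>> tbl.labels
    #   ['colA', 'colB', 'colC']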

    def __repr__(self) -> str:
        """Brief overview."""
        self._relabel('shapes')
        data_repr = self.data.__repr__()
        self._relabel('uniform')
        return '\n'.join(['# '+c for c in self.comments])+'\n'+data_repr

    def __getitem__(self,
                    item: Union[slice, Tuple[slice, ...]]) -> 'Table':
        """
        Slice the Table according to item.

        Parameters
        ----------
        item : row and/or column indexer
            Slice to select from Table.

        Returns
        -------
        slice : damask.Table
            Sliced part of the Table.

        Examples
        --------
        >>> import damask
        >>> import numpy as np
        >>> tbl = damask.Table(data=np.arange(12).reshape((4,3)),
        ...                    shapes=dict(colA=(1,),colB=(1,),colC=(1,)))
        >>> tbl['colA','colB']
           colA  colB
        0     0     1
        1     3     4
        2     6     7
        3     9    10
        >>> tbl[::2,['colB','colA']]
           colB  colA
        0     1     0
        2     7     6
        >>> tbl[1:2,'colB']
           colB
        1     4
        2     7

        """
        item = (item,slice(None,None,None)) if isinstance(item,slice) else \
               item if isinstance(item[0],slice) else \
               (slice(None,None,None),item)
        sliced = self.data.loc[item]
        cols = np.array(sliced.columns if isinstance(sliced,pd.core.frame.DataFrame) else [item[1]])
        _,idx = np.unique(cols,return_index=True)
        return self.__class__(data=sliced,
                              shapes = {k:self.shapes[k] for k in cols[np.sort(idx)]},
                              comments=self.comments)

    def __len__(self) -> int:
        """Number of rows."""
        return len(self.data)

    def __copy__(self) -> 'Table':
        """Create deep copy."""
        return copy.deepcopy(self)

    copy = __copy__

    def _label(self,
               what: Union[str, List[str]],
               how: str) -> List[str]:
        """
        Expand labels according to data shape.

        Parameters
        ----------
        what : str or list
            Labels to expand.
        how : {'uniform', 'shapes', 'linear'}
            Mode of labeling.
            'uniform' ==> v v v
            'shapes'  ==> 3:v v v
            'linear'  ==> 1_v 2_v 3_v

        """
        what = [what] if isinstance(what,str) else what
        labels = []
        for label in what:
            shape = self.shapes[label]
            size = np.prod(shape,dtype=int)
            if how == 'uniform':
                labels += [label] * size
            elif how == 'shapes':
                labels += [('' if size == 1 or i>0 else f'{util.srepr(shape,"x")}:')+label for i in range(size)]
            elif how == 'linear':
                labels += [('' if size == 1 else f'{i+1}_')+label for i in range(size)]
            else:
                raise KeyError(f'unknown labeling mode "{how}"')
        return labels
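
    # Illustration (added sketch): for shapes == {'F': (3,3)} the three modes
    # expand the label 'F' as
    #   _label('F','uniform') -> ['F'] * 9
    #   _label('F','shapes')  -> ['3x3:F', 'F', ..., 'F']   (prefix on the first entry only)
    #   _label('F','linear')  -> ['1_F', '2_F', ..., '9_F']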

    def _relabel(self,
                 how: str):
        """
        Modify labeling of data in-place.

        Parameters
        ----------
        how : {'uniform', 'shapes', 'linear'}
            Mode of labeling.
            'uniform' ==> v v v
            'shapes'  ==> 3:v v v
            'linear'  ==> 1_v 2_v 3_v

        """
        self.data.columns = self._label(self.shapes,how)                        # type: ignore

    def _add_comment(self,
                     label: str,
                     shape: Tuple[int, ...],
                     info: str = None):
        if info is not None:
            specific = f'{label}{" "+str(shape) if np.prod(shape,dtype=int) > 1 else ""}: {info}'
            general = util.execution_stamp('Table')
            self.comments.append(f'{specific} / {general}')

    def isclose(self,
                other: 'Table',
                rtol: float = 1e-5,
                atol: float = 1e-8,
                equal_nan: bool = True) -> np.ndarray:
        """
        Report where values are approximately equal to corresponding ones of other Table.

        Parameters
        ----------
        other : damask.Table
            Table to compare against.
        rtol : float, optional
            Relative tolerance of equality.
        atol : float, optional
            Absolute tolerance of equality.
        equal_nan : bool, optional
            Consider matching NaN values as equal. Defaults to True.

        Returns
        -------
        mask : numpy.ndarray of bool
            Mask indicating where corresponding table values are close.

        """
        return np.isclose( self.data.to_numpy(),
                           other.data.to_numpy(),
                           rtol=rtol,
                           atol=atol,
                           equal_nan=equal_nan)

    def allclose(self,
                 other: 'Table',
                 rtol: float = 1e-5,
                 atol: float = 1e-8,
                 equal_nan: bool = True) -> bool:
        """
        Test whether all values are approximately equal to corresponding ones of other Table.

        Parameters
        ----------
        other : damask.Table
            Table to compare against.
        rtol : float, optional
            Relative tolerance of equality.
        atol : float, optional
            Absolute tolerance of equality.
        equal_nan : bool, optional
            Consider matching NaN values as equal. Defaults to True.

        Returns
        -------
        answer : bool
            Whether corresponding values are close between both tables.

        """
        return np.allclose( self.data.to_numpy(),
                            other.data.to_numpy(),
                            rtol=rtol,
                            atol=atol,
                            equal_nan=equal_nan)
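
    # Quick check (added sketch): comparing a table with itself, every entry
    # is close, so
    #   >>> tbl.allclose(tbl)
    #   True
    # while isclose returns an element-wise boolean mask with the same shape
    # as the underlying data array.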

    @staticmethod
    def load(fname: FileHandle) -> 'Table':
        """
        Load from ASCII table file.

        Initial comments are marked by '#'; the first non-comment line
        contains the column labels.

        - Vector data column labels are indicated by '1_v, 2_v, ..., n_v'.
        - Tensor data column labels are indicated by '3x3:1_T, 3x3:2_T, ..., 3x3:9_T'.

        Parameters
        ----------
        fname : file, str, or pathlib.Path
            Filename or file for reading.

        Returns
        -------
        loaded : damask.Table
            Table data from file.

        """
        f = open(fname) if isinstance(fname, (str, Path)) else fname
        f.seek(0)

        comments = []
        line = f.readline().strip()
        while line.startswith('#'):
            comments.append(line.lstrip('#').strip())
            line = f.readline().strip()
        labels = line.split()

        shapes = {}
        for label in labels:
            tensor_column = re.search(r'[0-9,x]*?:[0-9]*?_',label)
            if tensor_column:
                my_shape = tensor_column.group().split(':',1)[0].split('x')
                shapes[label.split('_',1)[1]] = tuple([int(d) for d in my_shape])
            else:
                vector_column = re.match(r'[0-9]*?_',label)
                if vector_column:
                    shapes[label.split('_',1)[1]] = (int(label.split('_',1)[0]),)
                else:
                    shapes[label] = (1,)

        data = pd.read_csv(f,names=list(range(len(labels))),sep=r'\s+')

        return Table(data,shapes,comments)
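
    # File format sketch (added; 'example.txt' is a hypothetical file name):
    # a file containing
    #
    #   # generated by some tool
    #   1_v 2_v 3_v s
    #   1.0 0.0 0.0 0.5
    #   0.0 1.0 0.0 0.7
    #
    # is read by Table.load('example.txt') into a two-row table with
    # shapes == {'v': (3,), 's': (1,)}; the leading '#' line is kept as a comment.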

    @staticmethod
    def load_ang(fname: FileHandle) -> 'Table':
        """
        Load from ang file.

        A valid TSL ang file has to have the following columns:

        - Euler angles (Bunge notation) in radians, 3 floats, label 'eu'.
        - Spatial position in meters, 2 floats, label 'pos'.
        - Image quality, 1 float, label 'IQ'.
        - Confidence index, 1 float, label 'CI'.
        - Phase ID, 1 int, label 'ID'.
        - SEM signal, 1 float, label 'intensity'.
        - Fit, 1 float, label 'fit'.

        Parameters
        ----------
        fname : file, str, or pathlib.Path
            Filename or file for reading.

        Returns
        -------
        loaded : damask.Table
            Table data from file.

        """
        f = open(fname) if isinstance(fname, (str, Path)) else fname
        f.seek(0)

        content = f.readlines()

        comments = [util.execution_stamp('Table','from_ang')]
        for line in content:
            if line.startswith('#'):
                comments.append(line.split('#',1)[1].strip())
            else:
                break

        data = np.loadtxt(content)

        shapes = {'eu':3, 'pos':2, 'IQ':1, 'CI':1, 'ID':1, 'intensity':1, 'fit':1}
        remainder = data.shape[1]-sum(shapes.values())
        if remainder > 0:                       # 3.8 can do: if (remainder := data.shape[1]-sum(shapes.values())) > 0
            shapes['unknown'] = remainder

        return Table(data,shapes,comments)

    @property
    def labels(self) -> List[str]:
        return list(self.shapes)

    def get(self,
            label: str) -> np.ndarray:
        """
        Get column data.

        Parameters
        ----------
        label : str
            Column label.

        Returns
        -------
        data : numpy.ndarray
            Array of column data.

        """
        data = self.data[label].to_numpy().reshape((-1,)+self.shapes[label])

        return data.astype(type(data.flatten()[0]))
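
    # Usage sketch (added): columns come back with their registered shape
    # prepended by the row count, e.g. for the scalar column 'colA' of the
    # 4-row example table above,
    #   >>> tbl.get('colA').shape
    #   (4, 1)
    # and a (3,3)-shaped column 'F' would return an array of shape (N,3,3).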

    def set(self,
            label: str,
            data: np.ndarray,
            info: str = None) -> 'Table':
        """
        Set column data.

        Parameters
        ----------
        label : str
            Column label.
        data : numpy.ndarray
            New data.
        info : str, optional
            Human-readable information about the new data.

        Returns
        -------
        updated : damask.Table
            Updated table.

        """
        dup = self.copy()
        dup._add_comment(label, data.shape[1:], info)
        m = re.match(r'(.*)\[((\d+,)*(\d+))\]',label)
        if m:
            key = m.group(1)
            idx = np.ravel_multi_index(tuple(map(int,m.group(2).split(","))),
                                       self.shapes[key])
            iloc = dup.data.columns.get_loc(key).tolist().index(True) + idx
            dup.data.iloc[:,iloc] = data
        else:
            dup.data[label] = data.reshape(dup.data[label].shape)
        return dup
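
    # Usage sketch (added): replacing a whole column, or a single component of
    # a multi-dimensional column via the '[i,j]' suffix handled above, e.g.
    #   >>> tbl = tbl.set('colA', np.zeros(len(tbl)), info='reset to zero')
    #   >>> tbl = tbl.set('F[1,1]', np.ones(len(tbl)))   # only if a (3,3) column 'F' exists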

    def add(self,
            label: str,
            data: np.ndarray,
            info: str = None) -> 'Table':
        """
        Add column data.

        Parameters
        ----------
        label : str
            Column label.
        data : numpy.ndarray
            New data.
        info : str, optional
            Human-readable information about the new data.

        Returns
        -------
        updated : damask.Table
            Updated table.

        """
        dup = self.copy()
        dup._add_comment(label,data.shape[1:],info)

        dup.shapes[label] = data.shape[1:] if len(data.shape) > 1 else (1,)
        size = np.prod(data.shape[1:],dtype=int)
        new = pd.DataFrame(data=data.reshape(-1,size),
                           columns=[label]*size,
                          )
        new.index = dup.data.index
        dup.data = pd.concat([dup.data,new],axis=1)
        return dup
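
    # Usage sketch (added): appending a three-component column 'v' to the
    # 4-row example table,
    #   >>> tbl = tbl.add('v', np.ones((4,3)), info='unit vectors')
    #   >>> tbl.shapes['v']
    #   (3,)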

    def delete(self,
               label: str) -> 'Table':
        """
        Delete column data.

        Parameters
        ----------
        label : str
            Column label.

        Returns
        -------
        updated : damask.Table
            Updated table.

        """
        dup = self.copy()
        dup.data.drop(columns=label,inplace=True)
        del dup.shapes[label]
        return dup

    def rename(self,
               old: Union[str, List[str]],
               new: Union[str, List[str]],
               info: str = None) -> 'Table':
        """
        Rename column data.

        Parameters
        ----------
        old : str or iterable of str
            Old column label(s).
        new : str or iterable of str
            New column label(s).
        info : str, optional
            Human-readable information about the change.

        Returns
        -------
        updated : damask.Table
            Updated table.

        """
        dup = self.copy()
        columns = dict(zip([old] if isinstance(old,str) else old,
                           [new] if isinstance(new,str) else new))
        dup.data.rename(columns=columns,inplace=True)
        dup.comments.append(f'{old} => {new}'+('' if info is None else f': {info}'))
        dup.shapes = {(label if label not in columns else columns[label]):dup.shapes[label] for label in dup.shapes}
        return dup

    def sort_by(self,
                labels: Union[str, List[str]],
                ascending: Union[bool, List[bool]] = True) -> 'Table':
        """
        Sort table by values of given labels.

        Parameters
        ----------
        labels : str or list
            Column labels for sorting.
        ascending : bool or list, optional
            Set sort order.

        Returns
        -------
        updated : damask.Table
            Updated table.

        """
        labels_ = [labels] if isinstance(labels,str) else labels.copy()
        for i,l in enumerate(labels_):
            m = re.match(r'(.*)\[((\d+,)*(\d+))\]',l)
            if m:
                idx = np.ravel_multi_index(tuple(map(int,m.group(2).split(','))),
                                           self.shapes[m.group(1)])
                labels_[i] = f'{1+idx}_{m.group(1)}'

        dup = self.copy()
        dup._relabel('linear')
        dup.data.sort_values(labels_,axis=0,inplace=True,ascending=ascending)
        dup._relabel('uniform')
        dup.comments.append(f'sorted {"ascending" if ascending else "descending"} by {labels}')
        return dup
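
    # Usage sketch (added): sorting descending by a scalar column, or by a
    # single component of a vector column using the same '[i]' syntax,
    #   >>> tbl = tbl.sort_by('colA', ascending=False)
    #   >>> tbl = tbl.sort_by('v[1]')   # sorts by the second component of 'v'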

    def append(self,
               other: 'Table') -> 'Table':
        """
        Append other table vertically (similar to numpy.vstack).

        Requires matching labels/shapes and order.

        Parameters
        ----------
        other : damask.Table
            Table to append.

        Returns
        -------
        updated : damask.Table
            Updated table.

        """
        if self.shapes != other.shapes or not self.data.columns.equals(other.data.columns):
            raise KeyError('Labels or shapes or order do not match')
        else:
            dup = self.copy()
            dup.data = pd.concat([dup.data,other.data],ignore_index=True)       # DataFrame.append is removed in pandas >= 2.0
            return dup

    def join(self,
             other: 'Table') -> 'Table':
        """
        Append other table horizontally (similar to numpy.hstack).

        Requires matching number of rows and no common labels.

        Parameters
        ----------
        other : damask.Table
            Table to join.

        Returns
        -------
        updated : damask.Table
            Updated table.

        """
        if set(self.shapes) & set(other.shapes) or self.data.shape[0] != other.data.shape[0]:
            raise KeyError('Duplicated keys or row count mismatch')
        else:
            dup = self.copy()
            dup.data = dup.data.join(other.data)
            for key in other.shapes:
                dup.shapes[key] = other.shapes[key]
            return dup
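
    # Usage sketch (added): stacking tables, assuming 'tbl' and 'extra' are
    # Table instances,
    #   >>> longer = tbl.append(tbl)     # same columns, twice the rows
    #   >>> wider  = tbl.join(extra)     # same rows, disjoint column labels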

    def save(self,
             fname: FileHandle):
        """
        Save as plain text file.

        Parameters
        ----------
        fname : file, str, or pathlib.Path
            Filename or file for writing.

        """
        labels = []
        for l in list(dict.fromkeys(self.data.columns)):
            if self.shapes[l] == (1,):
                labels.append(f'{l}')
            elif len(self.shapes[l]) == 1:
                labels += [f'{i+1}_{l}' \
                           for i in range(self.shapes[l][0])]
            else:
                labels += [f'{util.srepr(self.shapes[l],"x")}:{i+1}_{l}' \
                           for i in range(np.prod(self.shapes[l]))]

        f = open(fname,'w',newline='\n') if isinstance(fname, (str, Path)) else fname

        f.write('\n'.join([f'# {c}' for c in self.comments] + [' '.join(labels)])+'\n')
        self.data.to_csv(f,sep=' ',na_rep='nan',index=False,header=False)
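
    # Round-trip sketch (added): save() writes '#'-prefixed comments, a label
    # header, and whitespace-separated values, so a numeric table survives
    #   >>> tbl.save('out.txt')
    #   >>> assert Table.load('out.txt').allclose(tbl)
    # ('out.txt' is a hypothetical file name).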