import re

import pandas as pd
import numpy as np

class Table():
    """Store spreadsheet-like data."""

    def __init__(self,array,headings,comments=None):
        """
        New spreadsheet data.

        Parameters
        ----------
        array : numpy.ndarray
            Data.
        headings : dict
            Column headings. Labels as keys and shapes as tuples, e.g. 'F':(3,3) for a deformation gradient.
        comments : iterable of str, optional
            Additional, human-readable information.

        """
        self.data = pd.DataFrame(data=array)

        # map positional column indices to their (repeated) label,
        # e.g. {0:'F',1:'F',...,8:'F'} for a 3x3 tensor
        d = {}
        i = 0
        for label in headings:
            for components in range(np.prod(headings[label])):
                d[i] = label
                i+=1

        self.data.rename(columns=d,inplace=True)

        if comments is None:
            self.comments = []
        else:
            self.comments = [c for c in comments]

        self.headings = headings

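    # Construction sketch (hypothetical values, not part of the original file):
    # the array must provide np.prod(shape) columns per label, in the order the
    # labels appear in 'headings', e.g. 1 column for 'v':(1,) followed by 9
    # columns for 'F':(3,3):
    #
    #   t = Table(np.ones((10,10)),{'v':(1,),'F':(3,3)},comments=['unit data'])
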
    @staticmethod
    def from_ASCII(fname):
        """
        Create table from ASCII file.

        The first line needs to indicate the number of subsequent header lines as 'n header'.
        Vector data labels are indicated by '1_x, 2_x, ..., n_x'.
        Tensor data labels are indicated by '3x3:1_x, 3x3:2_x, ..., 3x3:9_x'.

        """
        try:
            f = open(fname)
        except TypeError:
            f = fname

        header,keyword = f.readline().split()
        if keyword == 'header':
            header = int(header)
        else:
            raise Exception("first line does not indicate the number of header lines as 'n header'")

        comments = [f.readline()[:-1] for i in range(header-1)]
        labels   = f.readline().split()

        headings = {}
        for label in labels:
            tensor_column = re.search(r'[0-9,x]*?:[0-9]*?_',label)
            if tensor_column:
                # shape is encoded before the ':', e.g. '3x3:1_F' -> (3,3)
                my_shape = tensor_column.group().split(':',1)[0].split('x')
                headings[label.split('_',1)[1]] = tuple([int(d) for d in my_shape])
            else:
                vector_column = re.match(r'[0-9]*?_',label)
                if vector_column:
                    # '1_x ... n_x': the last component seen sets the length
                    headings[label.split('_',1)[1]] = (int(label.split('_',1)[0]),)
                else:
                    headings[label]=(1,)

        return Table(np.loadtxt(f),headings,comments)

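    # Illustrative file content accepted by from_ASCII (hypothetical values):
    # a scalar column 'v' and a 3-vector column 'u' behind a two-line header
    # (one comment line plus the label line):
    #
    #   2 header
    #   produced by a pre-processing step
    #   v 1_u 2_u 3_u
    #   1.0 0.0 0.0 0.0
    #   2.0 0.0 1.0 0.0
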
    def get_array(self,label):
        """Return data as array."""
        if re.match(r'[0-9]*?_',label):
            idx,key = label.split('_',1)
            return self.data[key].to_numpy()[:,int(idx)-1]
        else:
            return self.data[label].to_numpy().reshape((-1,)+self.headings[label])

    def set_array(self,label,array):
        """Set data."""
        if re.match(r'[0-9]*?_',label):
            idx,key = label.split('_',1)
            # position of the first column belonging to 'key' plus the component offset
            iloc = self.data.columns.get_loc(key).tolist().index(True) + int(idx) -1
            self.data.iloc[:,iloc] = array
        else:
            self.data[label] = array.reshape(self.data[label].shape)

    def get_labels(self):
        """Return the labels of all columns."""
        return [label for label in self.headings]

    def add_array(self,label,array,info):
        """Add array as a new column and record 'info' as a comment."""
        if np.prod(array.shape[1:],dtype=int) == 1:
            self.comments.append('{}: {}'.format(label,info))
        else:
            self.comments.append('{} {}: {}'.format(label,array.shape[1:],info))

        self.headings[label] = array.shape[1:] if len(array.shape) > 1 else (1,)
        size = np.prod(array.shape[1:],dtype=int)
        new_data = pd.DataFrame(data=array.reshape(-1,size),
                                columns=[label for l in range(size)])
        self.data = pd.concat([self.data,new_data],axis=1)

    def to_ASCII(self,fname):
        """Store as ASCII file in the format expected by from_ASCII."""
        labels = []
        for l in self.headings:
            if(self.headings[l] == (1,)):
                labels.append('{}'.format(l))
            elif(len(self.headings[l]) == 1):
                labels+=['{}_{}'.format(i+1,l)\
                         for i in range(self.headings[l][0])]
            else:
                # tensor columns follow the '3x3:1_F' convention of from_ASCII
                labels+=['{}:{}_{}'.format('x'.join([str(d) for d in self.headings[l]]),i+1,l)\
                         for i in range(np.prod(self.headings[l],dtype=int))]

        header = ['{} header'.format(len(self.comments)+1)]\
               + self.comments\
               + [' '.join(labels)]

        try:
            f = open(fname,'w')
        except TypeError:
            f = fname
        for line in header: f.write(line+'\n')
        self.data.to_csv(f,sep=' ',index=False,header=False)
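
# Minimal usage sketch (the file name 'example.txt' and the labels are
# illustrative only, not part of the original module): build a table, add a
# tensor column, write it to ASCII, and read it back.
if __name__ == '__main__':
    t = Table(np.random.rand(5,3),{'pos':(3,)},comments=['random coordinates'])
    t.add_array('F',np.broadcast_to(np.eye(3),(5,3,3)),'deformation gradient')

    with open('example.txt','w') as f_out:                # passing a handle exercises the TypeError branch
        t.to_ASCII(f_out)

    t2 = Table.from_ASCII('example.txt')
    print(t2.get_labels())                                # ['pos', 'F']
    print(t2.get_array('F').shape)                        # (5, 3, 3)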