# -*- coding: UTF-8 no BOM -*-

# $Id$

import os,sys
import numpy as np
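
# A minimal sketch of the table layout this class handles (inferred from
# head_read/head_write below, shown only as an illustration): an optional
# keyword line "<N> header" (head_read accepts any keyword starting with
# "head"), followed by N-1 free-form info lines and one line of column labels,
# then the whitespace-separated data rows, e.g.
#
#   3 header
#   some info line
#   another info line
#   x   y   z
#   1.0 2.0 3.0
#   4.0 5.0 6.0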

class ASCIItable():
  '''
  Read and write whitespace-separated ASCII tables.

  A table consists of an optional header (free-form info lines plus one line
  of column labels) followed by rows of data.
  '''

  __slots__ = ['__IO__',
               'info',
               'labels',
               'data',
              ]

# ------------------------------------------------------------------
  def __init__(self,
               fileIn   = sys.stdin,
               fileOut  = sys.stdout,
               buffered = False,                                        # flush writes immediately unless buffered
               labels   = True):                                        # assume table has labels

    self.__IO__ = {'in':  fileIn,
                   'out': fileOut,
                   'output': [],
                   'buffered': buffered,
                   'labels': labels,
                   'validReadSize': 0,
                   'readBuffer': [],                                    # buffer to hold non-advancing reads
                   'dataStart': 0,
                  }
    self.info   = []
    self.labels = []
    self.data   = []

# ------------------------------------------------------------------
  def _transliterateToFloat(self,x):
    '''
    convert a single entry to float, falling back to 0.0 for unparsable input
    '''
    try:
      return float(x)
    except (ValueError, TypeError):
      return 0.0

# ------------------------------------------------------------------
  def close(self,dismiss = False):
    self.input_close()
    self.output_close(dismiss)

# ------------------------------------------------------------------
  def input_close(self):
    try:
      self.__IO__['in'].close()
    except:
      pass

# ------------------------------------------------------------------
  def output_write(self,
                   what):
    '''
    aggregate a single row (string) or a (possibly nested) list of rows into the output buffer
    '''
    if not isinstance(what, (str, unicode)):
      try:
        for item in what: self.output_write(item)
      except TypeError:                                                 # not iterable: stringify and store
        self.__IO__['output'] += [str(what)]
    else:
      self.__IO__['output'] += [what]

    return self.__IO__['buffered'] or self.output_flush()

# ------------------------------------------------------------------
  def output_flush(self,
                   clear = True):
    try:
      self.__IO__['output'] == [] or self.__IO__['out'].write('\n'.join(self.__IO__['output']) + '\n')
    except IOError:
      return False
    if clear: self.output_clear()
    return True

# ------------------------------------------------------------------
  def output_clear(self):
    self.__IO__['output'] = []

# ------------------------------------------------------------------
  def output_close(self, dismiss = False):
    try:
      self.__IO__['out'].close()
    except:
      pass
    if dismiss and os.path.isfile(self.__IO__['out'].name): os.remove(self.__IO__['out'].name)

# ------------------------------------------------------------------
  def head_read(self):
    '''
    get column labels by either reading the first row or,
    if the keyword "head[*]" is present, the last line of the header
    '''
    import re

    try:
      self.__IO__['in'].seek(0)
    except:
      pass                                                              # e.g. pipes cannot seek

    firstline = self.__IO__['in'].readline()
    m = re.search(r'(\d+)\s+head', firstline.lower())
    if self.__IO__['labels']:                                           # table features labels
      if m:                                                             # found header info
        self.info   = [self.__IO__['in'].readline().strip() for i in xrange(1,int(m.group(1)))]
        self.labels =  self.__IO__['in'].readline().split()             # labels sit on the last header line
      else:                                                             # no header info (but labels)
        self.labels = firstline.split()

      self.__IO__['validReadSize'] = len(self.labels)

    else:                                                               # no labels present in table
      if m:                                                             # found header info
        self.info = [self.__IO__['in'].readline().strip() for i in xrange(0,int(m.group(1)))]     # whole header is info ...
                                                                                                  # ... without any labels
    try:
      self.__IO__['dataStart'] = self.__IO__['in'].tell()               # current file position is start of data
    except IOError:
      pass

# ------------------------------------------------------------------
  def head_write(self):
    '''
    write current header information (info + labels)
    '''
    if self.__IO__['labels']:
      return self.output_write ([
                                 '%i\theader'%(len(self.info)+1),
                                 self.info,
                                 '\t'.join(self.labels),
                                ])
    else:
      return self.output_write ([
                                 '%i\theader'%(len(self.info)),
                                 self.info,
                                ])
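
  # Illustrative example (not taken from the original sources): with
  # info = ['converted from demo.dat'] and labels = ['x','y','z'],
  # head_write() buffers the three lines
  #   2<TAB>header
  #   converted from demo.dat
  #   x<TAB>y<TAB>z
  # i.e. the count on the keyword line is len(self.info)+1 because it also
  # counts the label line.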

# ------------------------------------------------------------------
  def labels_append(self,
                    what):
    '''
    add item or list to existing set of labels (and switch on labeling)
    '''
    if not isinstance(what, (str, unicode)):
      try:
        for item in what: self.labels_append(item)
      except TypeError:                                                 # not iterable: stringify and store
        self.labels += [str(what)]
    else:
      self.labels += [what]

    self.__IO__['labels'] = True                                        # switch on processing (in particular writing) of labels

# ------------------------------------------------------------------
  def labels_clear(self):
    '''
    delete existing labels and switch to no labeling
    '''
    self.labels = []
    self.__IO__['labels'] = False

# ------------------------------------------------------------------
  def label_index(self,
                  labels):
    '''
    tell the column index of the given label(s).
    returns a numpy array when asked for a list of labels.
    transparently deals with column positions given as numbers and with headings given as strings.
    '''
    from collections import Iterable

    if isinstance(labels, Iterable) and not isinstance(labels, str):    # check whether list of labels is requested
      idx = []
      for label in labels:
        if label is not None:
          try:
            idx.append(int(label))                                      # column given as integer number?
          except ValueError:
            try:
              idx.append(self.labels.index(label))                      # locate string in label list
            except ValueError:
              try:
                idx.append(self.labels.index('1_'+label))               # locate '1_'+string in label list
              except ValueError:
                idx.append(-1)                                          # not found...
    else:
      try:
        idx = int(labels)                                               # column given as integer number?
      except ValueError:
        try:
          idx = self.labels.index(labels)                               # locate string in label list
        except ValueError:
          try:
            idx = self.labels.index('1_'+labels)                        # locate '1_'+string in label list
          except ValueError:
            idx = None if labels is None else -1                        # not found...

    return np.array(idx) if isinstance(idx,list) else idx

# ------------------------------------------------------------------
  def label_dimension(self,
                      labels):
    '''
    tell the dimension (number of components) of the given column label(s).
    returns a numpy array when asked for a list of labels.
    transparently deals with column positions given as numbers and with headings given as strings.
    '''
    from collections import Iterable

    if isinstance(labels, Iterable) and not isinstance(labels, str):    # check whether list of labels is requested
      dim = []
      for label in labels:
        if label is not None:
          myDim = -1
          try:                                                          # column given as number?
            idx = int(label)
            myDim = 1                                                   # if found, has at least dimension 1
            if self.labels[idx][:2] == '1_':                            # column has multidim indicator?
              while idx+myDim < len(self.labels) and self.labels[idx+myDim][:2] == "%i_"%(myDim+1):
                myDim += 1                                              # keep adding while components are found
          except ValueError:                                            # column has string label
            if label in self.labels:                                    # can be directly found?
              myDim = 1                                                 # scalar by definition
            elif '1_'+label in self.labels:                             # look for first entry of possible multidim object
              idx = self.labels.index('1_'+label)                       # get starting column
              myDim = 1                                                 # (at least) one-dimensional
              while idx+myDim < len(self.labels) and self.labels[idx+myDim][:2] == "%i_"%(myDim+1):
                myDim += 1                                              # keep adding while going through object

          dim.append(myDim)
    else:
      dim = -1                                                          # assume invalid label
      idx = -1
      try:                                                              # column given as number?
        idx = int(labels)
        dim = 1                                                         # if found, has at least dimension 1
        if self.labels[idx][:2] == '1_':                                # column has multidim indicator?
          while idx+dim < len(self.labels) and self.labels[idx+dim][:2] == "%i_"%(dim+1):
            dim += 1                                                    # keep adding while components are found
      except ValueError:                                                # column has string label
        if labels in self.labels:                                       # can be directly found?
          dim = 1                                                       # scalar by definition
        elif '1_'+labels in self.labels:                                # look for first entry of possible multidim object
          idx = self.labels.index('1_'+labels)                          # get starting column
          dim = 1                                                       # is (at least) one-dimensional
          while idx+dim < len(self.labels) and self.labels[idx+dim][:2] == "%i_"%(dim+1):
            dim += 1                                                    # keep adding while going through object

    return np.array(dim) if isinstance(dim,list) else dim
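
  # Naming convention assumed by label_index/label_dimension (see the code above):
  # the components of a multi-dimensional quantity "v" are labeled '1_v','2_v',...
  # A small illustration with hypothetical labels:
  #   self.labels = ['x','1_v','2_v','3_v']
  #   self.label_index('v')            --> 1                (column of first component '1_v')
  #   self.label_dimension('v')        --> 3
  #   self.label_dimension(['x','v'])  --> np.array([1, 3])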

# ------------------------------------------------------------------
  def info_append(self,
                  what):
    '''
    add item or list to existing set of info lines
    '''
    if not isinstance(what, (str, unicode)):
      try:
        for item in what: self.info_append(item)
      except TypeError:                                                 # not iterable: stringify and store
        self.info += [str(what)]
    else:
      self.info += [what]

# ------------------------------------------------------------------
  def info_clear(self):
    '''
    delete any info block
    '''
    self.info = []

# ------------------------------------------------------------------
  def data_rewind(self):
    self.__IO__['in'].seek(self.__IO__['dataStart'])                    # position file at start of data section
    self.__IO__['readBuffer'] = []                                      # delete any non-advancing data reads

# ------------------------------------------------------------------
  def data_skipLines(self,count):
    '''
    wind forward by count number of lines
    '''
    alive = True                                                        # stay "alive" if there is nothing to skip
    for i in xrange(count):
      alive = self.data_read()

    return alive

# ------------------------------------------------------------------
  def data_read(self,advance = True):
    '''
    read next line (possibly buffered) and parse it into data array
    '''
    if len(self.__IO__['readBuffer']) > 0:
      line = self.__IO__['readBuffer'].pop(0)                           # take buffered content
    else:
      line = self.__IO__['in'].readline()                               # get next data row from file

    if not advance:
      self.__IO__['readBuffer'].append(line)                            # keep line just read in buffer

    if self.__IO__['labels']:
      items = line.split()[:self.__IO__['validReadSize']]               # use up to valid size (label count)
      self.data = items if len(items) == self.__IO__['validReadSize'] else []   # take entries only if their count matches
    else:
      self.data = line.split()                                          # take all

    return self.data != []
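
  # Usage note (illustration only): data_read(advance = False) peeks at the next
  # row -- the line is parsed into self.data but also pushed onto the read buffer,
  # so the following data_read() call returns the same row again before the file
  # actually advances.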

# ------------------------------------------------------------------
  def data_readLine(self,line):
    '''
    seek beginning of data and wind forward to selected line
    '''
    self.__IO__['in'].seek(self.__IO__['dataStart'])
    for i in xrange(line-1):
      self.__IO__['in'].readline()
    self.data_read()

# ------------------------------------------------------------------
  def data_readArray(self,
                     labels = []):
    '''
    read whole data of all (given) labels as numpy array
    '''
    if not isinstance(labels,list):
      labels = [labels]
    if labels == [None] or labels == []:
      use = None                                                        # use all columns (and keep labels intact)
      labels_missing = []
    else:
      indices = self.label_index(labels)                                # check requested labels
      present = np.where(indices >= 0)[0]                               # positions in request list of labels that are present ...
      missing = np.where(indices <  0)[0]                               # ... and missing in table
      labels_missing = np.array(labels)[missing]                        # labels of missing data

      columns = []
      for i,c in enumerate(indices[present]):                           # for all valid labels ...
        columns += range(c, c + (self.label_dimension(c) if str(c) != str(labels[present[i]])
                                 else 1))                               # ... add all components unless column referenced by number
      use = np.array(columns)

      self.labels = list(np.array(self.labels)[use]) if len(use) > 0 else []    # restrict labels to the retained columns
      self.__IO__['validReadSize'] = len(use)                           # update data width

    try:
      self.data_rewind()                                                # try to wind back to start of data
    except:
      pass                                                              # assume/hope we are at data start already...
    self.data = np.loadtxt(self.__IO__['in'], usecols = use, ndmin = 2)

    return labels_missing
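
  # Illustrative usage sketch (assuming a table whose labels are
  # 'x','1_v','2_v','3_v', i.e. a scalar x and a three-component v):
  #   missing = table.data_readArray(['x','v'])   # len(missing) == 0; table.data has shape (Nrows,4)
  #   table.data_readArray()                      # no argument: read all columns and keep labels intact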

# ------------------------------------------------------------------
  def data_write(self,delimiter = '\t'):
    '''
    write current data array and report alive output back
    '''
    if len(self.data) == 0: return True

    if isinstance(self.data[0],list):
      return self.output_write([delimiter.join(map(str,items)) for items in self.data])
    else:
      return self.output_write(delimiter.join(map(str,self.data)))

# ------------------------------------------------------------------
  def data_writeArray(self,format = '%g',delimiter = '\t'):
    '''
    write whole numpy array data
    '''
    return np.savetxt(self.__IO__['out'],self.data,fmt = format,delimiter = delimiter)

# ------------------------------------------------------------------
  def data_append(self,
                  what):
    '''
    add item or list to the current data row
    '''
    if not isinstance(what, (str, unicode)):
      try:
        for item in what: self.data_append(item)
      except TypeError:                                                 # not iterable: stringify and store
        self.data += [str(what)]
    else:
      self.data += [what]

# ------------------------------------------------------------------
  def data_set(self,
               what, where):
    '''
    update the data entry in the column labeled "where" (growing the row if necessary)
    '''
    idx = -1
    try:
      idx = self.labels.index(where)
      if len(self.data) <= idx:
        self.data_append(['n/a' for i in xrange(idx+1-len(self.data))]) # grow data if too short
      self.data[idx] = str(what)
    except ValueError:
      pass

    return idx

# ------------------------------------------------------------------
  def data_clear(self):
    self.data = []

# ------------------------------------------------------------------
  def data_asFloat(self):
    '''
    return current data row converted to floats (unparsable entries become 0.0)
    '''
    return map(self._transliterateToFloat,self.data)
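

# ------------------------------------------------------------------
# Minimal self-contained usage sketch (illustrative only, not part of the
# original module): build a small labeled table in memory, write it out,
# and read it back. All names and numbers below are made up.
if __name__ == '__main__':
  from StringIO import StringIO

  out   = StringIO()
  table = ASCIItable(fileIn = StringIO(''), fileOut = out, buffered = True)
  table.info_append('demo table')
  table.labels_append(['x','1_v','2_v','3_v'])
  table.head_write()
  table.data_append([1.0, 0.1, 0.2, 0.3])
  table.data_write()
  table.output_flush()

  back = ASCIItable(fileIn = StringIO(out.getvalue()), fileOut = sys.stdout)
  back.head_read()
  print back.labels                                   # ['x', '1_v', '2_v', '3_v']
  print back.label_dimension('v')                     # 3
  missing = back.data_readArray(['x','v'])            # no labels missing here
  print back.data                                     # one row with the four requested columns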