#!/usr/bin/env python3
# -*- coding: UTF-8 no BOM -*-

import os,sys
import numpy as np
from optparse import OptionParser
import damask

scriptName = os.path.splitext(os.path.basename(__file__))[0]
scriptID = ' '.join([scriptName,damask.version])

# --------------------------------------------------------------------
# MAIN
# --------------------------------------------------------------------

parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """
Add cumulative (sum of first to current row) values for given label(s).

""", version = scriptID)

parser.add_option('-l','--label',
                  dest = 'label',
                  action = 'extend', metavar = '<string LIST>',
                  help = 'columns to cumulate')

parser.set_defaults(label = [],
                   )

(options,filenames) = parser.parse_args()

if len(options.label) == 0:
  parser.error('no data column(s) specified.')
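
# A minimal usage sketch (the column label 'f' and file name are hypothetical examples):
#   %prog --label f data.txt
# appends a column cum(f) holding the running sum, e.g. f = 1, 2, 3 becomes cum(f) = 1, 3, 6.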

# --- loop over input files -------------------------------------------------------------------------

if filenames == []: filenames = [None]
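# a single None entry follows the usual DAMASK convention of reading from STDIN and writing to STDOUT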

for name in filenames:
  try:
    table = damask.ASCIItable(name = name,
                              buffered = False)
  except IOError: continue                                                          # skip files that cannot be opened
  damask.util.report(scriptName,name)

# ------------------------------------------ read header ------------------------------------------

  table.head_read()

# ------------------------------------------ sanity checks ----------------------------------------

  errors  = []
  remarks = []
  columns = []
  dims    = []

  for what in options.label:
    dim = table.label_dimension(what)
    if dim < 0: remarks.append('column {} not found...'.format(what))
    else:
      dims.append(dim)
      columns.append(table.label_index(what))
      table.labels_append('cum({})'.format(what) if dim == 1 else
                          ['{}_cum({})'.format(i+1,what) for i in range(dim)])      # extend ASCII header with new labels

  if remarks != []: damask.util.croak(remarks)
  if errors  != []:
    damask.util.croak(errors)
    table.close(dismiss = True)
    continue

# ------------------------------------------ assemble header ---------------------------------------

  table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
  table.head_write()

# ------------------------------------------ process data ------------------------------------------

  mask = []
  for col,dim in zip(columns,dims): mask += range(col,col+dim)                      # isolate data columns to cumulate
  cumulated = np.zeros(len(mask),dtype=float)                                       # prepare output field
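  # 'cumulated' keeps the running totals across the row loop below; each output row receives the sums up to and including that row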

  outputAlive = True
  while outputAlive and table.data_read():                                          # read next data line of ASCII table
    for i,col in enumerate(mask):
      cumulated[i] += float(table.data[col])                                        # cumulate values
    table.data_append(cumulated)
    outputAlive = table.data_write()                                                # output processed line

# ------------------------------------------ output finalization -----------------------------------

  table.close()                                                                     # close ASCII tables