improved superclass handling and compare_Tables

Philip Eisenlohr 2016-09-04 18:46:53 -04:00
parent 32c4a20a46
commit cde7de4e9f
1 changed file with 81 additions and 86 deletions


@@ -17,7 +17,15 @@ class Test():
   variants = []
 
-  def __init__(self,description = ''):
+  def __init__(self, **kwargs):
+
+    defaults = {'description': '',
+                'keep':        False,
+                'accept':      False,
+                'update':      False,
+               }
+    for arg in defaults.keys():
+      setattr(self,arg,kwargs.get(arg) if kwargs.get(arg) else defaults[arg])
 
     fh = logging.FileHandler('test.log')                           # create file handler which logs even debug messages
     fh.setLevel(logging.DEBUG)
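The reworked constructor means subclasses no longer pass a bare description string but forward arbitrary keyword arguments, with missing keys falling back to the defaults dict. A minimal sketch of how a derived test might use the new interface (class name, variants, and description are hypothetical; Test is assumed to be in scope):

    class MyTest(Test):                                            # hypothetical subclass, for illustration only
      variants = ['small','large']                                 # made-up variant names

      def __init__(self,**kwargs):
        kwargs.setdefault('description','my hypothetical test')    # any of description/keep/accept/update can be preset here
        Test.__init__(self,**kwargs)                               # remaining keys fall back to the defaults dict above

The direct base-class call avoids relying on super(), which would require Test to be declared as a new-style class in this Python 2 code.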
@@ -34,60 +42,51 @@ class Test():
     logging.info('\n'.join(['+'*40,
                             '-'*40,
-                            '| '+description,
+                            '| '+self.description,
                             '-'*40,
                            ]))
 
     self.dirBase = os.path.dirname(os.path.realpath(sys.modules[self.__class__.__module__].__file__))
 
-    self.parser = OptionParser(description = '{} (using class: {})'.format(description,damask.version),
+    self.parser = OptionParser(description = '{} (Test class version: {})'.format(self.description,damask.version),
                                usage = './test.py [options]')
-    self.parser.add_option("-d", "--debug",
+    self.parser.add_option("-k", "--keep",
                            action = "store_true",
-                           dest = "debug",
-                           help = "debug run, don't calculate but use existing results")
-    self.parser.add_option("-p", "--pass",
+                           dest = "keep",
+                           help = "keep current results, just run postprocessing")
+    self.parser.add_option("--ok", "--accept",
                            action = "store_true",
                            dest = "accept",
                            help = "calculate results but always consider test as successfull")
-    self.parser.add_option("-u", "--update",
-                           action = "store_true",
-                           dest = "update",
-                           help = "use current test results as new reference"
-                           )
-
-    self.parser.set_defaults(debug = False,
-                             accept = False,
-                             update = False,
-                            )
+
+    self.parser.set_defaults(keep   = self.keep,
+                             accept = self.accept,
+                             update = self.update,
+                            )
 
   def execute(self):
     """Run all variants and report first failure."""
-    if self.options.debug:
-      for variant in xrange(len(self.variants)):
-        try:
-          self.postprocess(variant)
-          if not self.compare(variant):
-            return variant+1                                        # return culprit
-        except Exception as e :
-          logging.critical('\nWARNING:\n {}\n'.format(e))
-          return variant+1                                          # return culprit
-      return 0
-    else:
-      if not self.feasible(): return -1
-
-      self.clean()
-      self.prepareAll()
-
-      for variant,name in enumerate(self.variants):
-        try:
-          self.prepare(variant)
-          self.run(variant)
-          self.postprocess(variant)
-          if self.options.update:                                   # update requested
-            self.update(variant)
-          elif not (self.options.accept or self.compare(variant)):  # no update, do comparison
-            return variant+1                                        # return culprit
-        except Exception as e :
-          logging.critical('\nWARNING:\n {}\n'.format(e))
-          return variant+1                                          # return culprit
-      return 0
+    if not self.options.keep:
+      if not self.feasible(): return -1
+
+      self.clean()
+      self.prepareAll()
+
+    for variant,name in enumerate(self.variants):
+      try:
+        if not self.options.keep:
+          self.prepare(variant)
+          self.run(variant)
+
+        self.postprocess(variant)
+
+        if self.options.update and not self.update(variant):
+          logging.critical('update for "{}" failed.'.format(name))
+        elif not (self.options.accept or self.compare(variant)):    # no update, do comparison
+          return variant+1                                          # return culprit
+      except Exception as e :
+        logging.critical('exception during variant execution: {}'.format(e))
+        return variant+1                                            # return culprit
+    return 0
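With -d/--debug and -u/--update removed and -p/--pass renamed to --ok/--accept, run-time behaviour is now steered by --keep and --ok (plus the keep/accept/update defaults handed to the constructor). Assuming a test script built on this class, typical invocations would look like:

    ./test.py             # full run: feasibility check, clean, prepare, run, postprocess, compare
    ./test.py --keep      # skip feasibility check, cleaning, preparation, and run; only postprocess and compare existing results
    ./test.py --ok        # run everything, but consider the test successful regardless of the comparison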
@@ -97,21 +96,18 @@ class Test():
 
   def clean(self):
     """Delete directory tree containing current results."""
-    status = True
-
     try:
       shutil.rmtree(self.dirCurrent())
     except:
       logging.warning('removal of directory "{}" not possible...'.format(self.dirCurrent()))
-      status = status and False
 
     try:
       os.mkdir(self.dirCurrent())
+      return True
     except:
-      logging.critical('creation of directory "{}" failed...'.format(self.dirCurrent()))
-      status = status and False
-
-    return status
+      logging.critical('creation of directory "{}" failed.'.format(self.dirCurrent()))
+      return False
 
   def prepareAll(self):
     """Do all necessary preparations for the whole test"""
@@ -139,8 +135,8 @@ class Test():
 
   def update(self,variant):
     """Update reference with current results."""
-    logging.debug('Update not necessary')
-    return True
+    logging.critical('update not supported.')
+    return False
 
   def dirReference(self):
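Since the base class now treats updating as unsupported (critical log entry, return False), a test that should work with the update mechanism is expected to override update() and return True on success; execute() logs a failure whenever the override returns a falsy value. A minimal sketch of such an override, assuming a single, hypothetical result file per variant and relying on the module-level os, shutil, and logging imports used elsewhere in this file:

      def update(self,variant):
        """Update reference with current results (hypothetical override)."""
        try:
          shutil.copy2(os.path.join(self.dirCurrent(),  'results.txt'),    # made-up file name
                       os.path.join(self.dirReference(),'results.txt'))
          return True                                                      # success: execute() logs nothing
        except IOError as e:
          logging.error('update failed: {}'.format(e))
          return False                                                     # execute() reports the failed update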
@@ -467,17 +463,13 @@ class Test():
                      columns = [None],                              # list of list of column labels (per file)
                      rtol = 1e-5,
                      atol = 1e-8,
-                     preFilter = -1.0,
-                     postFilter = -1.0,
                      debug = False):
-    """
-    compare tables with np.allclose
-
-    threshold can be used to ignore small values (a negative number disables this feature)
-    """
+    """compare multiple tables with np.allclose"""
     if not (isinstance(files, Iterable) and not isinstance(files, str)):  # check whether list of files is requested
       files = [str(files)]
+
+    if len(files) < 2: return True                                  # single table is always close to itself...
+
     tables = [damask.ASCIItable(name = filename,readonly = True) for filename in files]
     for table in tables:
       table.head_read()
@@ -486,7 +478,7 @@ class Test():
     columns = columns[:len(files)]                                  # truncate to same length as files
 
     for i,column in enumerate(columns):
-      if column is None: columns[i] = tables[i].labels(raw = True)  # if no column is given, read all
+      if column is None: columns[i] = tables[i].labels(raw = False) # if no column is given, use all
 
     logging.info('comparing ASCIItables')
     for i in xrange(len(columns)):
@@ -494,39 +486,42 @@ class Test():
                     ([columns[i]] if not (isinstance(columns[i], Iterable) and not isinstance(columns[i], str)) else \
                       columns[i]
                     )
-      logging.info(files[i]+':'+','.join(columns[i]))
+      logging.info(files[i]+': '+','.join(columns[i]))
 
-    if len(files) < 2: return True                                  # single table is always close to itself...
-
-    maximum = np.zeros(len(columns[0]),dtype='f')
-    data = []
-    for table,labels in zip(tables,columns):
-      table.data_readArray(labels)
-      data.append(np.where(np.abs(table.data)<preFilter,np.zeros_like(table.data),table.data))
-      maximum += np.abs(table.data).max(axis=0)
-      table.close()
+    dimensions = tables[0].label_dimension(columns[0])              # width of each requested column
+    maximum    = np.zeros_like(columns[0],dtype=float)              # one magnitude per column entry
+    data       = []                                                 # list of feature table extracted from each file (ASCII table)
+
+    for i,(table,labels) in enumerate(zip(tables,columns)):
+      if np.any(dimensions != table.label_dimension(labels)):       # check data object consistency
+        logging.critical('Table {} differs in data layout.'.format(files[i]))
+        return False
+      table.data_readArray(labels)                                  # read data, ...
+      data.append(table.data)                                       # ... store, ...
+      table.close()                                                 # ... close
+
+      for j,label in enumerate(labels):                             # iterate over object labels
+        maximum[j] = np.maximum(\
+                       maximum[j],
+                       np.amax(np.linalg.norm(table.data[:,table.label_indexrange(label)],
+                                              axis=1))
+                     )                                              # find maximum Euclidean norm across rows
+
+    maximum = np.where(maximum > 0.0, maximum, 1.0)                 # avoid div by zero for zero columns
+    maximum = np.repeat(maximum,dimensions)                         # spread maximum over columns of each object
 
-    maximum /= len(tables)
-    maximum = np.where(maximum >0.0, maximum, 1)                    # avoid div by zero for empty columns
     for i in xrange(len(data)):
-      data[i] /= maximum
-
-    mask = np.zeros_like(table.data,dtype='bool')
-
-    for table in data:
-      mask |= np.where(np.abs(table)<postFilter,True,False)         # mask out (all) tiny values
-
-    if debug:
-      logging.debug(str(maximum))
-      allclose = np.absolute(data[0]-data[1]) <= (atol + rtol*np.absolute(data[1]))
-      for ok,valA,valB in zip(allclose,data[0],data[1]):
-        logging.debug('{}:\n{}\n{}'.format(ok,valA,valB))
+      data[i] /= maximum                                            # normalize each table
 
     allclose = True                                                 # start optimistic
     for i in xrange(1,len(data)):
-      if debug:
-        t0 = np.where(mask,0.0,data[i-1])
-        t1 = np.where(mask,0.0,data[i  ])
-        j = np.argmin(np.abs(t1)*rtol+atol-np.abs(t0-t1))
-        logging.info('{:f}'.format(np.amax(np.abs(t0-t1)/(np.abs(t1)*rtol+atol))))
-        logging.info('{:f} {:f}'.format((t0*maximum).flatten()[j],(t1*maximum).flatten()[j]))
-      allclose &= np.allclose(np.where(mask,0.0,data[i-1]),
-                              np.where(mask,0.0,data[i  ]),rtol,atol)  # accumulate "pessimism"
+      allclose &= np.allclose(data[i-1],data[i],rtol,atol)          # accumulate "pessimism"
 
     return allclose
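The rewritten comparison first normalizes every data object by the largest Euclidean row norm found for it across all tables, so the rtol and atol of np.allclose act on values of order one, and then ANDs pairwise np.allclose results over consecutive tables. A self-contained NumPy sketch of that normalization idea (shapes and values are made up; the real method pulls the data from damask.ASCIItable objects):

    import numpy as np

    rtol, atol = 1e-5, 1e-8

    a = np.random.rand(10,3)                                       # e.g. one 3-vector column read from table 0
    b = a * (1.0 + 1e-7)                                           # nearly identical column from table 1

    maximum = max(np.linalg.norm(a,axis=1).max(),                  # largest Euclidean row norm across all tables
                  np.linalg.norm(b,axis=1).max())
    maximum = maximum if maximum > 0.0 else 1.0                    # avoid division by zero for all-zero columns

    print(np.allclose(a/maximum, b/maximum, rtol, atol))           # the real code accumulates this over all table pairs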