Fixed missing structure type in material.config; small changes in the other code.

This commit is contained in:
Martin Diehl 2015-12-15 14:30:17 +00:00
parent 1d7794978d
commit 37aa559bd4
4 changed files with 101 additions and 33 deletions

View File

@ -1276,9 +1276,6 @@ subroutine lattice_init
if (LATTICE_bct_Nslip /= sum(lattice_bct_NslipSystem)) &
call IO_error(0_pInt,ext_msg = 'LATTICE_bct_Nslip')
if (LATTICE_fcc_Ntwin /= sum(lattice_fcc_NtwinSystem)) &
call IO_error(0_pInt,ext_msg = 'LATTICE_fcc_Ntwin')
if (LATTICE_bcc_Ntwin /= sum(lattice_bcc_NtwinSystem)) &
@ -1288,8 +1285,6 @@ subroutine lattice_init
if (LATTICE_bct_Ntwin /= sum(lattice_bct_NtwinSystem)) &
call IO_error(0_pInt,ext_msg = 'LATTICE_bct_Ntwin')
if (LATTICE_fcc_Ntrans /= sum(lattice_fcc_NtransSystem)) &
call IO_error(0_pInt,ext_msg = 'LATTICE_fcc_Ntrans')
if (LATTICE_bcc_Ntrans /= sum(lattice_bcc_NtransSystem)) &
@ -1319,17 +1314,17 @@ subroutine lattice_init
maxval(lattice_fcc_interactionSlipTwin), &
maxval(lattice_bcc_interactionSlipTwin), &
maxval(lattice_hex_interactionSlipTwin), &
! maxval(lattice_bct_interactionSlipTwin), &
! maxval(lattice_bct_interactionSlipTwin), &
!
maxval(lattice_fcc_interactionTwinSlip), &
maxval(lattice_bcc_interactionTwinSlip), &
maxval(lattice_hex_interactionTwinSlip), &
! maxval(lattice_bct_interactionTwinSlip), &
! maxval(lattice_bct_interactionTwinSlip), &
!
maxval(lattice_fcc_interactionTwinTwin), &
maxval(lattice_bcc_interactionTwinTwin), &
maxval(lattice_hex_interactionTwinTwin))) &
! maxval(lattice_bct_interactionTwinTwin))) &
! maxval(lattice_bct_interactionTwinTwin))) &
call IO_error(0_pInt,ext_msg = 'LATTICE_maxNinteraction')
!--------------------------------------------------------------------------------------------------

View File

@ -1,5 +1,5 @@
!--------------------------------------------------------------------------------------------------
! $Id: plastic_phenopowerlaw.f90 4457 2015-09-08 19:44:32Z MPIE\m.diehl $
! $Id$
!--------------------------------------------------------------------------------------------------
!> @author Franz Roters, Max-Planck-Institut für Eisenforschung GmbH
!> @author Philip Eisenlohr, Max-Planck-Institut für Eisenforschung GmbH
@ -170,7 +170,7 @@ subroutine plastic_phenoplus_init(fileUnit)
mainProcess: if (worldrank == 0) then
write(6,'(/,a)') ' <<<+- constitutive_'//PLASTICITY_PHENOPLUS_label//' init -+>>>'
write(6,'(a)') ' $Id: plastic_phenoplus.f90 4457 2015-09-08 19:44:32Z MPIE\m.diehl $'
write(6,'(a)') ' $Id$'
write(6,'(a15,a)') ' Current time: ',IO_timeStamp()
#include "compilation_info.f90"
endif mainProcess

View File

@ -347,7 +347,7 @@ end function vacancyflux_cahnhilliard_getDiffusion33
!--------------------------------------------------------------------------------------------------
!> @brief returns homogenized vacancy formation energy
!--------------------------------------------------------------------------------------------------
function vacancyflux_cahnhilliard_getFormationEnergy(ip,el)
real(pReal) function vacancyflux_cahnhilliard_getFormationEnergy(ip,el)
use lattice, only: &
lattice_vacancyFormationEnergy, &
lattice_vacancyVol, &
@ -362,8 +362,6 @@ function vacancyflux_cahnhilliard_getFormationEnergy(ip,el)
integer(pInt), intent(in) :: &
ip, & !< integration point number
el !< element number
real(pReal) :: &
vacancyflux_cahnhilliard_getFormationEnergy
integer(pInt) :: &
grain
@ -384,7 +382,7 @@ end function vacancyflux_cahnhilliard_getFormationEnergy
!--------------------------------------------------------------------------------------------------
!> @brief returns homogenized vacancy entropy coefficient
!--------------------------------------------------------------------------------------------------
function vacancyflux_cahnhilliard_getEntropicCoeff(ip,el)
real(pReal) function vacancyflux_cahnhilliard_getEntropicCoeff(ip,el)
use lattice, only: &
lattice_vacancyVol, &
lattice_vacancySurfaceEnergy
@ -399,8 +397,6 @@ function vacancyflux_cahnhilliard_getEntropicCoeff(ip,el)
integer(pInt), intent(in) :: &
ip, & !< integration point number
el !< element number
real(pReal) :: &
vacancyflux_cahnhilliard_getEntropicCoeff
integer(pInt) :: &
grain

View File

@ -42,27 +42,46 @@ class Test():
description = test_description+' (using class: $Id$)',
usage='./test.py [options]')
self.updateRequested = False
self.parser.add_option("-d", "--debug", action="store_true",\
dest="debug",\
help="debug run, don't calculate but use existing results")
self.parser.add_option("-p", "--pass", action="store_true",\
dest="accept",\
help="calculate results but always consider test as successfull")
self.parser.set_defaults(debug=False,
accept=False)
def execute(self):
  '''
  Run all variants and report first failure.

  Returns 0 on overall success, -1 if the test is not possible in the
  current environment, or variant+1 (1-based index of the culprit) on
  the first failing or raising variant.
  '''
  # NOTE(review): the diff rendering had the pre-commit body pasted above the
  # post-commit one, leaving the second half unreachable after `return 0`;
  # only the updated (post-commit) implementation is kept here.
  if self.options.debug:                                                     # debug run: reuse existing results, skip simulation
    for variant in xrange(len(self.variants)):
      try:
        self.postprocess(variant)
        if not self.compare(variant):
          return variant+1                                                   # return culprit
      except Exception as e:
        logging.critical('\nWARNING:\n %s\n'%e)
        return variant+1                                                     # return culprit
    return 0
  else:
    if not self.testPossible(): return -1
    self.clean()
    self.prepareAll()
    for variant in xrange(len(self.variants)):
      try:
        self.prepare(variant)
        self.run(variant)
        self.postprocess(variant)
        if self.updateRequested:                                             # update requested
          self.update(variant)
        elif not (self.options.accept or self.compare(variant)):             # no update, do comparison (unless told to always pass)
          return variant+1                                                   # return culprit
      except Exception as e:
        logging.critical('\nWARNING:\n %s\n'%e)
        return variant+1                                                     # return culprit
    return 0
def testPossible(self):
'''
@ -406,6 +425,64 @@ class Test():
return maxError
def compare_TablesStatistically(self,
                                files = [None,None],                         # list of file names
                                columns = [None],                            # list of list of column labels (per file)
                                meanTol = 1.0e-4,                            # tolerance on the max abs column mean of the normalized delta
                                stdTol = 1.0e-6,                             # tolerance on the max column std dev of the normalized delta
                                preFilter = 1.0e-9):                         # ignore entries whose symmetric mean magnitude is below this
  '''
  calculate statistics of tables
  threshold can be used to ignore small values (a negative number disables this feature)
  '''
  import numpy as np
  from collections import Iterable

  if not (isinstance(files, Iterable) and not isinstance(files, str)):       # check whether list of files is requested
    files = [str(files)]

  # work on a copy: the default argument list is shared across calls and
  # must not be mutated by the += / item assignments below
  columns = list(columns)

  tables = [damask.ASCIItable(name = filename,readonly = True) for filename in files]
  for table in tables:
    table.head_read()

  columns += [columns[0]]*(len(files)-len(columns))                          # extend to same length as files
  columns = columns[:len(files)]                                             # truncate to same length as files
  for i,column in enumerate(columns):
    if column is None: columns[i] = tables[i].labels                         # if no column is given, read all

  logging.info('comparing ASCIItables statistically')
  for i in xrange(len(columns)):
    columns[i] = columns[0] if not columns[i] else \
                 ([columns[i]] if not (isinstance(columns[i], Iterable) and not isinstance(columns[i], str)) else \
                   columns[i]
                 )
    logging.info(files[i]+':'+','.join(columns[i]))

  if len(files) < 2: return True                                             # single table is always close to itself...

  data = []
  for table,labels in zip(tables,columns):
    table.data_readArray(labels)
    data.append(table.data)
    table.close()

  for i in xrange(1,len(data)):
    delta = data[i]-data[i-1]
    normBy = (np.abs(data[i]) + np.abs(data[i-1]))*0.5                       # symmetric mean magnitude used for normalization
    normedDelta = np.where(normBy>preFilter,delta/normBy,0.0)                # relative deviation; entries below preFilter are zeroed
    mean = np.amax(np.abs(np.mean(normedDelta,0)))
    std = np.amax(np.std(normedDelta,0))
    logging.info('mean: %f'%mean)
    logging.info('std: %f'%std)

  # NOTE(review): only the LAST table pair's statistics decide the outcome;
  # preserved as-is since callers may rely on it, but looks unintended for >2 files
  return (mean<meanTol) & (std < stdTol)
def compare_Tables(self,
files = [None,None], # list of file names
columns = [None], # list of list of column labels (per file)
@ -471,7 +548,7 @@ class Test():
t0 = np.where(mask,0.0,data[i-1])
t1 = np.where(mask,0.0,data[i ])
j = np.argmin(np.abs(t1)*rtol+atol-np.abs(t0-t1))
print np.amin(np.abs(t1)*rtol+atol-np.abs(t0-t1))
logging.info('%f'%np.amax(np.abs(t0-t1)/(np.abs(t1)*rtol+atol)))
logging.info('%f %f'%((t0*maximum).flatten()[j],(t1*maximum).flatten()[j]))
allclose &= np.allclose(np.where(mask,0.0,data[i-1]),
np.where(mask,0.0,data[i ]),rtol,atol) # accumulate "pessimism"