Merge branch 'development' of magit1.mpie.de:damask/DAMASK into development

Tias Maiti 2017-08-10 11:09:37 -04:00
commit 3860eecb7c
17 changed files with 203 additions and 294 deletions

View File

@@ -1 +1 @@
-./env/DAMASK.csh
+env/DAMASK.csh

View File

@@ -1 +1 @@
-./env/DAMASK.sh
+env/DAMASK.sh

View File

@@ -1 +1 @@
-./env/DAMASK.zsh
+env/DAMASK.zsh

DAMASK_prerequisits.sh (new executable file, 97 lines)
View File

@@ -0,0 +1,97 @@
#!/usr/bin/env bash
OUTFILE="system_report.txt"
date +"%m-%d-%y" > $OUTFILE
# redirect STDOUT and STDERR to logfile (append, so the date stamp survives)
# https://stackoverflow.com/questions/11229385/redirect-all-output-in-a-bash-script-when-using-set-x
exec >> $OUTFILE 2>&1
# directory, file is not a symlink by definition
# https://stackoverflow.com/questions/59895/getting-the-source-directory-of-a-bash-script-from-within
DAMASK_ROOT="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
echo ==============================================================================================
echo DAMASK settings
echo ==============================================================================================
echo DAMASK_ROOT:
echo $DAMASK_ROOT
echo
echo Version:
cat "$DAMASK_ROOT/VERSION"
echo
echo Settings in CONFIG:
cat "$DAMASK_ROOT/CONFIG"
echo
echo ==============================================================================================
echo System
echo ==============================================================================================
uname -a
echo
echo ==============================================================================================
echo Python
echo ==============================================================================================
DEFAULT_PYTHON=python2.7
for executable in python python2 python3 python2.7; do
if [[ "$(which $executable)x" != "x" ]]; then
echo $executable version: $($executable --version 2>&1)
else
echo $executable does not exist
fi
done
echo Location of $DEFAULT_PYTHON: $(ls -la $(which $DEFAULT_PYTHON))
echo
for module in numpy scipy;do
echo ----------------------------------------------------------------------------------------------
echo $module
echo ----------------------------------------------------------------------------------------------
$DEFAULT_PYTHON -c "import $module; \
print('Version: {}'.format($module.__version__)); \
print('Location: {}'.format($module.__file__))"
done
echo ----------------------------------------------------------------------------------------------
echo vtk
echo ----------------------------------------------------------------------------------------------
$DEFAULT_PYTHON -c "import vtk; \
print('Version: {}'.format(vtk.vtkVersion.GetVTKVersion())); \
print('Location: {}'.format(vtk.__file__))"
echo ----------------------------------------------------------------------------------------------
echo h5py
echo ----------------------------------------------------------------------------------------------
$DEFAULT_PYTHON -c "import h5py; \
print('Version: {}'.format(h5py.version.version)); \
print('Location: {}'.format(h5py.__file__))"
echo
echo ==============================================================================================
echo GCC
echo ==============================================================================================
for executable in gcc g++ gfortran ;do
if [[ "$(which $executable)x" != "x" ]]; then
echo $(which $executable) version: $($executable --version 2>&1)
else
echo $executable does not exist
fi
done
echo
echo ==============================================================================================
echo Intel Compiler Suite
echo ==============================================================================================
for executable in icc icpc ifort ;do
if [[ "$(which $executable)x" != "x" ]]; then
echo $(which $executable) version: $($executable --version 2>&1)
else
echo $executable does not exist
fi
done
echo
echo ==============================================================================================
echo MPI Wrappers
echo ==============================================================================================
for executable in mpicc mpiCC mpicxx mpifort mpif90 mpif77; do
if [[ "$(which $executable)x" != "x" ]]; then
echo $(which $executable) version: $($executable --show 2>&1)
else
echo $executable does not exist
fi
done
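
A note on the module checks above: each probe is an independent python -c call, so one missing module does not stop the report. A minimal Python sketch of the same per-module version/location probe, for illustration only (the module list is an example, not part of the script):

    import importlib

    def probe(modules):
        """Print version and install location of each importable module."""
        for name in modules:
            try:
                mod = importlib.import_module(name)
            except ImportError:
                print('{}: not installed'.format(name))
                continue
            version = getattr(mod, '__version__', 'unknown')  # vtk, e.g., exposes its version differently
            print('{}: version {} at {}'.format(name, version, getattr(mod, '__file__', '?')))

    probe(['numpy', 'scipy', 'vtk', 'h5py'])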

@@ -1 +1 @@
-Subproject commit 057371b82e3f5e880271b9631ace46c54280a753
+Subproject commit 19a53f6229603aeafb2466b58679a1cd04fc0142

View File

@@ -1 +1 @@
-v2.0.1-803-gdd68374
+v2.0.1-833-ga28b4b3

env/DAMASK.csh (vendored, 12 changed lines)
View File

@@ -26,15 +26,11 @@ if ( "x$DAMASK_NUM_THREADS" == "x" ) then
   set DAMASK_NUM_THREADS=1
 endif
-# according to http://software.intel.com/en-us/forums/topic/501500
-# this seems to make sense for the stack size
-if ( `which free` != "free: Command not found." ) then
-  set freeMem=`free -k | grep -E '(Mem|Speicher):' | awk '{print $4;}'`
-  set heap=` expr $freeMem / 2`
-  set stack=`expr $freeMem / $DAMASK_NUM_THREADS / 2`
+# currently, there is no information that unlimited causes problems
+# still, http://software.intel.com/en-us/forums/topic/501500 suggests fixing it
 # http://superuser.com/questions/220059/what-parameters-has-ulimit
-limit datasize  $heap      # maximum heap size (kB)
-limit stacksize $stack     # maximum stack size (kB)
+limit datasize  unlimited  # maximum heap size (kB)
+limit stacksize unlimited  # maximum stack size (kB)
 endif
 if ( `limit | grep memoryuse` != "" ) then
   limit memoryuse  unlimited # maximum physical memory size

env/DAMASK.sh (vendored, 31 changed lines)
View File

@@ -1,9 +1,12 @@
 # sets up an environment for DAMASK on bash
 # usage: source DAMASK.sh
+function canonicalPath {
+  python -c "import os,sys; print(os.path.realpath(os.path.expanduser(sys.argv[1])))" $1
+}
 if [ "$OSTYPE" == "linux-gnu" ] || [ "$OSTYPE" == 'linux' ]; then
-  DAMASK_ROOT=$(python -c "import os,sys; print(os.path.realpath(os.path.expanduser(sys.argv[1])))" "$(dirname $BASH_SOURCE)")
+  DAMASK_ROOT=$(dirname $BASH_SOURCE)
 else
   [[ "${BASH_SOURCE::1}" == "/" ]] && BASE="" || BASE="$(pwd)/"
   STAT=$(stat "$(dirname $BASE$BASH_SOURCE)")
@@ -12,9 +15,11 @@ fi
 # transition compatibility (renamed $DAMASK_ROOT/DAMASK_env.sh to $DAMASK_ROOT/env/DAMASK.sh)
 if [ ${BASH_SOURCE##*/} == "DAMASK.sh" ]; then
-  DAMASK_ROOT=$DAMASK_ROOT'/..'
+  DAMASK_ROOT="$DAMASK_ROOT/.."
 fi
+DAMASK_ROOT=$(canonicalPath $DAMASK_ROOT)
 # shorthand command to change to DAMASK_ROOT directory
 eval "function DAMASK_root() { cd $DAMASK_ROOT; }"
@@ -28,25 +33,19 @@ unset -f set
 # add DAMASK_BIN if present
 [ "x$DAMASK_BIN" != "x" ] && PATH=$DAMASK_BIN:$PATH
-SOLVER=$(which DAMASK_spectral || true 2>/dev/null)
+SOLVER=$(type -p DAMASK_spectral || true 2>/dev/null)
 [ "x$SOLVER" == "x" ] && SOLVER='Not found!'
-PROCESSING=$(which postResults || true 2>/dev/null)
+PROCESSING=$(type -p postResults || true 2>/dev/null)
 [ "x$PROCESSING" == "x" ] && PROCESSING='Not found!'
 [ "x$DAMASK_NUM_THREADS" == "x" ] && DAMASK_NUM_THREADS=1
-# according to http://software.intel.com/en-us/forums/topic/501500
-# this seems to make sense for the stack size
-FREE=$(type -p free 2>/dev/null)
-if [ "x$FREE" != "x" ]; then
-  freeMem=$(free -k | grep -E '(Mem|Speicher):' | awk '{print $4;}')
+# currently, there is no information that unlimited causes problems
+# still, http://software.intel.com/en-us/forums/topic/501500 suggests fixing it
 # http://superuser.com/questions/220059/what-parameters-has-ulimit
-  ulimit -d unlimited 2>/dev/null \
-  || ulimit -d $(expr $freeMem / 2) 2>/dev/null                       # maximum heap size (kB)
-  ulimit -s unlimited 2>/dev/null \
-  || ulimit -s $(expr $freeMem / $DAMASK_NUM_THREADS / 2) 2>/dev/null # maximum stack size (kB)
-fi
+ulimit -d unlimited 2>/dev/null # maximum heap size (kB)
+ulimit -s unlimited 2>/dev/null # maximum stack size (kB)
 ulimit -v unlimited 2>/dev/null # maximum virtual memory size
 ulimit -m unlimited 2>/dev/null # maximum physical memory size
@@ -64,8 +63,8 @@ if [ ! -z "$PS1" ]; then
   echo "Multithreading DAMASK_NUM_THREADS=$DAMASK_NUM_THREADS"
   if [ "x$PETSC_DIR" != "x" ]; then
     echo "PETSc location $PETSC_DIR"
-    [[ $(python -c "import os,sys; print(os.path.realpath(os.path.expanduser(sys.argv[1])))" "$PETSC_DIR") == $PETSC_DIR ]] \
-    || echo " ~~> "$(python -c "import os,sys; print(os.path.realpath(os.path.expanduser(sys.argv[1])))" "$PETSC_DIR")
+    [[ $(canonicalPath "$PETSC_DIR") == $PETSC_DIR ]] \
+    || echo " ~~> "$(canonicalPath "$PETSC_DIR")
   fi
   echo "MSC.Marc/Mentat $MSC_ROOT"
   echo
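
The switch from free-memory-derived limits to plain unlimited affects the heap (ulimit -d) and stack (ulimit -s). For reference, a small Python sketch (POSIX only) that inspects the same limits through the standard resource module; RLIMIT_DATA and RLIMIT_STACK are the counterparts of -d and -s:

    import resource

    # RLIMIT_DATA corresponds to ulimit -d (heap), RLIMIT_STACK to ulimit -s (stack)
    for label, rlimit in [('heap (data segment)', resource.RLIMIT_DATA),
                          ('stack', resource.RLIMIT_STACK)]:
        soft, hard = resource.getrlimit(rlimit)
        fmt = lambda v: 'unlimited' if v == resource.RLIM_INFINITY else v
        print('{}: soft={} hard={}'.format(label, fmt(soft), fmt(hard)))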

env/DAMASK.zsh (vendored, 30 changed lines)
View File

@@ -1,6 +1,10 @@
 # sets up an environment for DAMASK on zsh
 # usage: source DAMASK.zsh
+function canonicalPath {
+  python -c "import os,sys; print(os.path.realpath(os.path.expanduser(sys.argv[1])))" $1
+}
 # transition compatibility (renamed $DAMASK_ROOT/DAMASK_env.zsh to $DAMASK_ROOT/env/DAMASK.zsh)
 if [ ${0:t:r} = 'DAMASK' ]; then
   DAMASK_ROOT=${0:a:h}'/..'
@@ -21,19 +25,19 @@ unset -f set
 # add DAMASK_BIN if present
 [ "x$DAMASK_BIN" != "x" ] && PATH=$DAMASK_BIN:$PATH
-SOLVER=`which DAMASK_spectral || True 2>/dev/null`
-PROCESSING=`which postResults || True 2>/dev/null`
-[ "x$DAMASK_NUM_THREADS" = "x" ] && DAMASK_NUM_THREADS=1
-# according to http://software.intel.com/en-us/forums/topic/501500
-# this seems to make sense for the stack size
-if [ "`which free 2>/dev/null`" != "free not found" ]; then
-  freeMem=`free -k | grep -E '(Mem|Speicher):' | awk '{print $4;}'`
+SOLVER=$(type -p DAMASK_spectral || true 2>/dev/null)
+[ "x$SOLVER" == "x" ] && SOLVER='Not found!'
+PROCESSING=$(type -p postResults || true 2>/dev/null)
+[ "x$PROCESSING" == "x" ] && PROCESSING='Not found!'
+[ "x$DAMASK_NUM_THREADS" == "x" ] && DAMASK_NUM_THREADS=1
+# currently, there is no information that unlimited causes problems
+# still, http://software.intel.com/en-us/forums/topic/501500 suggests fixing it
 # http://superuser.com/questions/220059/what-parameters-has-ulimit
-  #ulimit -d `expr $freeMem / 2` 2>/dev/null                      # maximum heap size (kB)
-  ulimit -s `expr $freeMem / $DAMASK_NUM_THREADS / 2` 2>/dev/null # maximum stack size (kB)
-fi
+ulimit -d unlimited 2>/dev/null # maximum heap size (kB)
+ulimit -s unlimited 2>/dev/null # maximum stack size (kB)
 ulimit -v unlimited 2>/dev/null # maximum virtual memory size
 ulimit -m unlimited 2>/dev/null # maximum physical memory size
@@ -51,8 +55,8 @@ if [ ! -z "$PS1" ]; then
   echo "Multithreading DAMASK_NUM_THREADS=$DAMASK_NUM_THREADS"
   if [ "x$PETSC_DIR" != "x" ]; then
     echo "PETSc location $PETSC_DIR"
-    [[ $(python -c "import os,sys; print(os.path.realpath(os.path.expanduser(sys.argv[1])))" "$PETSC_DIR") == $PETSC_DIR ]] \
-    || echo " ~~> "$(python -c "import os,sys; print(os.path.realpath(os.path.expanduser(sys.argv[1])))" "$PETSC_DIR")
+    [[ $(canonicalPath "$PETSC_DIR") == $PETSC_DIR ]] \
+    || echo " ~~> "$(canonicalPath "$PETSC_DIR")
   fi
   [[ "x$PETSC_ARCH" == "x" ]] \
   || echo "PETSc architecture $PETSC_ARCH"
@@ -79,7 +83,7 @@ fi
 export DAMASK_NUM_THREADS
 export PYTHONPATH=$DAMASK_ROOT/lib:$PYTHONPATH
-for var in BASE STAT SOLVER PROCESSING FREE DAMASK_BIN MATCH; do
+for var in BASE STAT SOLVER PROCESSING FREE DAMASK_BIN; do
   unset "${var}"
 done
 for var in DAMASK MSC; do
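
The canonicalPath helper added to both DAMASK.sh and DAMASK.zsh is a thin wrapper around two standard-library calls. A sketch of what it resolves (the path below is hypothetical):

    import os

    def canonical_path(path):
        """Expand ~ and resolve symlinks/relative components, as canonicalPath does."""
        return os.path.realpath(os.path.expanduser(path))

    print(canonical_path('~/DAMASK/env/..'))   # e.g. /home/user/DAMASK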

View File

@@ -43,7 +43,7 @@ q_slip 1.0 # q-exponent in glide velocity
 CLambdaSlip 10.0 # Adj. parameter controlling dislocation mean free path
 D0 4.0e-5 # Vacancy diffusion prefactor [m**2/s]
 Qsd 4.5e-19 # Activation energy for climb [J]
-Catomicvolume 1.0 # Adj. parameter controlling the atomic volume [in b]
+Catomicvolume 1.0 # Adj. parameter controlling the atomic volume [in b^3]
 Cedgedipmindistance 1.0 # Adj. parameter controlling the minimum dipole distance [in b]
 atol_rho 1.0
 interactionSlipSlip 0.122 0.122 0.625 0.07 0.137 0.122 # Interaction coefficients (Kubin et al. 2008)
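
The corrected comment changes the documented unit, not the value: the parameter scales an atomic volume measured in b^3. A quick sketch of the magnitude involved, with an assumed Burgers vector (a typical fcc value, not taken from this file):

    b = 2.56e-10                  # assumed Burgers vector magnitude [m]; illustrative only
    Catomicvolume = 1.0           # prefactor from the config above
    omega = Catomicvolume * b**3  # atomic volume [m^3]
    print('{:.3e} m^3'.format(omega))   # ~1.678e-29 m^3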

View File

@@ -1,177 +0,0 @@
#
# System-Wide Abaqus Environment File
# -------------------------------------
standard_parallel = ALL
mp_mode = MPI
mp_file_system = (DETECT,DETECT)
mp_num_parallel_ftps = (4, 4)
mp_environment_export = ('MPI_PROPAGATE_TSTP',
'ABA_CM_BUFFERING',
'ABA_CM_BUFFERING_LIMIT',
'ABA_ITERATIVE_SOLVER_VERBOSE',
'ABA_DMPSOLVER_BWDPARALLELOFF',
'ABA_ELP_SURFACE_SPLIT',
'ABA_ELP_SUSPEND',
'ABA_HOME',
'ABA_MEMORY_MODE',
'ABA_MPI_MESSAGE_TRACKING',
'ABA_MPI_VERBOSE_LEVEL',
'ABA_PATH',
'ABAQUS_CSE_RELTIMETOLERANCE',
'ABA_RESOURCE_MONITOR',
'ABA_RESOURCE_USEMALLINFO',
'ABAQUS_LANG',
'ABAQUS_CSE_CURRCONFIGMAPPING',
'ABAQUS_MPF_DIAGNOSTIC_LEVEL',
'ABAQUSLM_LICENSE_FILE',
'ABQ_CRTMALLOC',
'ABQ_DATACHECK',
'ABQ_RECOVER',
'ABQ_RESTART',
'ABQ_SPLITFILE',
'ABQ_XPL_WINDOWDUMP',
'ABQ_XPL_PARTITIONSIZE',
'ABQLMHANGLIMIT',
'ABQLMQUEUE',
'ABQLMUSER',
'CCI_RENDEZVOUS',
'DOMAIN',
'DOMAIN_CPUS',
'DOUBLE_PRECISION',
'FLEXLM_DIAGNOSTICS',
'FOR0006',
'FOR0064',
'FOR_IGNORE_EXCEPTIONS',
'FOR_DISABLE_DIAGNOSTIC_DISPLAY',
'LD_PRELOAD',
'MP_NUMBER_OF_THREADS',
'MPC_GANG',
'MPI_FLAGS',
'MPI_FLUSH_FCACHE',
'MPI_RDMA_NENVELOPE',
'MPI_SOCKBUFSIZE',
'MPI_USE_MALLOPT_MMAP_MAX',
'MPI_USE_MALLOPT_MMAP_THRESHOLD',
'MPI_USE_MALLOPT_SBRK_PROTECTION',
'MPI_WORKDIR',
'MPCCI_DEBUG',
'MPCCI_CODEID',
'MPCCI_JOBID',
'MPCCI_NETDEVICE',
'MPCCI_TINFO',
'MPCCI_SERVER',
'ABAQUS_CCI_DEBUG',
'NCPUS',
'OMP_DYNAMIC',
'OMP_NUM_THREADS',
'OUTDIR',
'PAIDUP',
'PARALLEL_METHOD',
'RAIDEV_NDREG_LAZYMEM',
'ABA_SYMBOLIC_GENERALCOLLAPSE',
'ABA_SYMBOLIC_GENERAL_MAXCLIQUERANK',
'ABA_ADM_MINIMUMINCREASE',
'ABA_ADM_MINIMUMDECREASE',
'IPATH_NO_CPUAFFINITY',
'MALLOC_MMAP_THRESHOLD_',
'ABA_EXT_SIMOUTPUT',
'SMA_WS',
'SMA_PARENT',
'SMA_PLATFORM',
'ABA_PRE_DECOMPOSITION',
'ACML_FAST_MALLOC',
'ACML_FAST_MALLOC_CHUNK_SIZE',
'ACML_FAST_MALLOC_MAX_CHUNKS',
'ACML_FAST_MALLOC_DEBUG')
import driverUtils, os
#-*- mode: python -*-
#
# Compile and Link command settings for the Windows 64 Platform
# ( AMD Opteron / Intel EM64T )
#
compile_fortran=['ifort',
'/c','/DABQ_WIN86_64', '/u',
'/iface:cref', '/recursive', '/Qauto-scalar',
'/QxSSE3', '/QaxAVX',
'/heap-arrays:1',
# '/Od', '/Ob0' # <-- Optimization
# '/Zi', # <-- Debugging
'/include:%I', '/free', '/O1', '/fpp', '/openmp', '/Qmkl']
link_sl=['LINK',
'/nologo', '/NOENTRY', '/INCREMENTAL:NO', '/subsystem:console', '/machine:AMD64',
'/NODEFAULTLIB:LIBC.LIB', '/NODEFAULTLIB:LIBCMT.LIB',
'/DEFAULTLIB:OLDNAMES.LIB', '/DEFAULTLIB:LIBIFCOREMD.LIB', '/DEFAULTLIB:LIBIFPORTMD', '/DEFAULTLIB:LIBMMD.LIB',
'/DEFAULTLIB:kernel32.lib', '/DEFAULTLIB:user32.lib', '/DEFAULTLIB:advapi32.lib',
'/FIXED:NO', '/dll',
'/def:%E', '/out:%U', '%F', '%A', '%L', '%B',
'oldnames.lib', 'user32.lib', 'ws2_32.lib', 'netapi32.lib', 'advapi32.lib']
link_exe=['LINK',
'/nologo', '/INCREMENTAL:NO', '/subsystem:console', '/machine:AMD64', '/STACK:20000000',
'/NODEFAULTLIB:LIBC.LIB', '/NODEFAULTLIB:LIBCMT.LIB', '/DEFAULTLIB:OLDNAMES.LIB', '/DEFAULTLIB:LIBIFCOREMD.LIB',
'/DEFAULTLIB:LIBIFPORTMD', '/DEFAULTLIB:LIBMMD.LIB', '/DEFAULTLIB:kernel32.lib',
'/DEFAULTLIB:user32.lib', '/DEFAULTLIB:advapi32.lib',
'/FIXED:NO', '/LARGEADDRESSAWARE',
'/out:%J', '%F', '%M', '%L', '%B', '%O',
'oldnames.lib', 'user32.lib', 'ws2_32.lib', 'netapi32.lib', 'advapi32.lib']
# Link command to be used for MAKE w/o fortran compiler.
# remove the pound signs in order to remove the comments and have the file take effect.
#
#link_exe=['LINK', '/nologo', 'INCREMENTAL:NO', '/subsystem:console', '/machine:AMD64', '/NODEFAULTLIB:LIBC.LIB', '/NODEFAULTLIB:LIBCMT.LIB',
# '/DEFAULTLIB:OLDNAMES.LIB', '/DEFAULTLIB:MSVCRT.LIB', '/DEFAULTLIB:kernel32.lib', 'DEFAULTLIB:user32.lib', '/DEFAULTLIB:advapi32.lib',
# '/FIXED:NO', '/LARGEADDRESSAWARE', '/DEBUG', '/out:%J', '%F', '%M', '%L', '%B', '%O', 'oldnames.lib', 'user32.lib', 'ws2_32.lib',
# 'netapi32.lib', 'advapi32.lib']
# MPI Configuration
mp_mode = THREADS
mp_mpi_implementation = NATIVE
mp_rsh_command = 'dummy %H -l %U -n %C'
mp_mpirun_path = {}
mpirun = ''
progDir = os.environ.get('ProgramFiles','C:\\Program Files')
for mpiDir in ('Microsoft HPC Pack', 'Microsoft HPC Pack 2008 R2', 'Microsoft HPC Pack 2008', 'Microsoft HPC Pack 2008 SDK'):
mpirun = progDir + os.sep + mpiDir + os.sep + 'bin' + os.sep + 'mpiexec.exe'
if os.path.exists(mpirun):
mp_mpirun_path[NATIVE] = mpirun
mp_mpirun_path[MSSDK] = os.path.join(progDir, mpiDir)
break
if os.environ.has_key('CCP_HOME'):
from queueCCS import QueueCCS
queues['default'] = QueueCCS(queueName='share')
queues['share'] = QueueCCS(queueName='share')
queues['local'] = QueueCCS(queueName='local')
queues['genxmlshare'] = QueueCCS(queueName='genxmlshare')
queues['genxmllocal'] = QueueCCS(queueName='genxmllocal')
del QueueCCS
mpirun = os.path.join(os.environ['CCP_HOME'], 'bin', 'mpiexec.exe')
if os.path.exists(mpirun):
mp_mpirun_path[NATIVE] = mpirun
run_mode=BATCH
if mp_mpirun_path:
mp_mode=MPI
del progDir, mpiDir, mpirun
graphicsEnv = driverUtils.locateFile(os.environ['ABA_PATH'],'site','graphicsConfig','env')
if graphicsEnv:
execfile(graphicsEnv)
else:
raise 'Cannot find the graphics configuration environment file (graphicsConfig.env)'
del driverUtils, os, graphicsEnv
license_server_type=FLEXNET
abaquslm_license_file=""
doc_root=""
doc_root_type="html"
academic=RESEARCH

View File

@@ -14,12 +14,6 @@ except(NameError):
 class ASCIItable():
   """Read and write to ASCII tables"""
-  __slots__ = ['__IO__',
-               'info',
-               'labeled',
-               'data',
-              ]
   tmpext = '_tmp' # filename extension for in-place access
 # ------------------------------------------------------------------

View File

@@ -101,8 +101,6 @@ class Texture(Section):
 class Material():
   """Reads, manipulates and writes material.config files"""
-  __slots__ = ['data']
   def __init__(self,verbose=True):
     """Generates ordered list of parts"""
     self.parts = [
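
Dropping __slots__ in both classes restores the per-instance __dict__, so attributes assigned in __init__ but absent from the slot list (parts here, for instance) no longer raise. A minimal sketch of the failure mode, assuming no base class supplies a __dict__:

    class Slotted:
        __slots__ = ['data']
        def __init__(self):
            self.parts = []   # AttributeError: no 'parts' slot and no __dict__

    class Open:               # no __slots__: instances carry a __dict__
        def __init__(self):
            self.parts = []   # fine

    try:
        Slotted()
    except AttributeError as e:
        print(e)
    Open()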

View File

@@ -48,17 +48,16 @@ if options.asciitable is not None and os.path.isfile(options.asciitable):
                              buffered = False,
                              readonly = True)
   linkedTable.head_read()                                        # read ASCII header info of linked table
-  if linkedTable.label_dimension(options.link[1]) != 1:
-    parser.error('linking column {} needs to be scalar valued.'.format(options.link[1]))
+  linkDim = linkedTable.label_dimension(options.link[1])         # dimension of linking column
-  missing_labels = linkedTable.data_readArray([options.link[1]]+options.label)
+  missing_labels = linkedTable.data_readArray([options.link[1]]+options.label) # try reading linked ASCII table
   linkedTable.close()                                            # close linked ASCII table
   if len(missing_labels) > 0:
     damask.util.croak('column{} {} not found...'.format('s' if len(missing_labels) > 1 else '',', '.join(missing_labels)))
-  index = linkedTable.data[:,0]
-  data  = linkedTable.data[:,1:]
+  index = linkedTable.data[:,:linkDim]
+  data  = linkedTable.data[:,linkDim:]
 else:
   parser.error('no linked ASCIItable given.')
@@ -80,8 +79,10 @@ for name in filenames:
   errors = []
-  linkColumn = table.label_index(options.link[0])
-  if linkColumn < 0: errors.append('linking column {} not found.'.format(options.link[0]))
+  myLink    = table.label_index    (options.link[0])
+  myLinkDim = table.label_dimension(options.link[0])
+  if myLink < 0:           errors.append('linking column {} not found.'.format(options.link[0]))
+  if myLinkDim != linkDim: errors.append('dimension mismatch for column {}.'.format(options.link[0]))
   if errors != []:
     damask.util.croak(errors)
@@ -91,7 +92,7 @@ for name in filenames:
 # ------------------------------------------ assemble header --------------------------------------
   table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:]))
-  table.labels_append(linkedTable.labels(raw = True)[1:])       # extend with new labels (except for linked column)
+  table.labels_append(linkedTable.labels(raw = True)[linkDim:]) # extend with new labels (except for linked column)
   table.head_write()
@@ -100,7 +101,7 @@ for name in filenames:
   outputAlive = True
   while outputAlive and table.data_read():   # read next data line of ASCII table
     try:
-      table.data_append(data[np.argwhere(index == float(table.data[linkColumn]))[0]])   # add data from first matching line
+      table.data_append(data[np.argwhere(np.all((map(float,table.data[myLink:myLink+myLinkDim]) - index)==0,axis=1))[0]]) # add data of first matching line
     except IndexError:
       table.data_append(np.nan*np.ones_like(data[0]))   # or add NaNs
     outputAlive = table.data_write()   # output processed line
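
The rewritten lookup treats the link as a vector-valued key: a linked row matches only when every key component agrees. A minimal numpy sketch with made-up arrays:

    import numpy as np

    # hypothetical linked table: first two columns are the key, the rest is payload
    linked = np.array([[1., 1., 10., 11.],
                       [1., 2., 20., 21.],
                       [2., 1., 30., 31.]])
    linkDim = 2                              # dimension of the linking column
    index, data = linked[:, :linkDim], linked[:, linkDim:]

    key = [1., 2.]                           # key read from the current data line
    match = np.argwhere(np.all((key - index) == 0, axis=1))[0]  # first row with identical key
    print(data[match])                       # [[20. 21.]]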

View File

@@ -61,7 +61,6 @@ for name in filenames:
 # ------------------------------------------ process data ---------------------------------------
   table.data_readArray(options.pos)
-  if len(table.data.shape) < 2: table.data.shape += (1,) # expand to 2D shape
   if table.data.shape[1] < 3:
     table.data = np.hstack((table.data,
                             np.zeros((table.data.shape[0],

View File

@@ -70,7 +70,6 @@ for name in filenames:
 # --------------- figure out size and grid ---------------------------------------------------------
   table.data_readArray(options.pos)
-  if len(table.data.shape) < 2: table.data.shape += (1,) # expand to 2D shape
   if table.data.shape[1] < 3:
     table.data = np.hstack((table.data,
                             np.zeros((table.data.shape[0],

View File

@@ -36,8 +36,9 @@ parser.add_option('-c', '--center', dest='center', type='float', nargs = 3,
 parser.add_option('-d', '--dimension', dest='dimension', type='float', nargs = 3, metavar=' '.join(['float']*3),
                   help='a,b,c extension of hexahedral box; negative values are diameters')
 parser.add_option('-e', '--exponent', dest='exponent', type='float', nargs = 3, metavar=' '.join(['float']*3),
-                  help='i,j,k exponents for axes - 2 gives a sphere (x^2 + y^2 + z^2 < 1), 1 makes \
-                        octahedron (|x| + |y| + |z| < 1). Large values produce boxes, 0 - 1 is concave. ')
+                  help='i,j,k exponents for axes - 0 gives octahedron (|x|^(2^0) + |y|^(2^0) + |z|^(2^0) < 1), \
+                        1 gives a sphere (|x|^(2^1) + |y|^(2^1) + |z|^(2^1) < 1), \
+                        large values produce boxes, negative turns concave.')
 parser.add_option('-f', '--fill', dest='fill', type='int', metavar = 'int',
                   help='grain index to fill primitive. "0" selects maximum microstructure index + 1 [%default]')
 parser.add_option('-q', '--quaternion', dest='quaternion', type='float', nargs = 4, metavar=' '.join(['float']*4),
@@ -48,15 +49,14 @@ parser.add_option( '--degrees', dest='degrees', action='store_true',
                   help = 'angle is given in degrees [%default]')
 parser.add_option( '--nonperiodic', dest='periodic', action='store_false',
                   help = 'wrap around edges [%default]')
-parser.add_option( '--voxelspace', dest='voxelspace', action='store_true',
-                  help = '-c and -d are given in (0 to grid) coordinates instead of (origin to origin+size) \
-                          coordinates [%default]')
+parser.add_option( '--realspace', dest='realspace', action='store_true',
+                  help = '-c and -d span [origin,origin+size] instead of [0,grid] coordinates')
 parser.set_defaults(center = (.0,.0,.0),
                     fill = 0,
                     degrees = False,
-                    exponent = (1e10,1e10,1e10), # box shape by default
+                    exponent = (20,20,20),       # box shape by default
                     periodic = True,
-                    voxelspace = False
+                    realspace = False,
                    )
 (options, filenames) = parser.parse_args()
@@ -74,14 +74,16 @@ else:
 options.center    = np.array(options.center)
 options.dimension = np.array(options.dimension)
+# undo logarithmic sense of exponent and generate ellipsoids for negative dimensions (backward compatibility)
+options.exponent  = np.where(np.array(options.dimension) > 0, np.power(2,options.exponent), 2)
 # --- loop over input files -------------------------------------------------------------------------
 if filenames == []: filenames = [None]
 for name in filenames:
-  try:
-    table = damask.ASCIItable(name = name,
-                              buffered = False, labeled = False)
+  try: table = damask.ASCIItable(name = name,
+                                 buffered = False,
+                                 labeled = False)
   except: continue
   damask.util.report(scriptName,name)
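
The added np.power(2,...) line undoes the logarithmic sense of -e: a user-facing exponent e becomes 2^e in the membership test, and any axis with a negative dimension falls back to 2 (an ellipsoid). A sketch of the mapping with example values:

    import numpy as np

    dimension = np.array([ 8.,  8., -8.])   # negative entry requests an ellipsoid axis
    exponent  = np.array([ 0.,  1., 20.])   # user-facing -e values
    effective = np.where(dimension > 0, np.power(2., exponent), 2.)
    print(effective)                        # [1. 2. 2.]: octahedral, spherical, forced ellipsoid
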
@@ -115,45 +117,42 @@ for name in filenames:
             'microstructures': 0,
            }
-  if options.fill == 0:
-    options.fill = microstructure.max()+1
-  # If we have a negative dimension, make it an ellipsoid for backwards compatibility
-  options.exponent = np.where(np.array(options.dimension) > 0, options.exponent, 2)
+  options.fill = microstructure.max()+1 if options.fill == 0 else options.fill
   microstructure = microstructure.reshape(info['grid'],order='F')
 # coordinates given in real space (default) vs voxel space
-  if not options.voxelspace:
-    options.center += info['origin']
+  if options.realspace:
+    options.center -= info['origin']
     options.center *= np.array(info['grid']) / np.array(info['size'])
     options.dimension *= np.array(info['grid']) / np.array(info['size'])
-  size = microstructure.shape
+  grid = microstructure.shape
 # change to coordinate space where the primitive is the unit sphere/cube/etc
   if options.periodic: # use padding to achieve periodicity
-    (X, Y, Z) = np.meshgrid(np.arange(-size[0]/2, (3*size[0])/2, dtype=np.float32), # 50% padding on each side
-                            np.arange(-size[1]/2, (3*size[1])/2, dtype=np.float32),
-                            np.arange(-size[2]/2, (3*size[2])/2, dtype=np.float32),
+    (X, Y, Z) = np.meshgrid(np.arange(-grid[0]/2, (3*grid[0])/2, dtype=np.float32), # 50% padding on each side
+                            np.arange(-grid[1]/2, (3*grid[1])/2, dtype=np.float32),
+                            np.arange(-grid[2]/2, (3*grid[2])/2, dtype=np.float32),
                             indexing='ij')
     # Padding handling
-    X = np.roll(np.roll(np.roll(X,
-                -size[0]/2, axis=0),
-                -size[1]/2, axis=1),
-                -size[2]/2, axis=2)
-    Y = np.roll(np.roll(np.roll(Y,
-                -size[0]/2, axis=0),
-                -size[1]/2, axis=1),
-                -size[2]/2, axis=2)
-    Z = np.roll(np.roll(np.roll(Z,
-                -size[0]/2, axis=0),
-                -size[1]/2, axis=1),
-                -size[2]/2, axis=2)
+    X = np.roll(np.roll(np.roll(X,
+                -grid[0]/2, axis=0),
+                -grid[1]/2, axis=1),
+                -grid[2]/2, axis=2)
+    Y = np.roll(np.roll(np.roll(Y,
+                -grid[0]/2, axis=0),
+                -grid[1]/2, axis=1),
+                -grid[2]/2, axis=2)
+    Z = np.roll(np.roll(np.roll(Z,
+                -grid[0]/2, axis=0),
+                -grid[1]/2, axis=1),
+                -grid[2]/2, axis=2)
   else: # nonperiodic, much lighter on resources
     # change to coordinate space where the primitive is the unit sphere/cube/etc
-    (X, Y, Z) = np.meshgrid(np.arange(0, size[0], dtype=np.float32),
-                            np.arange(0, size[1], dtype=np.float32),
-                            np.arange(0, size[2], dtype=np.float32),
+    (X, Y, Z) = np.meshgrid(np.arange(0, grid[0], dtype=np.float32),
+                            np.arange(0, grid[1], dtype=np.float32),
+                            np.arange(0, grid[2], dtype=np.float32),
                             indexing='ij')
 # first by translating the center onto 0, 0.5 shifts the voxel origin onto the center of the voxel
@@ -174,27 +173,27 @@ for name in filenames:
   np.seterr(over='ignore', under='ignore')
   if options.periodic: # use padding to achieve periodicity
-    inside = np.zeros(size, dtype=bool)
+    inside = np.zeros(grid, dtype=bool)
     for i in range(2):
       for j in range(2):
         for k in range(2):
           inside = inside | ( # Most of this is handling the padding
-            np.abs(X[size[0] * i : size[0] * (i+1),
-                     size[1] * j : size[1] * (j+1),
-                     size[2] * k : size[2] * (k+1)])**options.exponent[0] +
-            np.abs(Y[size[0] * i : size[0] * (i+1),
-                     size[1] * j : size[1] * (j+1),
-                     size[2] * k : size[2] * (k+1)])**options.exponent[1] +
-            np.abs(Z[size[0] * i : size[0] * (i+1),
-                     size[1] * j : size[1] * (j+1),
-                     size[2] * k : size[2] * (k+1)])**options.exponent[2] < 1)
+            np.abs(X[grid[0] * i : grid[0] * (i+1),
+                     grid[1] * j : grid[1] * (j+1),
+                     grid[2] * k : grid[2] * (k+1)])**options.exponent[0] +
+            np.abs(Y[grid[0] * i : grid[0] * (i+1),
+                     grid[1] * j : grid[1] * (j+1),
+                     grid[2] * k : grid[2] * (k+1)])**options.exponent[1] +
+            np.abs(Z[grid[0] * i : grid[0] * (i+1),
+                     grid[1] * j : grid[1] * (j+1),
+                     grid[2] * k : grid[2] * (k+1)])**options.exponent[2] <= 1.0)
     microstructure = np.where(inside, options.fill, microstructure)
   else: # nonperiodic, much lighter on resources
     microstructure = np.where(np.abs(X)**options.exponent[0] +
                               np.abs(Y)**options.exponent[1] +
-                              np.abs(Z)**options.exponent[2] < 1, options.fill, microstructure)
+                              np.abs(Z)**options.exponent[2] <= 1.0, options.fill, microstructure)
   np.seterr(**old_settings) # Reset warnings to old state
   newInfo['microstructures'] = microstructure.max()
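
The <= 1.0 comparison is a superellipsoid membership test; the exponent alone morphs octahedron into sphere into box. A quick sanity sketch on a coarse voxel grid, checking enclosed volume fractions against the exact values 1/6 (octahedron) and pi/6 (sphere):

    import numpy as np

    n = 100                                   # voxels per axis over [-1,1]
    x = (np.arange(n) + 0.5) / n * 2.0 - 1.0  # voxel centers
    X, Y, Z = np.meshgrid(x, x, x, indexing='ij')

    for p in [1.0, 2.0, 20.0]:                # octahedron, sphere, near-box
        inside = np.abs(X)**p + np.abs(Y)**p + np.abs(Z)**p <= 1.0
        print('p={:4.1f}: volume fraction {:.3f}'.format(p, inside.mean()))
    # expected: ~0.167 (1/6), ~0.524 (pi/6), approaching 1.0 as p grows
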
@@ -209,11 +208,11 @@ for name in filenames:
   table.info_clear()
   table.info_append([
     scriptID + ' ' + ' '.join(sys.argv[1:]),
-    "grid\ta {grid[0]}\tb {grid[1]}\tc {grid[2]}".format(grid=info['grid']),
-    "size\tx {size[0]}\ty {size[1]}\tz {size[2]}".format(size=info['size']),
-    "origin\tx {origin[0]}\ty {origin[1]}\tz {origin[2]}".format(origin=info['origin']),
-    "homogenization\t{homog}".format(homog=info['homogenization']),
-    "microstructures\t{microstructures}".format(microstructures=newInfo['microstructures']),
+    "grid\ta {}\tb {}\tc {}".format(*info['grid']),
+    "size\tx {}\ty {}\tz {}".format(*info['size']),
+    "origin\tx {}\ty {}\tz {}".format(*info['origin']),
+    "homogenization\t{}".format(info['homogenization']),
+    "microstructures\t{}".format(newInfo['microstructures']),
     extra_header
     ])
   table.labels_clear()