diff --git a/VERSION b/VERSION index 1f78eb57f..7e9361c34 100644 --- a/VERSION +++ b/VERSION @@ -1 +1 @@ -v2.0.0-196-g1940b8d +v2.0.0-205-gced4d9d diff --git a/code/lattice.f90 b/code/lattice.f90 index d08d42c06..bf1f53658 100644 --- a/code/lattice.f90 +++ b/code/lattice.f90 @@ -17,13 +17,7 @@ module lattice LATTICE_maxNslipFamily = 13_pInt, & !< max # of slip system families over lattice structures LATTICE_maxNtwinFamily = 4_pInt, & !< max # of twin system families over lattice structures LATTICE_maxNtransFamily = 2_pInt, & !< max # of transformation system families over lattice structures - LATTICE_maxNcleavageFamily = 3_pInt, & !< max # of transformation system families over lattice structures - LATTICE_maxNslip = 52_pInt, & !< max # of slip systems over lattice structures - LATTICE_maxNtwin = 24_pInt, & !< max # of twin systems over lattice structures - LATTICE_maxNinteraction = 182_pInt, & !< max # of interaction types (in hardening matrix part) - LATTICE_maxNnonSchmid = 6_pInt, & !< max # of non schmid contributions over lattice structures - LATTICE_maxNtrans = 12_pInt, & !< max # of transformations over lattice structures - LATTICE_maxNcleavage = 9_pInt !< max # of cleavage over lattice structures + LATTICE_maxNcleavageFamily = 3_pInt !< max # of transformation system families over lattice structures integer(pInt), allocatable, dimension(:,:), protected, public :: & lattice_NslipSystem, & !< total # of slip systems in each family @@ -80,25 +74,25 @@ module lattice lattice_NnonSchmid !< total # of non-Schmid contributions for each structure !-------------------------------------------------------------------------------------------------- -! fcc +! face centered cubic integer(pInt), dimension(LATTICE_maxNslipFamily), parameter, public :: & - LATTICE_fcc_NslipSystem = int([12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],pInt) !< total # of slip systems per family for fcc + LATTICE_fcc_NslipSystem = int([12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],pInt) !< # of slip systems per family for fcc integer(pInt), dimension(LATTICE_maxNtwinFamily), parameter, public :: & - LATTICE_fcc_NtwinSystem = int([12, 0, 0, 0],pInt) !< total # of twin systems per family for fcc + LATTICE_fcc_NtwinSystem = int([12, 0, 0, 0],pInt) !< # of twin systems per family for fcc integer(pInt), dimension(LATTICE_maxNtransFamily), parameter, public :: & - LATTICE_fcc_NtransSystem = int([12, 0],pInt) !< total # of transformation systems per family for fcc + LATTICE_fcc_NtransSystem = int([12, 0],pInt) !< # of transformation systems per family for fcc integer(pInt), dimension(LATTICE_maxNcleavageFamily), parameter, public :: & - LATTICE_fcc_NcleavageSystem = int([3, 4, 0],pInt) !< total # of cleavage systems per family for fcc + LATTICE_fcc_NcleavageSystem = int([3, 4, 0],pInt) !< # of cleavage systems per family for fcc integer(pInt), parameter, private :: & - LATTICE_fcc_Nslip = sum(lattice_fcc_NslipSystem), & !< total # of slip systems for fcc - LATTICE_fcc_Ntwin = sum(lattice_fcc_NtwinSystem), & !< total # of twin systems for fcc + LATTICE_fcc_Nslip = 12_pInt, & !sum(lattice_fcc_NslipSystem), & !< total # of slip systems for fcc + LATTICE_fcc_Ntwin = 12_pInt, & !sum(lattice_fcc_NtwinSystem), & !< total # of twin systems for fcc LATTICE_fcc_NnonSchmid = 0_pInt, & !< total # of non-Schmid contributions for fcc - LATTICE_fcc_Ntrans = sum(lattice_fcc_NtransSystem), & !< total # of transformation systems for fcc - LATTICE_fcc_Ncleavage = sum(lattice_fcc_NcleavageSystem) !< total # of cleavage systems for fcc + LATTICE_fcc_Ntrans = 
12_pInt, & !sum(lattice_fcc_NtransSystem), & !< total # of transformation systems for fcc + LATTICE_fcc_Ncleavage = 7_pInt !sum(lattice_fcc_NcleavageSystem) !< total # of cleavage systems for fcc real(pReal), dimension(3+3,LATTICE_fcc_Nslip), parameter, private :: & LATTICE_fcc_systemSlip = reshape(real([& @@ -312,8 +306,8 @@ module lattice 0.0, 0.0, 1.0, 45.0 & ],[ 4_pInt,LATTICE_fcc_Ntrans]) - real(pReal), dimension(LATTICE_fcc_Ntrans,LATTICE_fcc_Ntrans), parameter, private :: & ! Matrix for projection of shear from slip system to fault-band (twin) systems - LATTICE_fccTobcc_projectionTrans = reshape(real([& ! For ns = nt = nr + real(pReal), dimension(LATTICE_fcc_Ntrans,LATTICE_fcc_Ntrans), parameter, private :: & ! Matrix for projection of shear from slip system to fault-band (twin) systems + LATTICE_fccTobcc_projectionTrans = reshape(real([& ! For ns = nt = nr 0, 1,-1, 0, 0, 0, 0, 0, 0, 0, 0, 0, & -1, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, & 1,-1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, & @@ -363,25 +357,25 @@ module lattice ],pReal),[ 3_pInt + 3_pInt,LATTICE_fcc_Ncleavage]) !-------------------------------------------------------------------------------------------------- -! bcc +! body centered cubic integer(pInt), dimension(LATTICE_maxNslipFamily), parameter, public :: & - LATTICE_bcc_NslipSystem = int([ 12, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], pInt) !< total # of slip systems per family for bcc + LATTICE_bcc_NslipSystem = int([ 12, 12, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], pInt) !< # of slip systems per family for bcc integer(pInt), dimension(LATTICE_maxNtwinFamily), parameter, public :: & - LATTICE_bcc_NtwinSystem = int([ 12, 0, 0, 0], pInt) !< total # of twin systems per family for bcc + LATTICE_bcc_NtwinSystem = int([ 12, 0, 0, 0], pInt) !< # of twin systems per family for bcc integer(pInt), dimension(LATTICE_maxNtransFamily), parameter, public :: & - LATTICE_bcc_NtransSystem = int([0,0],pInt) !< total # of transformation systems per family for bcc + LATTICE_bcc_NtransSystem = int([0,0],pInt) !< # of transformation systems per family for bcc integer(pInt), dimension(LATTICE_maxNcleavageFamily), parameter, public :: & - LATTICE_bcc_NcleavageSystem = int([3,6,0],pInt) !< total # of cleavage systems per family for bcc + LATTICE_bcc_NcleavageSystem = int([3,6,0],pInt) !< # of cleavage systems per family for bcc integer(pInt), parameter, private :: & - LATTICE_bcc_Nslip = sum(lattice_bcc_NslipSystem), & !< total # of slip systems for bcc - LATTICE_bcc_Ntwin = sum(lattice_bcc_NtwinSystem), & !< total # of twin systems for bcc + LATTICE_bcc_Nslip = 24_pInt, & !sum(lattice_bcc_NslipSystem), & !< total # of slip systems for bcc + LATTICE_bcc_Ntwin = 12_pInt, & !sum(lattice_bcc_NtwinSystem), & !< total # of twin systems for bcc LATTICE_bcc_NnonSchmid = 6_pInt, & !< total # of non-Schmid contributions for bcc (A. Koester, A. Ma, A. 
Hartmaier 2012) - LATTICE_bcc_Ntrans = sum(lattice_bcc_NtransSystem), & !< total # of transformation systems for bcc - LATTICE_bcc_Ncleavage = sum(lattice_bcc_NcleavageSystem) !< total # of cleavage systems for bcc + LATTICE_bcc_Ntrans = 0_pInt, & !sum(lattice_bcc_NtransSystem), & !< total # of transformation systems for bcc + LATTICE_bcc_Ncleavage = 9_pInt !sum(lattice_bcc_NcleavageSystem) !< total # of cleavage systems for bcc real(pReal), dimension(3+3,LATTICE_bcc_Nslip), parameter, private :: & LATTICE_bcc_systemSlip = reshape(real([& @@ -560,7 +554,7 @@ module lattice ],pReal),[ 3_pInt + 3_pInt,LATTICE_bcc_Ncleavage]) !-------------------------------------------------------------------------------------------------- -! hex +! hexagonal integer(pInt), dimension(LATTICE_maxNslipFamily), parameter, public :: & lattice_hex_NslipSystem = int([ 3, 3, 3, 6, 12, 6, 0, 0, 0, 0, 0, 0, 0],pInt) !< # of slip systems per family for hex @@ -568,17 +562,17 @@ module lattice lattice_hex_NtwinSystem = int([ 6, 6, 6, 6],pInt) !< # of slip systems per family for hex integer(pInt), dimension(LATTICE_maxNtransFamily), parameter, public :: & - LATTICE_hex_NtransSystem = int([0,0],pInt) !< total # of transformation systems per family for hex + LATTICE_hex_NtransSystem = int([0,0],pInt) !< # of transformation systems per family for hex integer(pInt), dimension(LATTICE_maxNcleavageFamily), parameter, public :: & - LATTICE_hex_NcleavageSystem = int([3,0,0],pInt) !< total # of cleavage systems per family for hex + LATTICE_hex_NcleavageSystem = int([3,0,0],pInt) !< # of cleavage systems per family for hex integer(pInt), parameter, private :: & - LATTICE_hex_Nslip = sum(lattice_hex_NslipSystem), & !< total # of slip systems for hex - LATTICE_hex_Ntwin = sum(lattice_hex_NtwinSystem), & !< total # of twin systems for hex + LATTICE_hex_Nslip = 33_pInt, & !sum(lattice_hex_NslipSystem), & !< total # of slip systems for hex + LATTICE_hex_Ntwin = 24_pInt, & !sum(lattice_hex_NtwinSystem), & !< total # of twin systems for hex LATTICE_hex_NnonSchmid = 0_pInt, & !< total # of non-Schmid contributions for hex - LATTICE_hex_Ntrans = sum(lattice_hex_NtransSystem), & !< total # of transformation systems for hex - LATTICE_hex_Ncleavage = sum(lattice_hex_NcleavageSystem) !< total # of cleavage systems for hex + LATTICE_hex_Ntrans = 0_pInt, & !sum(lattice_hex_NtransSystem), & !< total # of transformation systems for hex + LATTICE_hex_Ncleavage = 3_pInt !sum(lattice_hex_NcleavageSystem) !< total # of cleavage systems for hex real(pReal), dimension(4+4,LATTICE_hex_Nslip), parameter, private :: & LATTICE_hex_systemSlip = reshape(real([& @@ -842,25 +836,25 @@ module lattice !-------------------------------------------------------------------------------------------------- -! bct +! body centered tetragonal integer(pInt), dimension(LATTICE_maxNslipFamily), parameter, public :: & LATTICE_bct_NslipSystem = int([2, 2, 2, 4, 2, 4, 2, 2, 4, 8, 4, 8, 8 ],pInt) !< # of slip systems per family for bct (Sn) Bieler J. 
Electr Mater 2009 integer(pInt), dimension(LATTICE_maxNtwinFamily), parameter, public :: & - LATTICE_bct_NtwinSystem = int([0, 0, 0, 0], pInt) !< total # of twin systems per family for bct-example + LATTICE_bct_NtwinSystem = int([0, 0, 0, 0], pInt) !< # of twin systems per family for bct integer(pInt), dimension(LATTICE_maxNtransFamily), parameter, public :: & - LATTICE_bct_NtransSystem = int([0,0],pInt) !< total # of transformation systems per family for bct + LATTICE_bct_NtransSystem = int([0,0],pInt) !< # of transformation systems per family for bct integer(pInt), dimension(LATTICE_maxNcleavageFamily), parameter, public :: & - LATTICE_bct_NcleavageSystem = int([0,0,0],pInt) !< total # of cleavage systems per family for bct + LATTICE_bct_NcleavageSystem = int([0,0,0],pInt) !< # of cleavage systems per family for bct integer(pInt), parameter, private :: & - LATTICE_bct_Nslip = sum(lattice_bct_NslipSystem), & !< total # of slip systems for bct - LATTICE_bct_Ntwin = sum(lattice_bct_NtwinSystem), & !< total # of twin systems for bct + LATTICE_bct_Nslip = 52_pInt, & !sum(lattice_bct_NslipSystem), & !< total # of slip systems for bct + LATTICE_bct_Ntwin = 0_pInt, & !sum(lattice_bct_NtwinSystem), & !< total # of twin systems for bct LATTICE_bct_NnonSchmid = 0_pInt, & !< total # of non-Schmid contributions for bct - LATTICE_bct_Ntrans = sum(lattice_bct_NtransSystem), & !< total # of transformation systems for bct - LATTICE_bct_Ncleavage = sum(lattice_bct_NcleavageSystem) !< total # of cleavage systems for bct + LATTICE_bct_Ntrans = 0_pInt, & !sum(lattice_bct_NtransSystem), & !< total # of transformation systems for bct + LATTICE_bct_Ncleavage = 0_pInt !sum(lattice_bct_NcleavageSystem) !< total # of cleavage systems for bct real(pReal), dimension(3+3,LATTICE_bct_Nslip), parameter, private :: & LATTICE_bct_systemSlip = reshape(real([& @@ -1003,12 +997,25 @@ module lattice !-------------------------------------------------------------------------------------------------- ! 
isotropic + integer(pInt), dimension(LATTICE_maxNslipFamily), parameter, public :: & + LATTICE_iso_NslipSystem = int([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ],pInt) !< # of slip systems per family for iso + + integer(pInt), dimension(LATTICE_maxNtwinFamily), parameter, public :: & + LATTICE_iso_NtwinSystem = int([0, 0, 0, 0], pInt) !< # of twin systems per family for iso + + integer(pInt), dimension(LATTICE_maxNtransFamily), parameter, public :: & + LATTICE_iso_NtransSystem = int([0, 0],pInt) !< # of transformation systems per family for iso + integer(pInt), dimension(LATTICE_maxNcleavageFamily), parameter, public :: & - LATTICE_iso_NcleavageSystem = int([3,0,0],pInt) !< total # of cleavage systems per family for iso + LATTICE_iso_NcleavageSystem = int([3,0,0],pInt) !< # of cleavage systems per family for iso integer(pInt), parameter, private :: & - LATTICE_iso_Ncleavage = sum(LATTICE_iso_NcleavageSystem) !< total # of cleavage systems for iso - + LATTICE_iso_Nslip = 0_pInt, & !sum(lattice_iso_NslipSystem), & !< total # of slip systems for iso + LATTICE_iso_Ntwin = 0_pInt, & !sum(lattice_iso_NtwinSystem), & !< total # of twin systems for iso + LATTICE_iso_NnonSchmid = 0_pInt, & !< total # of non-Schmid contributions for iso + LATTICE_iso_Ntrans = 0_pInt, & !sum(lattice_iso_NtransSystem), & !< total # of transformation systems for iso + LATTICE_iso_Ncleavage = 3_pInt !sum(lattice_iso_NcleavageSystem) !< total # of cleavage systems for iso + real(pReal), dimension(3+3,LATTICE_iso_Ncleavage), parameter, private :: & LATTICE_iso_systemCleavage = reshape(real([& ! Cleavage direction Plane normal @@ -1019,12 +1026,25 @@ module lattice !-------------------------------------------------------------------------------------------------- ! orthorhombic + integer(pInt), dimension(LATTICE_maxNslipFamily), parameter, public :: & + LATTICE_ortho_NslipSystem = int([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 ],pInt) !< # of slip systems per family for ortho + + integer(pInt), dimension(LATTICE_maxNtwinFamily), parameter, public :: & + LATTICE_ortho_NtwinSystem = int([0, 0, 0, 0], pInt) !< # of twin systems per family for ortho + + integer(pInt), dimension(LATTICE_maxNtransFamily), parameter, public :: & + LATTICE_ortho_NtransSystem = int([0, 0],pInt) !< # of transformation systems per family for ortho + integer(pInt), dimension(LATTICE_maxNcleavageFamily), parameter, public :: & - LATTICE_ortho_NcleavageSystem = int([1,1,1],pInt) !< total # of cleavage systems per family for ortho + LATTICE_ortho_NcleavageSystem = int([1,1,1],pInt) !< # of cleavage systems per family for ortho integer(pInt), parameter, private :: & - LATTICE_ortho_Ncleavage = sum(LATTICE_ortho_NcleavageSystem) !< total # of cleavage systems for ortho - + LATTICE_ortho_Nslip = 0_pInt, & !sum(lattice_ortho_NslipSystem), & !< total # of slip systems for ortho + LATTICE_ortho_Ntwin = 0_pInt, & !sum(lattice_ortho_NtwinSystem), & !< total # of twin systems for ortho + LATTICE_ortho_NnonSchmid = 0_pInt, & !< total # of non-Schmid contributions for ortho + LATTICE_ortho_Ntrans = 0_pInt, & !sum(lattice_ortho_NtransSystem), & !< total # of transformation systems for ortho + LATTICE_ortho_Ncleavage = 3_pInt !sum(lattice_ortho_NcleavageSystem) !< total # of cleavage systems for ortho + real(pReal), dimension(3+3,LATTICE_ortho_Ncleavage), parameter, private :: & LATTICE_ortho_systemCleavage = reshape(real([& ! 
Cleavage direction Plane normal @@ -1033,6 +1053,26 @@ module lattice 1, 0, 0, 0, 0, 1 & ],pReal),[ 3_pInt + 3_pInt,LATTICE_ortho_Ncleavage]) + integer(pInt), parameter, public :: & + LATTICE_maxNslip = 52_pInt, & + !LATTICE_maxNslip = maxval([LATTICE_fcc_Nslip,LATTICE_bcc_Nslip,LATTICE_hex_Nslip,\ + ! LATTICE_bct_Nslip,LATTICE_iso_Nslip,LATTICE_ortho_Nslip]), & !< max # of slip systems over lattice structures + LATTICE_maxNtwin = 24_pInt, & + !LATTICE_maxNtwin = maxval([LATTICE_fcc_Ntwin,LATTICE_bcc_Ntwin,LATTICE_hex_Ntwin,\ + ! LATTICE_bct_Ntwin,LATTICE_iso_Ntwin,LATTICE_ortho_Ntwin]), & !< max # of twin systems over lattice structures + LATTICE_maxNnonSchmid = 6_pInt, & + !LATTICE_maxNtwin = maxval([LATTICE_fcc_NnonSchmid,LATTICE_bcc_NnonSchmid,\ + ! LATTICE_hex_NnonSchmid,LATTICE_bct_NnonSchmid,\ + ! LATTICE_iso_NnonSchmid,LATTICE_ortho_NnonSchmid]), & !< max # of non-Schmid contributions over lattice structures + LATTICE_maxNtrans = 12_pInt, & + !LATTICE_maxNtrans = maxval([LATTICE_fcc_Ntrans,LATTICE_bcc_Ntrans,LATTICE_hex_Ntrans,\ + ! LATTICE_bct_Ntrans,LATTICE_iso_Ntrans,LATTICE_ortho_Ntrans]),&!< max # of transformation systems over lattice structures + LATTICE_maxNcleavage = 9_pInt, & + !LATTICE_maxNcleavage = maxval([LATTICE_fcc_Ncleavage,LATTICE_bcc_Ncleavage,\ + ! LATTICE_hex_Ncleavage,LATTICE_bct_Ncleavage,\ + ! LATTICE_iso_Ncleavage,LATTICE_ortho_Ncleavage]) !< max # of cleavage systems over lattice structures + LATTICE_maxNinteraction = 182_pInt !< max # of interaction types (in hardening matrix part) + real(pReal), dimension(:,:,:), allocatable, public, protected :: & lattice_C66, lattice_trans_C66 real(pReal), dimension(:,:,:,:,:), allocatable, public, protected :: & @@ -1250,7 +1290,7 @@ subroutine lattice_init endif mainProcess !-------------------------------------------------------------------------------------------------- -! consistency checks +! 
consistency checks (required since ifort 15.0 does not support sum/maxval in parameter definition) if (LATTICE_maxNslip /= maxval([LATTICE_fcc_Nslip,LATTICE_bcc_Nslip,LATTICE_hex_Nslip,LATTICE_bct_Nslip])) & call IO_error(0_pInt,ext_msg = 'LATTICE_maxNslip') diff --git a/code/math.f90 b/code/math.f90 index 624990ac4..9ad98df9e 100644 --- a/code/math.f90 +++ b/code/math.f90 @@ -839,6 +839,9 @@ subroutine math_invert(myDim,A, InvA, error) real(pReal), dimension(myDim,myDim), intent(out) :: invA logical, intent(out) :: error + external :: & + dgetrf, & + dgetri invA = A call dgetrf(myDim,myDim,invA,myDim,ipiv,ierr) diff --git a/installation/mods_Abaqus/abaqus_v6_windows.env b/installation/mods_Abaqus/abaqus_v6_windows.env index 66e897ff3..3a3445c7b 100644 --- a/installation/mods_Abaqus/abaqus_v6_windows.env +++ b/installation/mods_Abaqus/abaqus_v6_windows.env @@ -1,177 +1,177 @@ -# -# System-Wide Abaqus Environment File -# ------------------------------------- -standard_parallel = ALL -mp_mode = MPI -mp_file_system = (DETECT,DETECT) -mp_num_parallel_ftps = (4, 4) -mp_environment_export = ('MPI_PROPAGATE_TSTP', - 'ABA_CM_BUFFERING', - 'ABA_CM_BUFFERING_LIMIT', - 'ABA_ITERATIVE_SOLVER_VERBOSE', - 'ABA_DMPSOLVER_BWDPARALLELOFF', - 'ABA_ELP_SURFACE_SPLIT', - 'ABA_ELP_SUSPEND', - 'ABA_HOME', - 'ABA_MEMORY_MODE', - 'ABA_MPI_MESSAGE_TRACKING', - 'ABA_MPI_VERBOSE_LEVEL', - 'ABA_PATH', - 'ABAQUS_CSE_RELTIMETOLERANCE', - 'ABA_RESOURCE_MONITOR', - 'ABA_RESOURCE_USEMALLINFO', - 'ABAQUS_LANG', - 'ABAQUS_CSE_CURRCONFIGMAPPING', - 'ABAQUS_MPF_DIAGNOSTIC_LEVEL', - 'ABAQUSLM_LICENSE_FILE', - 'ABQ_CRTMALLOC', - 'ABQ_DATACHECK', - 'ABQ_RECOVER', - 'ABQ_RESTART', - 'ABQ_SPLITFILE', - 'ABQ_XPL_WINDOWDUMP', - 'ABQ_XPL_PARTITIONSIZE', - 'ABQLMHANGLIMIT', - 'ABQLMQUEUE', - 'ABQLMUSER', - 'CCI_RENDEZVOUS', - 'DOMAIN', - 'DOMAIN_CPUS', - 'DOUBLE_PRECISION', - 'FLEXLM_DIAGNOSTICS', - 'FOR0006', - 'FOR0064', - 'FOR_IGNORE_EXCEPTIONS', - 'FOR_DISABLE_DIAGNOSTIC_DISPLAY', - 'LD_PRELOAD', - 'MP_NUMBER_OF_THREADS', - 'MPC_GANG', - 'MPI_FLAGS', - 'MPI_FLUSH_FCACHE', - 'MPI_RDMA_NENVELOPE', - 'MPI_SOCKBUFSIZE', - 'MPI_USE_MALLOPT_MMAP_MAX', - 'MPI_USE_MALLOPT_MMAP_THRESHOLD', - 'MPI_USE_MALLOPT_SBRK_PROTECTION', - 'MPI_WORKDIR', - 'MPCCI_DEBUG', - 'MPCCI_CODEID', - 'MPCCI_JOBID', - 'MPCCI_NETDEVICE', - 'MPCCI_TINFO', - 'MPCCI_SERVER', - 'ABAQUS_CCI_DEBUG', - 'NCPUS', - 'OMP_DYNAMIC', - 'OMP_NUM_THREADS', - 'OUTDIR', - 'PAIDUP', - 'PARALLEL_METHOD', - 'RAIDEV_NDREG_LAZYMEM', - 'ABA_SYMBOLIC_GENERALCOLLAPSE', - 'ABA_SYMBOLIC_GENERAL_MAXCLIQUERANK', - 'ABA_ADM_MINIMUMINCREASE', - 'ABA_ADM_MINIMUMDECREASE', - 'IPATH_NO_CPUAFFINITY', - 'MALLOC_MMAP_THRESHOLD_', - 'ABA_EXT_SIMOUTPUT', - 'SMA_WS', - 'SMA_PARENT', - 'SMA_PLATFORM', - 'ABA_PRE_DECOMPOSITION', - 'ACML_FAST_MALLOC', - 'ACML_FAST_MALLOC_CHUNK_SIZE', - 'ACML_FAST_MALLOC_MAX_CHUNKS', - 'ACML_FAST_MALLOC_DEBUG') - -import driverUtils, os -#-*- mode: python -*- - -# # -# Compile and Link command settings for the Windows 64 Platform # -# ( AMD Opteron / Intel EM64T ) # -# # - - -compile_fortran=['ifort', - '/c','/DABQ_WIN86_64', '/u', - '/iface:cref', '/recursive', '/Qauto-scalar', - '/QxSSE3', '/QaxAVX', - '/heap-arrays:1', - # '/Od', '/Ob0' # <-- Optimization - # '/Zi', # <-- Debugging - '/include:%I', '/free', '/O1', '/fpp', '/openmp', '/Qmkl'] - -link_sl=['LINK', - '/nologo', '/NOENTRY', '/INCREMENTAL:NO', '/subsystem:console', '/machine:AMD64', - '/NODEFAULTLIB:LIBC.LIB', '/NODEFAULTLIB:LIBCMT.LIB', - '/DEFAULTLIB:OLDNAMES.LIB', '/DEFAULTLIB:LIBIFCOREMD.LIB', 
'/DEFAULTLIB:LIBIFPORTMD', '/DEFAULTLIB:LIBMMD.LIB', - '/DEFAULTLIB:kernel32.lib', '/DEFAULTLIB:user32.lib', '/DEFAULTLIB:advapi32.lib', - '/FIXED:NO', '/dll', - '/def:%E', '/out:%U', '%F', '%A', '%L', '%B', - 'oldnames.lib', 'user32.lib', 'ws2_32.lib', 'netapi32.lib', 'advapi32.lib'] - -link_exe=['LINK', - '/nologo', '/INCREMENTAL:NO', '/subsystem:console', '/machine:AMD64', '/STACK:20000000', - '/NODEFAULTLIB:LIBC.LIB', '/NODEFAULTLIB:LIBCMT.LIB', '/DEFAULTLIB:OLDNAMES.LIB', '/DEFAULTLIB:LIBIFCOREMD.LIB', - '/DEFAULTLIB:LIBIFPORTMD', '/DEFAULTLIB:LIBMMD.LIB', '/DEFAULTLIB:kernel32.lib', - '/DEFAULTLIB:user32.lib', '/DEFAULTLIB:advapi32.lib', - '/FIXED:NO', '/LARGEADDRESSAWARE', - '/out:%J', '%F', '%M', '%L', '%B', '%O', - 'oldnames.lib', 'user32.lib', 'ws2_32.lib', 'netapi32.lib', 'advapi32.lib'] - -# Link command to be used for MAKE w/o fortran compiler. -# remove the pound signs in order to remove the comments and have the file take effect. -# -#link_exe=['LINK', '/nologo', 'INCREMENTAL:NO', '/subsystem:console', '/machine:AMD64', '/NODEFAULTLIB:LIBC.LIB', '/NODEFAULTLIB:LIBCMT.LIB', -# '/DEFAULTLIB:OLDNAMES.LIB', '/DEFAULTLIB:MSVCRT.LIB', '/DEFAULTLIB:kernel32.lib', 'DEFAULTLIB:user32.lib', '/DEFAULTLIB:advapi32.lib', -# '/FIXED:NO', '/LARGEADDRESSAWARE', '/DEBUG', '/out:%J', '%F', '%M', '%L', '%B', '%O', 'oldnames.lib', 'user32.lib', 'ws2_32.lib', -# 'netapi32.lib', 'advapi32.lib] - -# MPI Configuration -mp_mode = THREADS - -mp_mpi_implementation = NATIVE -mp_rsh_command = 'dummy %H -l %U -n %C' -mp_mpirun_path = {} -mpirun = '' -progDir = os.environ.get('ProgramFiles','C:\\Program Files') - -for mpiDir in ('Microsoft HPC Pack', 'Microsoft HPC Pack 2008 R2', 'Microsoft HPC Pack 2008', 'Microsoft HPC Pack 2008 SDK'): - mpirun = progDir + os.sep + mpiDir + os.sep + 'bin' + os.sep + 'mpiexec.exe' - if os.path.exists(mpirun): - mp_mpirun_path[NATIVE] = mpirun - mp_mpirun_path[MSSDK] = os.path.join(progDir, mpiDir) - break - -if os.environ.has_key('CCP_HOME'): - from queueCCS import QueueCCS - queues['default'] = QueueCCS(queueName='share') - queues['share'] = QueueCCS(queueName='share') - queues['local'] = QueueCCS(queueName='local') - queues['genxmlshare'] = QueueCCS(queueName='genxmlshare') - queues['genxmllocal'] = QueueCCS(queueName='genxmllocal') - del QueueCCS - mpirun = os.path.join(os.environ['CCP_HOME'], 'bin', 'mpiexec.exe') - if os.path.exists(mpirun): - mp_mpirun_path[NATIVE] = mpirun - run_mode=BATCH - -if mp_mpirun_path: - mp_mode=MPI - -del progDir, mpiDir, mpirun - -graphicsEnv = driverUtils.locateFile(os.environ['ABA_PATH'],'site','graphicsConfig','env') -if graphicsEnv: - execfile(graphicsEnv) -else: - raise 'Cannot find the graphics configuration environment file (graphicsConfig.env)' - -del driverUtils, os, graphicsEnv -license_server_type=FLEXNET - -abaquslm_license_file="" -doc_root=" -doc_root_type="html" -academic=RESEARCH +# +# System-Wide Abaqus Environment File +# ------------------------------------- +standard_parallel = ALL +mp_mode = MPI +mp_file_system = (DETECT,DETECT) +mp_num_parallel_ftps = (4, 4) +mp_environment_export = ('MPI_PROPAGATE_TSTP', + 'ABA_CM_BUFFERING', + 'ABA_CM_BUFFERING_LIMIT', + 'ABA_ITERATIVE_SOLVER_VERBOSE', + 'ABA_DMPSOLVER_BWDPARALLELOFF', + 'ABA_ELP_SURFACE_SPLIT', + 'ABA_ELP_SUSPEND', + 'ABA_HOME', + 'ABA_MEMORY_MODE', + 'ABA_MPI_MESSAGE_TRACKING', + 'ABA_MPI_VERBOSE_LEVEL', + 'ABA_PATH', + 'ABAQUS_CSE_RELTIMETOLERANCE', + 'ABA_RESOURCE_MONITOR', + 'ABA_RESOURCE_USEMALLINFO', + 'ABAQUS_LANG', + 'ABAQUS_CSE_CURRCONFIGMAPPING', + 
'ABAQUS_MPF_DIAGNOSTIC_LEVEL', + 'ABAQUSLM_LICENSE_FILE', + 'ABQ_CRTMALLOC', + 'ABQ_DATACHECK', + 'ABQ_RECOVER', + 'ABQ_RESTART', + 'ABQ_SPLITFILE', + 'ABQ_XPL_WINDOWDUMP', + 'ABQ_XPL_PARTITIONSIZE', + 'ABQLMHANGLIMIT', + 'ABQLMQUEUE', + 'ABQLMUSER', + 'CCI_RENDEZVOUS', + 'DOMAIN', + 'DOMAIN_CPUS', + 'DOUBLE_PRECISION', + 'FLEXLM_DIAGNOSTICS', + 'FOR0006', + 'FOR0064', + 'FOR_IGNORE_EXCEPTIONS', + 'FOR_DISABLE_DIAGNOSTIC_DISPLAY', + 'LD_PRELOAD', + 'MP_NUMBER_OF_THREADS', + 'MPC_GANG', + 'MPI_FLAGS', + 'MPI_FLUSH_FCACHE', + 'MPI_RDMA_NENVELOPE', + 'MPI_SOCKBUFSIZE', + 'MPI_USE_MALLOPT_MMAP_MAX', + 'MPI_USE_MALLOPT_MMAP_THRESHOLD', + 'MPI_USE_MALLOPT_SBRK_PROTECTION', + 'MPI_WORKDIR', + 'MPCCI_DEBUG', + 'MPCCI_CODEID', + 'MPCCI_JOBID', + 'MPCCI_NETDEVICE', + 'MPCCI_TINFO', + 'MPCCI_SERVER', + 'ABAQUS_CCI_DEBUG', + 'NCPUS', + 'OMP_DYNAMIC', + 'OMP_NUM_THREADS', + 'OUTDIR', + 'PAIDUP', + 'PARALLEL_METHOD', + 'RAIDEV_NDREG_LAZYMEM', + 'ABA_SYMBOLIC_GENERALCOLLAPSE', + 'ABA_SYMBOLIC_GENERAL_MAXCLIQUERANK', + 'ABA_ADM_MINIMUMINCREASE', + 'ABA_ADM_MINIMUMDECREASE', + 'IPATH_NO_CPUAFFINITY', + 'MALLOC_MMAP_THRESHOLD_', + 'ABA_EXT_SIMOUTPUT', + 'SMA_WS', + 'SMA_PARENT', + 'SMA_PLATFORM', + 'ABA_PRE_DECOMPOSITION', + 'ACML_FAST_MALLOC', + 'ACML_FAST_MALLOC_CHUNK_SIZE', + 'ACML_FAST_MALLOC_MAX_CHUNKS', + 'ACML_FAST_MALLOC_DEBUG') + +import driverUtils, os +#-*- mode: python -*- + +# # +# Compile and Link command settings for the Windows 64 Platform # +# ( AMD Opteron / Intel EM64T ) # +# # + + +compile_fortran=['ifort', + '/c','/DABQ_WIN86_64', '/u', + '/iface:cref', '/recursive', '/Qauto-scalar', + '/QxSSE3', '/QaxAVX', + '/heap-arrays:1', + # '/Od', '/Ob0' # <-- Optimization + # '/Zi', # <-- Debugging + '/include:%I', '/free', '/O1', '/fpp', '/openmp', '/Qmkl'] + +link_sl=['LINK', + '/nologo', '/NOENTRY', '/INCREMENTAL:NO', '/subsystem:console', '/machine:AMD64', + '/NODEFAULTLIB:LIBC.LIB', '/NODEFAULTLIB:LIBCMT.LIB', + '/DEFAULTLIB:OLDNAMES.LIB', '/DEFAULTLIB:LIBIFCOREMD.LIB', '/DEFAULTLIB:LIBIFPORTMD', '/DEFAULTLIB:LIBMMD.LIB', + '/DEFAULTLIB:kernel32.lib', '/DEFAULTLIB:user32.lib', '/DEFAULTLIB:advapi32.lib', + '/FIXED:NO', '/dll', + '/def:%E', '/out:%U', '%F', '%A', '%L', '%B', + 'oldnames.lib', 'user32.lib', 'ws2_32.lib', 'netapi32.lib', 'advapi32.lib'] + +link_exe=['LINK', + '/nologo', '/INCREMENTAL:NO', '/subsystem:console', '/machine:AMD64', '/STACK:20000000', + '/NODEFAULTLIB:LIBC.LIB', '/NODEFAULTLIB:LIBCMT.LIB', '/DEFAULTLIB:OLDNAMES.LIB', '/DEFAULTLIB:LIBIFCOREMD.LIB', + '/DEFAULTLIB:LIBIFPORTMD', '/DEFAULTLIB:LIBMMD.LIB', '/DEFAULTLIB:kernel32.lib', + '/DEFAULTLIB:user32.lib', '/DEFAULTLIB:advapi32.lib', + '/FIXED:NO', '/LARGEADDRESSAWARE', + '/out:%J', '%F', '%M', '%L', '%B', '%O', + 'oldnames.lib', 'user32.lib', 'ws2_32.lib', 'netapi32.lib', 'advapi32.lib'] + +# Link command to be used for MAKE w/o fortran compiler. +# remove the pound signs in order to remove the comments and have the file take effect. 
+# +#link_exe=['LINK', '/nologo', 'INCREMENTAL:NO', '/subsystem:console', '/machine:AMD64', '/NODEFAULTLIB:LIBC.LIB', '/NODEFAULTLIB:LIBCMT.LIB', +# '/DEFAULTLIB:OLDNAMES.LIB', '/DEFAULTLIB:MSVCRT.LIB', '/DEFAULTLIB:kernel32.lib', 'DEFAULTLIB:user32.lib', '/DEFAULTLIB:advapi32.lib', +# '/FIXED:NO', '/LARGEADDRESSAWARE', '/DEBUG', '/out:%J', '%F', '%M', '%L', '%B', '%O', 'oldnames.lib', 'user32.lib', 'ws2_32.lib', +# 'netapi32.lib', 'advapi32.lib] + +# MPI Configuration +mp_mode = THREADS + +mp_mpi_implementation = NATIVE +mp_rsh_command = 'dummy %H -l %U -n %C' +mp_mpirun_path = {} +mpirun = '' +progDir = os.environ.get('ProgramFiles','C:\\Program Files') + +for mpiDir in ('Microsoft HPC Pack', 'Microsoft HPC Pack 2008 R2', 'Microsoft HPC Pack 2008', 'Microsoft HPC Pack 2008 SDK'): + mpirun = progDir + os.sep + mpiDir + os.sep + 'bin' + os.sep + 'mpiexec.exe' + if os.path.exists(mpirun): + mp_mpirun_path[NATIVE] = mpirun + mp_mpirun_path[MSSDK] = os.path.join(progDir, mpiDir) + break + +if os.environ.has_key('CCP_HOME'): + from queueCCS import QueueCCS + queues['default'] = QueueCCS(queueName='share') + queues['share'] = QueueCCS(queueName='share') + queues['local'] = QueueCCS(queueName='local') + queues['genxmlshare'] = QueueCCS(queueName='genxmlshare') + queues['genxmllocal'] = QueueCCS(queueName='genxmllocal') + del QueueCCS + mpirun = os.path.join(os.environ['CCP_HOME'], 'bin', 'mpiexec.exe') + if os.path.exists(mpirun): + mp_mpirun_path[NATIVE] = mpirun + run_mode=BATCH + +if mp_mpirun_path: + mp_mode=MPI + +del progDir, mpiDir, mpirun + +graphicsEnv = driverUtils.locateFile(os.environ['ABA_PATH'],'site','graphicsConfig','env') +if graphicsEnv: + execfile(graphicsEnv) +else: + raise 'Cannot find the graphics configuration environment file (graphicsConfig.env)' + +del driverUtils, os, graphicsEnv +license_server_type=FLEXNET + +abaquslm_license_file="" +doc_root=" +doc_root_type="html" +academic=RESEARCH diff --git a/processing/post/addCompatibilityMismatch.py b/processing/post/addCompatibilityMismatch.py index 884702b93..3a73859e1 100755 --- a/processing/post/addCompatibilityMismatch.py +++ b/processing/post/addCompatibilityMismatch.py @@ -21,7 +21,7 @@ Operates on periodic three-dimensional x,y,z-ordered data sets. 
parser.add_option('-c','--coordinates', - dest = 'coords', + dest = 'pos', type = 'string', metavar = 'string', help = 'column heading of coordinates [%default]') parser.add_option('-f','--defgrad', @@ -36,10 +36,10 @@ parser.add_option('--no-volume','-v', dest = 'volume', action = 'store_false', help = 'omit volume mismatch') -parser.set_defaults(coords = 'pos', - defgrad = 'f', - shape = True, - volume = True, +parser.set_defaults(pos = 'pos', + defgrad = 'f', + shape = True, + volume = True, ) (options,filenames) = parser.parse_args() @@ -64,8 +64,8 @@ for name in filenames: errors = [] remarks = [] - if table.label_dimension(options.coords) != 3: errors.append('coordinates {} are not a vector.'.format(options.coords)) - else: colCoord = table.label_index(options.coords) + if table.label_dimension(options.pos) != 3: errors.append('coordinates {} are not a vector.'.format(options.pos)) + else: colCoord = table.label_index(options.pos) if table.label_dimension(options.defgrad) != 9: errors.append('deformation gradient {} is not a tensor.'.format(options.defgrad)) else: colF = table.label_index(options.defgrad) diff --git a/processing/post/addCurl.py b/processing/post/addCurl.py index e368dc482..5c9a4b411 100755 --- a/processing/post/addCurl.py +++ b/processing/post/addCurl.py @@ -71,7 +71,7 @@ Deals with both vector- and tensor fields. """, version = scriptID) parser.add_option('-p','--pos','--periodiccellcenter', - dest = 'coords', + dest = 'pos', type = 'string', metavar = 'string', help = 'label of coordinates [%default]') parser.add_option('-v','--vector', @@ -83,7 +83,7 @@ parser.add_option('-t','--tensor', action = 'extend', metavar = '', help = 'label(s) of tensor field values') -parser.set_defaults(coords = 'pos', +parser.set_defaults(pos = 'pos', ) (options,filenames) = parser.parse_args() @@ -114,8 +114,8 @@ for name in filenames: remarks = [] column = {} - if table.label_dimension(options.coords) != 3: errors.append('coordinates {} are not a vector.'.format(options.coords)) - else: colCoord = table.label_index(options.coords) + if table.label_dimension(options.pos) != 3: errors.append('coordinates {} are not a vector.'.format(options.pos)) + else: colCoord = table.label_index(options.pos) for type, data in items.iteritems(): for what in (data['labels'] if data['labels'] is not None else []): diff --git a/processing/post/addDisplacement.py b/processing/post/addDisplacement.py index 0bafdb6cc..782a7c24b 100755 --- a/processing/post/addDisplacement.py +++ b/processing/post/addDisplacement.py @@ -101,7 +101,7 @@ parser.add_option('-f', help = 'column label of deformation gradient [%default]') parser.add_option('-p', '--pos', '--position', - dest = 'coords', + dest = 'pos', metavar = 'string', help = 'label of coordinates [%default]') parser.add_option('--nodal', @@ -110,7 +110,7 @@ parser.add_option('--nodal', help = 'output nodal (instad of cell-centered) displacements') parser.set_defaults(defgrad = 'f', - coords = 'pos', + pos = 'pos', nodal = False, ) @@ -121,13 +121,14 @@ parser.set_defaults(defgrad = 'f', if filenames == []: filenames = [None] for name in filenames: + outname = (os.path.splitext(name)[0] + + '_nodal' + + os.path.splitext(name)[1]) if (options.nodal and name) else None try: table = damask.ASCIItable(name = name, - outname = (os.path.splitext(name)[0] + - '_nodal' + - os.path.splitext(name)[1]) if (options.nodal and name) else None, + outname = outname, buffered = False) except: continue - damask.util.report(scriptName,name) + 
damask.util.report(scriptName,'{}{}'.format(name,' --> {}'.format(outname) if outname else '')) # ------------------------------------------ read header ------------------------------------------ @@ -141,13 +142,13 @@ for name in filenames: if table.label_dimension(options.defgrad) != 9: errors.append('deformation gradient "{}" is not a 3x3 tensor.'.format(options.defgrad)) - coordDim = table.label_dimension(options.coords) + coordDim = table.label_dimension(options.pos) if not 3 >= coordDim >= 1: - errors.append('coordinates "{}" need to have one, two, or three dimensions.'.format(options.coords)) + errors.append('coordinates "{}" need to have one, two, or three dimensions.'.format(options.pos)) elif coordDim < 3: remarks.append('appending {} dimension{} to coordinates "{}"...'.format(3-coordDim, 's' if coordDim < 2 else '', - options.coords)) + options.pos)) if remarks != []: damask.util.croak(remarks) if errors != []: @@ -157,7 +158,7 @@ for name in filenames: # --------------- figure out size and grid --------------------------------------------------------- - table.data_readArray([options.defgrad,options.coords]) + table.data_readArray([options.defgrad,options.pos]) table.data_rewind() if len(table.data.shape) < 2: table.data.shape += (1,) # expand to 2D shape @@ -196,8 +197,8 @@ for name in filenames: table.info_append(scriptID + '\t' + ' '.join(sys.argv[1:])) table.labels_append((['{}_pos' .format(i+1) for i in xrange(3)] if options.nodal else []) + - ['{}_avg({}).{}' .format(i+1,options.defgrad,options.coords) for i in xrange(3)] + - ['{}_fluct({}).{}'.format(i+1,options.defgrad,options.coords) for i in xrange(3)] ) + ['{}_avg({}).{}' .format(i+1,options.defgrad,options.pos) for i in xrange(3)] + + ['{}_fluct({}).{}'.format(i+1,options.defgrad,options.pos) for i in xrange(3)] ) table.head_write() # ------------------------------------------ output data ------------------------------------------- diff --git a/processing/post/addDivergence.py b/processing/post/addDivergence.py index d62c02fda..f13cb3a89 100755 --- a/processing/post/addDivergence.py +++ b/processing/post/addDivergence.py @@ -57,7 +57,7 @@ Deals with both vector- and tensor-valued fields. 
""", version = scriptID) parser.add_option('-p','--pos','--periodiccellcenter', - dest = 'coords', + dest = 'pos', type = 'string', metavar = 'string', help = 'label of coordinates [%default]') parser.add_option('-v','--vector', @@ -69,7 +69,7 @@ parser.add_option('-t','--tensor', action = 'extend', metavar = '', help = 'label(s) of tensor field values') -parser.set_defaults(coords = 'pos', +parser.set_defaults(pos = 'pos', ) (options,filenames) = parser.parse_args() @@ -100,8 +100,8 @@ for name in filenames: remarks = [] column = {} - if table.label_dimension(options.coords) != 3: errors.append('coordinates {} are not a vector.'.format(options.coords)) - else: colCoord = table.label_index(options.coords) + if table.label_dimension(options.pos) != 3: errors.append('coordinates {} are not a vector.'.format(options.pos)) + else: colCoord = table.label_index(options.pos) for type, data in items.iteritems(): for what in (data['labels'] if data['labels'] is not None else []): diff --git a/processing/post/addEuclideanDistance.py b/processing/post/addEuclideanDistance.py index 16c8e5234..4b3f518d5 100755 --- a/processing/post/addEuclideanDistance.py +++ b/processing/post/addEuclideanDistance.py @@ -90,7 +90,7 @@ Add column(s) containing Euclidean distance to grain structural features: bounda parser.add_option('-p', '--pos', '--position', - dest = 'coords', metavar = 'string', + dest = 'pos', metavar = 'string', help = 'label of coordinates [%default]') parser.add_option('-i', '--id', '--identifier', @@ -109,7 +109,7 @@ parser.add_option('-s', dest = 'scale', type = 'float', metavar = 'float', help = 'voxel size [%default]') -parser.set_defaults(coords = 'pos', +parser.set_defaults(pos = 'pos', id = 'texture', neighborhood = 'neumann', scale = 1.0, @@ -151,15 +151,17 @@ for name in filenames: remarks = [] column = {} - coordDim = table.label_dimension(options.coords) + coordDim = table.label_dimension(options.pos) if not 3 >= coordDim >= 1: - errors.append('coordinates "{}" need to have one, two, or three dimensions.'.format(options.coords)) - else: coordCol = table.label_index(options.coords) + errors.append('coordinates "{}" need to have one, two, or three dimensions.'.format(options.pos)) + else: coordCol = table.label_index(options.pos) if table.label_dimension(options.id) != 1: errors.append('grain identifier {} not found.'.format(options.id)) else: idCol = table.label_index(options.id) - if remarks != []: damask.util.croak(remarks) + if remarks != []: + damask.util.croak(remarks) + remarks = [] if errors != []: damask.util.croak(errors) table.close(dismiss = True) @@ -184,6 +186,8 @@ for name in filenames: N = grid.prod() if N != len(table.data): errors.append('data count {} does not match grid {}.'.format(N,'x'.join(map(str,grid)))) + else: remarks.append('grid: {}x{}x{}'.format(*grid)) + if remarks != []: damask.util.croak(remarks) if errors != []: damask.util.croak(errors) table.close(dismiss = True) @@ -194,33 +198,37 @@ for name in filenames: stack = [table.data] neighborhood = neighborhoods[options.neighborhood] - convoluted = np.empty([len(neighborhood)]+list(grid+2),'i') - microstructure = periodic_3Dpad(np.array(table.data[:,idCol].reshape(grid),'i')) - + diffToNeighbor = np.empty(list(grid+2)+[len(neighborhood)],'i') + microstructure = periodic_3Dpad(table.data[:,idCol].astype('i').reshape(grid,order='F')) + for i,p in enumerate(neighborhood): stencil = np.zeros((3,3,3),'i') stencil[1,1,1] = -1 stencil[p[0]+1, p[1]+1, p[2]+1] = 1 - convoluted[i,:,:,:] = 
ndimage.convolve(microstructure,stencil) + diffToNeighbor[:,:,:,i] = ndimage.convolve(microstructure,stencil) # compare ID at each point... + # ...to every one in the specified neighborhood + # for same IDs at both locations ==> 0 - distance = np.ones((len(feature_list),grid[0],grid[1],grid[2]),'d') - - convoluted = np.sort(convoluted,axis = 0) - uniques = np.where(convoluted[0,1:-1,1:-1,1:-1] != 0, 1,0) # initialize unique value counter (exclude myself [= 0]) + diffToNeighbor = np.sort(diffToNeighbor) # sort diff such that number of changes in diff (steps)... + # ...reflects number of unique neighbors + uniques = np.where(diffToNeighbor[1:-1,1:-1,1:-1,0] != 0, 1,0) # initialize unique value counter (exclude myself [= 0]) for i in xrange(1,len(neighborhood)): # check remaining points in neighborhood uniques += np.where(np.logical_and( - convoluted[i,1:-1,1:-1,1:-1] != convoluted[i-1,1:-1,1:-1,1:-1], # flip of ID difference detected? - convoluted[i,1:-1,1:-1,1:-1] != 0), # not myself? - 1,0) # count flip + diffToNeighbor[1:-1,1:-1,1:-1,i] != 0, # not myself? + diffToNeighbor[1:-1,1:-1,1:-1,i] != diffToNeighbor[1:-1,1:-1,1:-1,i-1], + ), # flip of ID difference detected? + 1,0) # count that flip + + distance = np.ones((len(feature_list),grid[0],grid[1],grid[2]),'d') for i,feature_id in enumerate(feature_list): distance[i,:,:,:] = np.where(uniques >= features[feature_id]['aliens'],0.0,1.0) # seed with 0.0 when enough unique neighbor IDs are present distance[i,:,:,:] = ndimage.morphology.distance_transform_edt(distance[i,:,:,:])*[options.scale]*3 - distance.shape = ([len(feature_list),grid.prod(),1]) + distance = distance.reshape([len(feature_list),grid.prod(),1],order='F') for i in xrange(len(feature_list)): stack.append(distance[i,:]) diff --git a/processing/post/addGradient.py b/processing/post/addGradient.py index b2d004bd2..12ecf218d 100755 --- a/processing/post/addGradient.py +++ b/processing/post/addGradient.py @@ -61,7 +61,7 @@ Deals with both vector- and scalar fields. 
""", version = scriptID) parser.add_option('-p','--pos','--periodiccellcenter', - dest = 'coords', + dest = 'pos', type = 'string', metavar = 'string', help = 'label of coordinates [%default]') parser.add_option('-v','--vector', @@ -73,7 +73,7 @@ parser.add_option('-s','--scalar', action = 'extend', metavar = '', help = 'label(s) of scalar field values') -parser.set_defaults(coords = 'pos', +parser.set_defaults(pos = 'pos', ) (options,filenames) = parser.parse_args() @@ -104,8 +104,8 @@ for name in filenames: remarks = [] column = {} - if table.label_dimension(options.coords) != 3: errors.append('coordinates {} are not a vector.'.format(options.coords)) - else: colCoord = table.label_index(options.coords) + if table.label_dimension(options.pos) != 3: errors.append('coordinates {} are not a vector.'.format(options.pos)) + else: colCoord = table.label_index(options.pos) for type, data in items.iteritems(): for what in (data['labels'] if data['labels'] is not None else []): diff --git a/processing/post/averageDown.py b/processing/post/averageDown.py index 0af56e176..808ed28bb 100755 --- a/processing/post/averageDown.py +++ b/processing/post/averageDown.py @@ -20,7 +20,7 @@ Average each data block of size 'packing' into single values thus reducing the f """, version = scriptID) parser.add_option('-c','--coordinates', - dest = 'coords', + dest = 'pos', type = 'string', metavar = 'string', help = 'column label of coordinates [%default]') parser.add_option('-p','--packing', @@ -39,7 +39,7 @@ parser.add_option('-s', '--size', dest = 'size', type = 'float', nargs = 3, metavar = 'float float float', help = 'size in x,y,z [autodetect]') -parser.set_defaults(coords = 'pos', +parser.set_defaults(pos = 'pos', packing = (2,2,2), shift = (0,0,0), grid = (0,0,0), @@ -75,8 +75,8 @@ for name in filenames: errors = [] remarks = [] - if table.label_dimension(options.coords) != 3: errors.append('coordinates {} are not a vector.'.format(options.coords)) - else: colCoord = table.label_index(options.coords) + if table.label_dimension(options.pos) != 3: errors.append('coordinates {} are not a vector.'.format(options.pos)) + else: colCoord = table.label_index(options.pos) if remarks != []: damask.util.croak(remarks) if errors != []: diff --git a/processing/post/blowUp.py b/processing/post/blowUp.py index 7e6266837..6a73fe15a 100755 --- a/processing/post/blowUp.py +++ b/processing/post/blowUp.py @@ -20,7 +20,7 @@ to resolution*packing. 
""", version = scriptID) parser.add_option('-c','--coordinates', - dest = 'coords', metavar = 'string', + dest = 'pos', metavar = 'string', help = 'column label of coordinates [%default]') parser.add_option('-p','--packing', dest = 'packing', type = 'int', nargs = 3, metavar = 'int int int', @@ -31,7 +31,7 @@ parser.add_option('-g','--grid', parser.add_option('-s','--size', dest = 'dimension', type = 'float', nargs = 3, metavar = 'int int int', help = 'dimension in x,y,z [autodetect]') -parser.set_defaults(coords = 'pos', +parser.set_defaults(pos = 'pos', packing = (2,2,2), grid = (0,0,0), size = (0.0,0.0,0.0), @@ -63,8 +63,8 @@ for name in filenames: errors = [] remarks = [] - if table.label_dimension(options.coords) != 3: errors.append('coordinates {} are not a vector.'.format(options.coords)) - else: colCoord = table.label_index(options.coords) + if table.label_dimension(options.pos) != 3: errors.append('coordinates {} are not a vector.'.format(options.pos)) + else: colCoord = table.label_index(options.pos) colElem = table.label_index('elem') @@ -76,7 +76,7 @@ for name in filenames: # --------------- figure out size and grid --------------------------------------------------------- - table.data_readArray(options.coords) + table.data_readArray(options.pos) table.data_rewind() coords = [np.unique(table.data[:,i]) for i in xrange(3)] diff --git a/processing/post/sortTable.py b/processing/post/sortTable.py index 92fe221ee..e4f3c8dd0 100755 --- a/processing/post/sortTable.py +++ b/processing/post/sortTable.py @@ -14,7 +14,7 @@ scriptID = ' '.join([scriptName,damask.version]) # -------------------------------------------------------------------- parser = OptionParser(option_class=damask.extendableOption, usage='%prog options [file[s]]', description = """ -Sort rows by given column label(s). +Sort rows by given (or all) column label(s). Examples: With coordinates in columns "x", "y", and "z"; sorting with x slowest and z fastest varying index: --label x,y,z. 
@@ -30,25 +30,19 @@ parser.add_option('-r','--reverse', action = 'store_true', help = 'sort in reverse') -parser.set_defaults(key = [], - reverse = False, +parser.set_defaults(reverse = False, ) (options,filenames) = parser.parse_args() -if options.keys is None: - parser.error('No sorting column(s) specified.') - -options.keys.reverse() # numpy sorts with most significant column as last # --- loop over input files ------------------------------------------------------------------------- if filenames == []: filenames = [None] for name in filenames: - try: - table = damask.ASCIItable(name = name, - buffered = False) + try: table = damask.ASCIItable(name = name, + buffered = False) except: continue damask.util.report(scriptName,name) @@ -61,15 +55,16 @@ for name in filenames: # ------------------------------------------ process data --------------------------------------- table.data_readArray() + + keys = table.labels[::-1] if options.keys is None else options.keys[::-1] # numpy sorts with most significant column as last + cols = [] remarks = [] - for i,column in enumerate(table.label_index(options.keys)): - if column < 0: - remarks.append("label {0} not present.".format(options.keys[i])) - else: - cols += [table.data[:,column]] + for i,column in enumerate(table.label_index(keys)): + if column < 0: remarks.append('label "{}" not present...'.format(keys[i])) + else: cols += [table.data[:,column]] if remarks != []: damask.util.croak(remarks) - + ind = np.lexsort(cols) if cols != [] else np.arange(table.data.shape[0]) if options.reverse: ind = ind[::-1] diff --git a/processing/post/stddevDown.py b/processing/post/stddevDown.py index e14ee0058..f4fdf5a90 100755 --- a/processing/post/stddevDown.py +++ b/processing/post/stddevDown.py @@ -19,7 +19,7 @@ to resolution/packing. 
""", version = scriptID) -parser.add_option('-c','--coordinates', dest='coords', type='string',\ +parser.add_option('-c','--coordinates', dest='pos', type='string',\ help='column heading for coordinates [%default]') parser.add_option('-p','--packing', dest='packing', type='int', nargs=3, \ help='dimension of packed group %default') @@ -29,7 +29,7 @@ parser.add_option('-r','--resolution', dest='resolution', type='int', nargs=3, help='resolution in x,y,z [autodetect]') parser.add_option('-d','--dimension', dest='dimension', type='float', nargs=3, \ help='dimension in x,y,z [autodetect]') -parser.set_defaults(coords = 'ipinitialcoord') +parser.set_defaults(coords = 'pos') parser.set_defaults(packing = [2,2,2]) parser.set_defaults(shift = [0,0,0]) parser.set_defaults(resolution = [0,0,0]) @@ -75,12 +75,12 @@ for file in files: # --------------- figure out size and grid --------------------------------------------------------- try: - locationCol = table.labels.index('1_%s'%options.coords) # columns containing location data + locationCol = table.labels.index('1_%s'%options.pos) # columns containing location data except ValueError: try: - locationCol = table.labels.index('%s.x'%options.coords) # columns containing location data (legacy naming scheme) + locationCol = table.labels.index('%s.x'%options.pos) # columns containing location data (legacy naming scheme) except ValueError: - file['croak'].write('no coordinate data (1_%s/%s.x) found...\n'%(options.coords,options.coords)) + file['croak'].write('no coordinate data (1_%s/%s.x) found...\n'%(options.pos,options.pos)) continue if (any(options.resolution)==0 or any(options.dimension)==0.0): diff --git a/processing/pre/geom_fromTable.py b/processing/pre/geom_fromTable.py index 53281c5df..5466236e8 100755 --- a/processing/pre/geom_fromTable.py +++ b/processing/pre/geom_fromTable.py @@ -21,7 +21,7 @@ Generate geometry description and material configuration from position, phase, a """, version = scriptID) parser.add_option('--coordinates', - dest = 'coordinates', + dest = 'pos', type = 'string', metavar = 'string', help = 'coordinates label') parser.add_option('--phase', @@ -135,11 +135,11 @@ for name in filenames: # ------------------------------------------ sanity checks --------------------------------------- - coordDim = table.label_dimension(options.coordinates) + coordDim = table.label_dimension(options.pos) errors = [] if not 3 >= coordDim >= 2: - errors.append('coordinates "{}" need to have two or three dimensions.'.format(options.coordinates)) + errors.append('coordinates "{}" need to have two or three dimensions.'.format(options.pos)) if not np.all(table.label_dimension(label) == dim): errors.append('input "{}" needs to have dimension {}.'.format(label,dim)) if options.phase and table.label_dimension(options.phase) != 1: @@ -150,7 +150,7 @@ for name in filenames: table.close(dismiss = True) continue - table.data_readArray([options.coordinates] \ + table.data_readArray([options.pos] \ + ([label] if isinstance(label, types.StringTypes) else label) \ + ([options.phase] if options.phase else []))