diff --git a/env/CONFIG b/env/CONFIG
index 52057526a..4407f4d2b 100644
--- a/env/CONFIG
+++ b/env/CONFIG
@@ -1,5 +1,5 @@
 # "set"-syntax needed only for tcsh (but works with bash and zsh)
-set DAMASK_NUM_THREADS = 4
+set OMP_NUM_THREADS = 4
 
-set MSC_ROOT = /opt/msc
-set MSC_VERSION = 2020
+set MSC_ROOT = /opt/msc
+set MSC_VERSION = 2020
diff --git a/env/DAMASK.csh b/env/DAMASK.csh
index cc61449d2..58f2a150e 100644
--- a/env/DAMASK.csh
+++ b/env/DAMASK.csh
@@ -10,8 +10,8 @@ source $ENV_ROOT/CONFIG
 set path = ($DAMASK_ROOT/bin $path)
 
 set SOLVER=`which DAMASK_grid`
-if ( "x$DAMASK_NUM_THREADS" == "x" ) then
-  set DAMASK_NUM_THREADS=1
+if ( "x$OMP_NUM_THREADS" == "x" ) then
+  set OMP_NUM_THREADS=1
 endif
 
 # currently, there is no information that unlimited stack size causes problems
@@ -38,13 +38,13 @@ if ( $?prompt ) then
     echo "MSC.Marc/Mentat $MSC_ROOT"
   endif
   echo
-  echo "Multithreading DAMASK_NUM_THREADS=$DAMASK_NUM_THREADS"
+  echo "Multithreading OMP_NUM_THREADS=$OMP_NUM_THREADS"
   echo `limit datasize`
   echo `limit stacksize`
   echo
 endif
 
-setenv DAMASK_NUM_THREADS $DAMASK_NUM_THREADS
+setenv OMP_NUM_THREADS $OMP_NUM_THREADS
 if ( ! $?PYTHONPATH ) then
   setenv PYTHONPATH $DAMASK_ROOT/python
 else
diff --git a/env/DAMASK.sh b/env/DAMASK.sh
index 2151e842b..24a2c2de3 100644
--- a/env/DAMASK.sh
+++ b/env/DAMASK.sh
@@ -38,7 +38,7 @@ PATH=${DAMASK_ROOT}/bin:$PATH
 SOLVER=$(type -p DAMASK_grid || true 2>/dev/null)
 [ "x$SOLVER" == "x" ] && SOLVER=$(blink 'Not found!')
 
-[ "x$DAMASK_NUM_THREADS" == "x" ] && DAMASK_NUM_THREADS=1
+[ "x$OMP_NUM_THREADS" == "x" ] && OMP_NUM_THREADS=1
 
 # currently, there is no information that unlimited stack size causes problems
 # still, http://software.intel.com/en-us/forums/topic/501500 suggest to fix it
@@ -66,7 +66,7 @@ if [ ! -z "$PS1" ]; then
   echo -n "MSC.Marc/Mentat "
   [ -d $MSC_ROOT ] && echo $MSC_ROOT || blink $MSC_ROOT
   echo
-  echo "Multithreading DAMASK_NUM_THREADS=$DAMASK_NUM_THREADS"
+  echo "Multithreading OMP_NUM_THREADS=$OMP_NUM_THREADS"
   echo -n "heap size "
   [[ "$(ulimit -d)" == "unlimited" ]] \
   && echo "unlimited" \
@@ -86,7 +86,7 @@ if [ ! -z "$PS1" ]; then
   echo
 fi
 
-export DAMASK_NUM_THREADS
+export OMP_NUM_THREADS
 export PYTHONPATH=$DAMASK_ROOT/python:$PYTHONPATH
 
 for var in BASE STAT SOLVER BRANCH; do
diff --git a/env/DAMASK.zsh b/env/DAMASK.zsh
index 377aa5304..e12a5248a 100644
--- a/env/DAMASK.zsh
+++ b/env/DAMASK.zsh
@@ -30,7 +30,7 @@ PATH=${DAMASK_ROOT}/bin:$PATH
 SOLVER=$(which DAMASK_grid || true 2>/dev/null)
 [[ "x$SOLVER" == "x" ]] && SOLVER=$(blink 'Not found!')
 
-[[ "x$DAMASK_NUM_THREADS" == "x" ]] && DAMASK_NUM_THREADS=1
+[[ "x$OMP_NUM_THREADS" == "x" ]] && OMP_NUM_THREADS=1
 
 # currently, there is no information that unlimited stack size causes problems
 # still, http://software.intel.com/en-us/forums/topic/501500 suggest to fix it
@@ -60,7 +60,7 @@ if [ ! -z "$PS1" ]; then
   echo -n "MSC.Marc/Mentat "
   [ -d $MSC_ROOT ] && echo $MSC_ROOT || blink $MSC_ROOT
   echo
-  echo "Multithreading DAMASK_NUM_THREADS=$DAMASK_NUM_THREADS"
+  echo "Multithreading OMP_NUM_THREADS=$OMP_NUM_THREADS"
   echo -n "heap size "
   [[ "$(ulimit -d)" == "unlimited" ]] \
   && echo "unlimited" \
@@ -80,7 +80,7 @@ if [ ! -z "$PS1" ]; then
   echo
 fi
 
-export DAMASK_NUM_THREADS
+export OMP_NUM_THREADS
 export PYTHONPATH=$DAMASK_ROOT/python:$PYTHONPATH
 
 for var in SOLVER BRANCH; do
diff --git a/python/damask/_environment.py b/python/damask/_environment.py
index 7d93b83e0..30110e7ae 100644
--- a/python/damask/_environment.py
+++ b/python/damask/_environment.py
@@ -26,10 +26,7 @@ class Environment:
     @property
     def options(self):
         options = {}
-        for item in ['DAMASK_NUM_THREADS',
-                     'MSC_ROOT',
-                     'MSC_VERSION',
-                    ]:
+        for item in ['MSC_ROOT', 'MSC_VERSION']:
             options[item] = os.environ[item] if item in os.environ else None
 
         return options
diff --git a/python/damask/_grid.py b/python/damask/_grid.py
index 0edec05f9..2d3e59b37 100644
--- a/python/damask/_grid.py
+++ b/python/damask/_grid.py
@@ -1,7 +1,7 @@
 import copy
 import multiprocessing as mp
 from functools import partial
-from os import path
+import os
 import warnings
 
 import numpy as np
@@ -278,14 +278,14 @@ class Grid:
 
         """
         root_dir ='DataContainers'
         f = h5py.File(fname, 'r')
-        g = path.join(root_dir,base_group,'_SIMPL_GEOMETRY')
-        cells  = f[path.join(g,'DIMENSIONS')][()]
-        size   = f[path.join(g,'SPACING')][()] * cells
-        origin = f[path.join(g,'ORIGIN')][()]
+        g = os.path.join(root_dir,base_group,'_SIMPL_GEOMETRY')
+        cells  = f[os.path.join(g,'DIMENSIONS')][()]
+        size   = f[os.path.join(g,'SPACING')][()] * cells
+        origin = f[os.path.join(g,'ORIGIN')][()]
 
         ma = np.arange(cells.prod(),dtype=int) \
              if point_data is None else \
-             np.reshape(f[path.join(root_dir,base_group,point_data,material)],cells.prod())
+             np.reshape(f[os.path.join(root_dir,base_group,point_data,material)],cells.prod())
 
         return Grid(ma.reshape(cells,order='F'),size,origin,util.execution_stamp('Grid','load_DREAM3D'))
@@ -355,7 +355,7 @@ class Grid:
             seeds_p = seeds
 
         coords = grid_filters.coordinates0_point(cells,size).reshape(-1,3)
-        pool = mp.Pool(processes = int(environment.options['DAMASK_NUM_THREADS']))
+        pool = mp.Pool(int(os.environ.get('OMP_NUM_THREADS',1)))
         result = pool.map_async(partial(Grid._find_closest_seed,seeds_p,weights_p), [coord for coord in coords])
         pool.close()
         pool.join()
diff --git a/python/damask/_result.py b/python/damask/_result.py
index 5e37042e0..3f788f229 100644
--- a/python/damask/_result.py
+++ b/python/damask/_result.py
@@ -1134,8 +1134,7 @@ class Result:
 
         """
         chunk_size = 1024**2//8
-        num_threads = damask.environment.options['DAMASK_NUM_THREADS']
-        pool = mp.Pool(int(num_threads) if num_threads is not None else None)
+        pool = mp.Pool(int(os.environ.get('OMP_NUM_THREADS',1)))
         lock = mp.Manager().Lock()
 
         groups = self.groups_with_datasets(datasets.values())
diff --git a/src/parallelization.f90 b/src/parallelization.f90
index 11a3574ec..edf56407b 100644
--- a/src/parallelization.f90
+++ b/src/parallelization.f90
@@ -32,7 +32,7 @@ contains
 subroutine parallelization_init
 
   integer :: err, typeSize
-!$ integer :: got_env, DAMASK_NUM_THREADS, threadLevel
+!$ integer :: got_env, OMP_NUM_THREADS, threadLevel
 !$ character(len=6) NumThreadsString
 #ifdef PETSc
   PetscErrorCode :: petsc_err
@@ -87,19 +87,19 @@ call MPI_Comm_rank(PETSC_COMM_WORLD,worldrank,err)
     open(OUTPUT_UNIT,file='/dev/null',status='replace')       ! close() alone will leave some temp files in cwd
   endif
 
-!$ call get_environment_variable(name='DAMASK_NUM_THREADS',value=NumThreadsString,STATUS=got_env)
+!$ call get_environment_variable(name='OMP_NUM_THREADS',value=NumThreadsString,STATUS=got_env)
 !$ if(got_env /= 0) then
-!$   print*, 'Could not determine value of $DAMASK_NUM_THREADS'
-!$   DAMASK_NUM_THREADS = 1_pI32
+!$   print*, 'Could not determine value of $OMP_NUM_THREADS'
+!$   OMP_NUM_THREADS = 1_pI32
 !$ else
-!$   read(NumThreadsString,'(i6)') DAMASK_NUM_THREADS
-!$   if (DAMASK_NUM_THREADS < 1_pI32) then
-!$     print*, 'Invalid DAMASK_NUM_THREADS: '//trim(NumThreadsString)
-!$     DAMASK_NUM_THREADS = 1_pI32
+!$   read(NumThreadsString,'(i6)') OMP_NUM_THREADS
+!$   if (OMP_NUM_THREADS < 1_pI32) then
+!$     print*, 'Invalid OMP_NUM_THREADS: '//trim(NumThreadsString)
+!$     OMP_NUM_THREADS = 1_pI32
 !$   endif
 !$ endif
-!$ print'(a,i2)', ' DAMASK_NUM_THREADS: ',DAMASK_NUM_THREADS
-!$ call omp_set_num_threads(DAMASK_NUM_THREADS)
+!$ print'(a,i2)', ' OMP_NUM_THREADS: ',OMP_NUM_THREADS
+!$ call omp_set_num_threads(OMP_NUM_THREADS)
 
 end subroutine parallelization_init
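Note on the common pattern: across the shell scripts, _grid.py, _result.py, and
parallelization.f90, thread-count control now flows through the single standard
OpenMP variable instead of the DAMASK-specific one. A minimal sketch of the new
Python-side pattern, using only the standard library (make_pool and the demo
__main__ block are illustrative helpers, not part of DAMASK's API):

    import os
    import multiprocessing as mp


    def make_pool():
        """Hypothetical helper mirroring the pattern from _grid.py/_result.py.

        OMP_NUM_THREADS now sizes both the OpenMP thread count of the Fortran
        solver (via omp_set_num_threads) and Python multiprocessing pools;
        when the variable is unset, both sides fall back to a single worker.
        """
        return mp.Pool(int(os.environ.get('OMP_NUM_THREADS', 1)))


    if __name__ == '__main__':
        # run as e.g.: OMP_NUM_THREADS=4 python demo.py
        with make_pool() as pool:
            print(pool.map(abs, [-1, -2, -3]))  # mapped over the configured workers

One behavioral consequence worth noting: the old _result.py code passed None to
mp.Pool when the variable was unset, letting Python default to os.cpu_count()
workers, whereas int(os.environ.get('OMP_NUM_THREADS',1)) defaults to one
worker, matching the single-thread fallback in the env scripts and in
parallelization_init.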