Merge branch 'polishing-for-release' into 'development'

Polishing for release

See merge request damask/DAMASK!322

commit 5b9a2d7bf7
@@ -1 +1 @@
-Subproject commit b1a31a79cc90d458494068a96cfd3e9497aa330c
+Subproject commit 3ab1053b8c83248dc8654a22a3caea815c0813db
@@ -1,5 +1,5 @@
 # "set"-syntax needed only for tcsh (but works with bash and zsh)
-set DAMASK_NUM_THREADS = 4
+set OMP_NUM_THREADS = 4
 
 set MSC_ROOT = /opt/msc
 set MSC_VERSION = 2020
@@ -1,54 +0,0 @@
-# sets up an environment for DAMASK on tcsh
-# usage: source DAMASK_env.csh
-
-set CALLED=($_)
-set ENV_ROOT=`dirname $CALLED[2]`
-set DAMASK_ROOT=`python3 -c "import os,sys; print(os.path.realpath(os.path.expanduser(sys.argv[1])))" $ENV_ROOT"/../"`
-
-source $ENV_ROOT/CONFIG
-
-set path = ($DAMASK_ROOT/bin $path)
-
-set SOLVER=`which DAMASK_grid`
-if ( "x$DAMASK_NUM_THREADS" == "x" ) then
-  set DAMASK_NUM_THREADS=1
-endif
-
-# currently, there is no information that unlimited stack size causes problems
-# still, http://software.intel.com/en-us/forums/topic/501500 suggest to fix it
-# more info https://jblevins.org/log/segfault
-#           https://stackoverflow.com/questions/79923/what-and-where-are-the-stack-and-heap
-#           http://superuser.com/questions/220059/what-parameters-has-ulimit
-limit stacksize unlimited # maximum stack size (kB)
-
-# disable output in case of scp
-if ( $?prompt ) then
-  echo ''
-  echo Düsseldorf Advanced Materials Simulation Kit --- DAMASK
-  echo Max-Planck-Institut für Eisenforschung GmbH, Düsseldorf
-  echo https://damask.mpie.de
-  echo
-  echo Using environment with ...
-  echo "DAMASK          $DAMASK_ROOT"
-  echo "Grid Solver     $SOLVER"
-  if ( $?PETSC_DIR) then
-    echo "PETSc location  $PETSC_DIR"
-  endif
-  if ( $?MSC_ROOT) then
-    echo "MSC.Marc/Mentat $MSC_ROOT"
-  endif
-  echo
-  echo "Multithreading  DAMASK_NUM_THREADS=$DAMASK_NUM_THREADS"
-  echo `limit datasize`
-  echo `limit stacksize`
-  echo
-endif
-
-setenv DAMASK_NUM_THREADS $DAMASK_NUM_THREADS
-if ( ! $?PYTHONPATH ) then
-  setenv PYTHONPATH $DAMASK_ROOT/python
-else
-  setenv PYTHONPATH $DAMASK_ROOT/python:$PYTHONPATH
-endif
-setenv MSC_ROOT
-setenv MSC_VERSION
@@ -38,7 +38,7 @@ PATH=${DAMASK_ROOT}/bin:$PATH
 SOLVER=$(type -p DAMASK_grid || true 2>/dev/null)
 [ "x$SOLVER" == "x" ] && SOLVER=$(blink 'Not found!')
 
-[ "x$DAMASK_NUM_THREADS" == "x" ] && DAMASK_NUM_THREADS=1
+[ "x$OMP_NUM_THREADS" == "x" ] && OMP_NUM_THREADS=1
 
 # currently, there is no information that unlimited stack size causes problems
 # still, http://software.intel.com/en-us/forums/topic/501500 suggest to fix it
@@ -66,7 +66,7 @@ if [ ! -z "$PS1" ]; then
   echo -n "MSC.Marc/Mentat "
   [ -d $MSC_ROOT ] && echo $MSC_ROOT || blink $MSC_ROOT
   echo
-  echo "Multithreading DAMASK_NUM_THREADS=$DAMASK_NUM_THREADS"
+  echo "Multithreading OMP_NUM_THREADS=$OMP_NUM_THREADS"
   echo -n "heap size "
   [[ "$(ulimit -d)" == "unlimited" ]] \
   && echo "unlimited" \
@@ -86,11 +86,13 @@ if [ ! -z "$PS1" ]; then
   echo
 fi
 
-export DAMASK_NUM_THREADS
+export OMP_NUM_THREADS
+export MSC_ROOT
+export MSC_VERSION
+export DAMASK_ROOT
 export PYTHONPATH=$DAMASK_ROOT/python:$PYTHONPATH
 
 for var in BASE STAT SOLVER BRANCH; do
   unset "${var}"
 done
 unset "ENV_ROOT"
-unset "DAMASK_ROOT"
@@ -30,7 +30,7 @@ PATH=${DAMASK_ROOT}/bin:$PATH
 SOLVER=$(which DAMASK_grid || true 2>/dev/null)
 [[ "x$SOLVER" == "x" ]] && SOLVER=$(blink 'Not found!')
 
-[[ "x$DAMASK_NUM_THREADS" == "x" ]] && DAMASK_NUM_THREADS=1
+[[ "x$OMP_NUM_THREADS" == "x" ]] && OMP_NUM_THREADS=1
 
 # currently, there is no information that unlimited stack size causes problems
 # still, http://software.intel.com/en-us/forums/topic/501500 suggest to fix it
@@ -60,7 +60,7 @@ if [ ! -z "$PS1" ]; then
   echo -n "MSC.Marc/Mentat "
   [ -d $MSC_ROOT ] && echo $MSC_ROOT || blink $MSC_ROOT
   echo
-  echo "Multithreading DAMASK_NUM_THREADS=$DAMASK_NUM_THREADS"
+  echo "Multithreading OMP_NUM_THREADS=$OMP_NUM_THREADS"
   echo -n "heap size "
   [[ "$(ulimit -d)" == "unlimited" ]] \
   && echo "unlimited" \
@@ -80,11 +80,13 @@ if [ ! -z "$PS1" ]; then
   echo
 fi
 
-export DAMASK_NUM_THREADS
+export OMP_NUM_THREADS
+export MSC_ROOT
+export MSC_VERSION
+export DAMASK_ROOT
 export PYTHONPATH=$DAMASK_ROOT/python:$PYTHONPATH
 
 for var in SOLVER BRANCH; do
   unset "${var}"
 done
 unset "ENV_ROOT"
-unset "DAMASK_ROOT"
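Note (not part of the diff): the shell hunks above replace the DAMASK-specific DAMASK_NUM_THREADS with OMP_NUM_THREADS, the variable OpenMP itself honors. A minimal Python sketch of the lookup pattern used by the Python side after this change, with the same single-thread fallback as the shell scripts:

```python
import os

# OMP_NUM_THREADS is the standard OpenMP control variable; fall back to a
# single thread when it is unset, mirroring the shell fallback above.
num_threads = int(os.environ.get('OMP_NUM_THREADS', 1))
print(f'running with {num_threads} thread(s)')
```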
@@ -5,13 +5,9 @@ import glob
 import argparse
 from pathlib import Path
 
-import damask
-
-msc_version = float(damask.environment.options['MSC_VERSION'])
-if int(msc_version) == msc_version:
-    msc_version = int(msc_version)
-msc_root = Path(damask.environment.options['MSC_ROOT'])
-damask_root = damask.environment.root_dir
+msc_version = os.environ['MSC_VERSION']
+msc_root = Path(os.environ['MSC_ROOT'])
+damask_root = Path(os.environ['DAMASK_ROOT'])
 
 parser = argparse.ArgumentParser(
                  description='Apply DAMASK modification to MSC.Marc/Mentat',
@@ -24,7 +20,7 @@ def copy_and_replace(in_file,dst):
     with open(in_file) as f:
         content = f.read()
     content = content.replace('%INSTALLDIR%',str(msc_root))
-    content = content.replace('%VERSION%',str(msc_version))
+    content = content.replace('%VERSION%', msc_version)
     content = content.replace('%EDITOR%', parser.parse_args().editor)
     with open(dst/Path(in_file).name,'w') as f:
         f.write(content)
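Note (not part of the diff): dropping `str()` around `msc_version` is safe because the value now comes straight from `os.environ`, which always yields `str`; the earlier float/int round trip existed only to normalize the parsed option value. A small sketch of why the replace still works (the template line is a hypothetical placeholder):

```python
import os

os.environ['MSC_VERSION'] = '2020'                 # environment values are always str
msc_version = os.environ['MSC_VERSION']

template = 'MENTAT version: %VERSION%'             # hypothetical template content
print(template.replace('%VERSION%', msc_version))  # no str() conversion needed
```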
@@ -53,8 +49,8 @@ for in_file in glob.glob(str(src/'job_run.ms')):
 
 print('compiling Mentat menu binaries...')
 
-executable = str(msc_root/f'mentat{msc_version}/bin/mentat')
-menu_file = str(msc_root/f'mentat{msc_version}/menus/linux64/main.msb')
+executable = msc_root/f'mentat{msc_version}/bin/mentat'
+menu_file = msc_root/f'mentat{msc_version}/menus/linux64/main.msb'
 os.system(f'xvfb-run {executable} -compile {menu_file}')
 
 
@@ -3,11 +3,9 @@
 # Makes postprocessing routines accessible from everywhere.
 import sys
 from pathlib import Path
+import os
 
-import damask
-
-env = damask.Environment()
-bin_dir = env.root_dir/Path('bin')
+bin_dir = Path(os.environ['DAMASK_ROOT'])/'bin'
 
 if not bin_dir.exists():
     bin_dir.mkdir()
@@ -15,7 +13,7 @@ if not bin_dir.exists():
 
 sys.stdout.write('\nsymbolic linking...\n')
 for sub_dir in ['pre','post']:
-    the_dir = env.root_dir/Path('processing')/Path(sub_dir)
+    the_dir = Path(os.environ['DAMASK_ROOT'])/'processing'/sub_dir
 
     for the_file in the_dir.glob('*.py'):
         src = the_dir/the_file
@@ -41,15 +41,15 @@ for filename in options.filenames:
     table = damask.Table(np.ones(np.product(results.cells),dtype=int)*int(inc[3:]),{'inc':(1,)})\
                  .add('pos',coords.reshape(-1,3))
 
-    results.pick('homogenizations',False)
-    results.pick('phases',True)
+    results.view('homogenizations',False)
+    results.view('phases',True)
     for label in options.con:
         x = results.get_dataset_location(label)
         if len(x) != 0:
             table = table.add(label,results.read_dataset(x,0,plain=True).reshape(results.cells.prod(),-1))
 
-    results.pick('phases',False)
-    results.pick('homogenizations',True)
+    results.view('phases',False)
+    results.view('homogenizations',True)
     for label in options.mat:
         x = results.get_dataset_location(label)
         if len(x) != 0:
@@ -16,8 +16,6 @@ with open(_Path(__file__).parent/_Path('VERSION')) as _f:
 __version__ = version
 
 # make classes directly accessible as damask.Class
-from ._environment import Environment as _ # noqa
-environment = _()
 from . import util # noqa
 from . import seeds # noqa
 from . import tensor # noqa
@@ -38,7 +36,6 @@ from ._result import Result # noqa
 
 
 # deprecated
-Environment = _
 from ._asciitable import ASCIItable # noqa
 from ._test import Test # noqa
 from .util import extendableOption # noqa
@@ -1,41 +0,0 @@
-import os
-from pathlib import Path
-
-class Environment:
-
-    @property
-    def screen_size(self):
-        try:
-            import wx
-            _ = wx.App(False) # noqa
-            width, height = wx.GetDisplaySize()
-        except ImportError:
-            try:
-                import tkinter
-                tk = tkinter.Tk()
-                width = tk.winfo_screenwidth()
-                height = tk.winfo_screenheight()
-                tk.destroy()
-            except Exception as e:
-                width = 1024
-                height = 768
-
-        return (width,height)
-
-
-    @property
-    def options(self):
-        options = {}
-        for item in ['DAMASK_NUM_THREADS',
-                     'MSC_ROOT',
-                     'MSC_VERSION',
-                     ]:
-            options[item] = os.environ[item] if item in os.environ else None
-
-        return options
-
-
-    @property
-    def root_dir(self):
-        """Return DAMASK root path."""
-        return Path(__file__).parents[2]
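Note (not part of the diff): with `_environment.py` gone, the `Environment.options` lookup collapses to plain `os.environ` access throughout the package. A sketch of the equivalent of the deleted property, assuming the same three variables with DAMASK_NUM_THREADS renamed to OMP_NUM_THREADS as elsewhere in this commit:

```python
import os

# rough equivalent of the removed Environment.options property
options = {item: os.environ.get(item)   # None when unset, matching the old behavior
           for item in ['OMP_NUM_THREADS', 'MSC_ROOT', 'MSC_VERSION']}
print(options)
```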
@@ -1,7 +1,7 @@
 import copy
 import multiprocessing as mp
 from functools import partial
-from os import path
+import os
 import warnings
 
 import numpy as np
@@ -10,7 +10,6 @@ import h5py
 from scipy import ndimage, spatial
 from vtk.util.numpy_support import vtk_to_numpy as vtk_to_np
 
-from . import environment
 from . import VTK
 from . import util
 from . import grid_filters
@@ -278,14 +277,14 @@ class Grid:
         """
         root_dir ='DataContainers'
         f = h5py.File(fname, 'r')
-        g = path.join(root_dir,base_group,'_SIMPL_GEOMETRY')
-        cells = f[path.join(g,'DIMENSIONS')][()]
-        size = f[path.join(g,'SPACING')][()] * cells
-        origin = f[path.join(g,'ORIGIN')][()]
+        g = os.path.join(root_dir,base_group,'_SIMPL_GEOMETRY')
+        cells = f[os.path.join(g,'DIMENSIONS')][()]
+        size = f[os.path.join(g,'SPACING')][()] * cells
+        origin = f[os.path.join(g,'ORIGIN')][()]
 
         ma = np.arange(cells.prod(),dtype=int) \
              if point_data is None else \
-             np.reshape(f[path.join(root_dir,base_group,point_data,material)],cells.prod())
+             np.reshape(f[os.path.join(root_dir,base_group,point_data,material)],cells.prod())
 
         return Grid(ma.reshape(cells,order='F'),size,origin,util.execution_stamp('Grid','load_DREAM3D'))
 
@@ -355,7 +354,7 @@ class Grid:
             seeds_p = seeds
         coords = grid_filters.coordinates0_point(cells,size).reshape(-1,3)
 
-        pool = mp.Pool(processes = int(environment.options['DAMASK_NUM_THREADS']))
+        pool = mp.Pool(int(os.environ.get('OMP_NUM_THREADS',1)))
         result = pool.map_async(partial(Grid._find_closest_seed,seeds_p,weights_p), [coord for coord in coords])
         pool.close()
         pool.join()
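Note (not part of the diff): the Voronoi worker-pool size now also derives from OMP_NUM_THREADS. A self-contained sketch of the same pool pattern, using a toy work function rather than the actual `Grid._find_closest_seed`:

```python
import os
import multiprocessing as mp
from functools import partial

def _scale(factor, x):            # toy stand-in for Grid._find_closest_seed
    return factor * x

if __name__ == '__main__':
    # size the pool from OMP_NUM_THREADS, defaulting to one worker
    pool = mp.Pool(int(os.environ.get('OMP_NUM_THREADS', 1)))
    result = pool.map_async(partial(_scale, 2), range(8))
    pool.close()
    pool.join()
    print(result.get())           # [0, 2, 4, 6, 8, 10, 12, 14]
```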
@@ -80,12 +80,12 @@ class Result:
             self.out_type_ho += f['/'.join([self.increments[0],'homogenization',m])].keys()
         self.out_type_ho = list(set(self.out_type_ho)) # make unique
 
-        self.selection = {'increments': self.increments,
+        self.visible = {'increments': self.increments,
                         'phases': self.phases,
                         'homogenizations': self.homogenizations,
                         'out_type_ph': self.out_type_ph,
                         'out_type_ho': self.out_type_ho
                        }
 
         self.fname = Path(fname).absolute()
 
@@ -94,23 +94,23 @@ class Result:
 
     def __repr__(self):
         """Show summary of file content."""
-        all_selected_increments = self.selection['increments']
+        visible_increments = self.visible['increments']
 
-        self.pick('increments',all_selected_increments[0:1])
+        self.view('increments',visible_increments[0:1])
         first = self.list_data()
 
-        self.pick('increments',all_selected_increments[-1:])
-        last = '' if len(all_selected_increments) < 2 else self.list_data()
+        self.view('increments',visible_increments[-1:])
+        last = '' if len(visible_increments) < 2 else self.list_data()
 
-        self.pick('increments',all_selected_increments)
+        self.view('increments',visible_increments)
 
-        in_between = '' if len(all_selected_increments) < 3 else \
-                     ''.join([f'\n{inc}\n ...\n' for inc in all_selected_increments[1:-2]])
+        in_between = '' if len(visible_increments) < 3 else \
+                     ''.join([f'\n{inc}\n ...\n' for inc in visible_increments[1:-2]])
 
         return util.srepr(first + in_between + last)
 
 
-    def _manage_selection(self,action,what,datasets):
+    def _manage_view(self,action,what,datasets):
         """
         Manages the visibility of the groups.
 
@@ -119,7 +119,7 @@ class Result:
         action : str
             Select from 'set', 'add', and 'del'.
         what : str
-            Attribute to change (must be from self.selection).
+            Attribute to change (must be in self.visible).
         datasets : list of str or bool
             Name of datasets as list, supports ? and * wildcards.
             True is equivalent to [*], False is equivalent to []
@@ -156,18 +156,18 @@ class Result:
                 choice.append(self.increments[idx+1])
 
         valid = [e for e_ in [glob.fnmatch.filter(getattr(self,what),s) for s in choice] for e in e_]
-        existing = set(self.selection[what])
+        existing = set(self.visible[what])
 
         if action == 'set':
-            self.selection[what] = valid
+            self.visible[what] = valid
         elif action == 'add':
             add = existing.union(valid)
             add_sorted = sorted(add, key=natural_sort)
-            self.selection[what] = add_sorted
+            self.visible[what] = add_sorted
         elif action == 'del':
             diff = existing.difference(valid)
             diff_sorted = sorted(diff, key=natural_sort)
-            self.selection[what] = diff_sorted
+            self.visible[what] = diff_sorted
 
 
     def _get_attribute(self,path,attr):
@@ -245,72 +245,72 @@ class Result:
 
     def iterate(self,what):
         """
-        Iterate over selection items by setting each one selected.
+        Iterate over visible items and view them independently.
 
         Parameters
         ----------
         what : str
-            Attribute to change (must be from self.selection).
+            Attribute to change (must be from self.visible).
 
         """
-        datasets = self.selection[what]
-        last_selection = datasets.copy()
+        datasets = self.visible[what]
+        last_view = datasets.copy()
         for dataset in datasets:
-            if last_selection != self.selection[what]:
-                self._manage_selection('set',what,datasets)
+            if last_view != self.visible[what]:
+                self._manage_view('set',what,datasets)
                 raise Exception
-            self._manage_selection('set',what,dataset)
-            last_selection = self.selection[what]
+            self._manage_view('set',what,dataset)
+            last_view = self.visible[what]
             yield dataset
-        self._manage_selection('set',what,datasets)
+        self._manage_view('set',what,datasets)
 
 
-    def pick(self,what,datasets):
+    def view(self,what,datasets):
         """
-        Set selection.
+        Set view.
 
         Parameters
         ----------
         what : str
-            attribute to change (must be from self.selection)
+            attribute to change (must be from self.visible)
         datasets : list of str or bool
             name of datasets as list, supports ? and * wildcards.
             True is equivalent to [*], False is equivalent to []
 
         """
-        self._manage_selection('set',what,datasets)
+        self._manage_view('set',what,datasets)
 
 
-    def pick_more(self,what,datasets):
+    def view_more(self,what,datasets):
         """
-        Add to selection.
+        Add to view.
 
         Parameters
         ----------
         what : str
-            attribute to change (must be from self.selection)
+            attribute to change (must be from self.visible)
         datasets : list of str or bool
             name of datasets as list, supports ? and * wildcards.
             True is equivalent to [*], False is equivalent to []
 
         """
-        self._manage_selection('add',what,datasets)
+        self._manage_view('add',what,datasets)
 
 
-    def pick_less(self,what,datasets):
+    def view_less(self,what,datasets):
         """
-        Delete from selection.
+        Delete from view.
 
         Parameters
         ----------
         what : str
-            attribute to change (must be from self.selection)
+            attribute to change (must be from self.visible)
         datasets : list of str or bool
             name of datasets as list, supports ? and * wildcards.
             True is equivalent to [*], False is equivalent to []
 
         """
-        self._manage_selection('del',what,datasets)
+        self._manage_view('del',what,datasets)
 
 
     def rename(self,name_old,name_new):
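Note (not part of the diff): taken together, these hunks rename the selection API of `Result` to a "view": `pick`/`pick_more`/`pick_less` become `view`/`view_more`/`view_less`, backed by `self.visible` and `_manage_view`. A usage sketch under stated assumptions (the file name is hypothetical; the `'inc...'` increment naming is inferred from `int(inc[3:])` elsewhere in this diff):

```python
import damask

result = damask.Result('my_job.hdf5')       # hypothetical DADF5 result file
result.view('increments', True)             # make every increment visible
result.view_more('phases', '*')             # wildcards supported, per the docstrings
result.view_less('increments', 'inc0')      # hide the initial increment
```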
@@ -1134,8 +1134,7 @@ class Result:
 
         """
         chunk_size = 1024**2//8
-        num_threads = damask.environment.options['DAMASK_NUM_THREADS']
-        pool = mp.Pool(int(num_threads) if num_threads is not None else None)
+        pool = mp.Pool(int(os.environ.get('OMP_NUM_THREADS',1)))
         lock = mp.Manager().Lock()
 
         groups = self.groups_with_datasets(datasets.values())
@@ -1190,8 +1189,8 @@ class Result:
         """
         Write XDMF file to directly visualize data in DADF5 file.
 
-        This works only for scalar, 3-vector and 3x3-tensor data.
-        Selection is not taken into account.
+        The view is not taken into account, i.e. the content of the
+        whole file will be included.
         """
         if self.N_constituents != 1 or len(self.phases) != 1 or not self.structured:
             raise TypeError('XDMF output requires homogeneous grid')
@@ -1320,10 +1319,10 @@ class Result:
 
         N_digits = int(np.floor(np.log10(max(1,int(self.increments[-1][3:])))))+1
 
-        for inc in util.show_progress(self.iterate('increments'),len(self.selection['increments'])):
+        for inc in util.show_progress(self.iterate('increments'),len(self.visible['increments'])):
 
-            picked_backup_ho = self.selection['homogenizations'].copy()
-            self.pick('homogenizations',False)
+            viewed_backup_ho = self.visible['homogenizations'].copy()
+            self.view('homogenizations',False)
             for label in (labels if isinstance(labels,list) else [labels]):
                 for o in self.iterate('out_type_ph'):
                     for c in range(self.N_constituents):
@@ -1343,10 +1342,10 @@ class Result:
                     ph_name = re.compile(r'(?<=(phase\/))(.*?)(?=(mechanics))') # identify phase name
                     dset_name = prefix+re.sub(ph_name,r'',paths[0].split('/',1)[1]) # remove phase name
                     v.add(array,dset_name+f' / {self._get_attribute(paths[0],"Unit")}')
-            self.pick('homogenizations',picked_backup_ho)
+            self.view('homogenizations',viewed_backup_ho)
 
-            picked_backup_ph = self.selection['phases'].copy()
-            self.pick('phases',False)
+            viewed_backup_ph = self.visible['phases'].copy()
+            self.view('phases',False)
             for label in (labels if isinstance(labels,list) else [labels]):
                 for _ in self.iterate('out_type_ho'):
                     paths = self.get_dataset_location(label)
@@ -1354,7 +1353,7 @@ class Result:
                         continue
                     array = self.read_dataset(paths)
                     v.add(array,paths[0].split('/',1)[1]+f' / {self._get_attribute(paths[0],"Unit")}')
-            self.pick('phases',picked_backup_ph)
+            self.view('phases',viewed_backup_ph)
 
         u = self.read_dataset(self.get_dataset_location('u_n' if mode.lower() == 'cell' else 'u_p'))
         v.add(u,'u')
@@ -5,6 +5,7 @@ import logging
 import logging.config
 from collections.abc import Iterable
 from optparse import OptionParser
+from pathlib import Path
 
 import numpy as np
 
@@ -180,7 +181,7 @@ class Test:
 
     def fileInRoot(self,dir,file):
         """Path to a file in the root directory of DAMASK."""
-        return str(damask.environment.root_dir/dir/file)
+        return str(Path(os.environ['DAMASK_ROOT'])/dir/file)
 
 
     def fileInReference(self,file):
@@ -10,7 +10,6 @@ from vtk.util.numpy_support import numpy_to_vtkIdTypeArray as np_to_vtkIdTypeArr
 from vtk.util.numpy_support import vtk_to_numpy as vtk_to_np
 
 from . import util
-from . import environment
 from . import Table
 
 
@@ -348,6 +347,23 @@ class VTK:
 
         See http://compilatrix.com/article/vtk-1 for further ideas.
         """
+        def screen_size():
+            try:
+                import wx
+                _ = wx.App(False) # noqa
+                width, height = wx.GetDisplaySize()
+            except ImportError:
+                try:
+                    import tkinter
+                    tk = tkinter.Tk()
+                    width = tk.winfo_screenwidth()
+                    height = tk.winfo_screenheight()
+                    tk.destroy()
+                except Exception as e:
+                    width = 1024
+                    height = 768
+
+            return (width,height)
         mapper = vtk.vtkDataSetMapper()
         mapper.SetInputData(self.vtk_data)
         actor = vtk.vtkActor()
@@ -361,7 +377,7 @@ class VTK:
         ren.AddActor(actor)
         ren.SetBackground(0.2,0.2,0.2)
 
-        window.SetSize(environment.screen_size[0],environment.screen_size[1])
+        window.SetSize(screen_size()[0],screen_size()[1])
 
         iren = vtk.vtkRenderWindowInteractor()
         iren.SetRenderWindow(window)
@@ -2,14 +2,13 @@ import subprocess
 import shlex
 import re
 import io
+import os
 from pathlib import Path
 
-from .. import environment
-
 class Marc:
     """Wrapper to run DAMASK with MSCMarc."""
 
-    def __init__(self,version=environment.options['MSC_VERSION']):
+    def __init__(self,version=os.environ['MSC_VERSION']):
         """
         Create a Marc solver object.
 
@@ -25,9 +24,7 @@ class Marc:
     @property
     def library_path(self):
-
-        path_MSC = environment.options['MSC_ROOT']
-        path_lib = Path(f'{path_MSC}/mentat{self.version}/shlib/linux64')
+        path_lib = Path(f'{os.environ["MSC_ROOT"]}/mentat{self.version}/shlib/linux64')
 
         if not path_lib.is_dir():
             raise FileNotFoundError(f'library path "{path_lib}" not found')
 
@@ -37,9 +34,7 @@ class Marc:
     @property
     def tools_path(self):
-
-        path_MSC = environment.options['MSC_ROOT']
-        path_tools = Path(f'{path_MSC}/marc{self.version}/tools')
+        path_tools = Path(f'{os.environ["MSC_ROOT"]}/marc{self.version}/tools')
 
         if not path_tools.is_dir():
             raise FileNotFoundError(f'tools path "{path_tools}" not found')
 
@@ -54,7 +49,7 @@ class Marc:
                optimization = '',
               ):
 
-        usersub = environment.root_dir/'src/DAMASK_marc'
+        usersub = Path(os.environ['DAMASK_ROOT'])/'src/DAMASK_marc'
         usersub = usersub.parent/(usersub.name + ('.f90' if compile else '.marc'))
         if not usersub.is_file():
            raise FileNotFoundError(f'subroutine ({"source" if compile else "binary"}) "{usersub}" not found')
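Note (not part of the diff): `Marc` now resolves all paths from environment variables instead of the removed `environment` module; since the `version` default argument is evaluated when the class is defined, MSC_VERSION must be set before the import. A sketch under explicit assumptions (the paths are placeholders matching the CONFIG defaults earlier in this commit; the import path is assumed from the package layout):

```python
import os

os.environ.setdefault('MSC_ROOT', '/opt/msc')            # defaults as in CONFIG above
os.environ.setdefault('MSC_VERSION', '2020')
os.environ.setdefault('DAMASK_ROOT', '/path/to/DAMASK')  # placeholder

from damask.solver import Marc                           # assumed import path

solver = Marc()                   # default version read from MSC_VERSION at import time
print(solver.library_path)        # raises FileNotFoundError if the MSC tree is missing
```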
@@ -21,7 +21,7 @@ def default(tmp_path,ref_path):
     fname = '12grains6x7x8_tensionY.hdf5'
     shutil.copy(ref_path/fname,tmp_path)
     f = Result(tmp_path/fname)
-    f.pick('times',20.0)
+    f.view('times',20.0)
     return f
 
 @pytest.fixture
@@ -43,56 +43,56 @@ class TestResult:
         print(default)
 
 
-    def test_pick_all(self,default):
-        default.pick('increments',True)
+    def test_view_all(self,default):
+        default.view('increments',True)
         a = default.get_dataset_location('F')
-        default.pick('increments','*')
+        default.view('increments','*')
         b = default.get_dataset_location('F')
-        default.pick('increments',default.incs_in_range(0,np.iinfo(int).max))
+        default.view('increments',default.incs_in_range(0,np.iinfo(int).max))
         c = default.get_dataset_location('F')
 
-        default.pick('times',True)
+        default.view('times',True)
         d = default.get_dataset_location('F')
-        default.pick('times','*')
+        default.view('times','*')
         e = default.get_dataset_location('F')
-        default.pick('times',default.times_in_range(0.0,np.inf))
+        default.view('times',default.times_in_range(0.0,np.inf))
         f = default.get_dataset_location('F')
         assert a == b == c == d == e ==f
 
     @pytest.mark.parametrize('what',['increments','times','phases']) # ToDo: discuss homogenizations
-    def test_pick_none(self,default,what):
-        default.pick(what,False)
+    def test_view_none(self,default,what):
+        default.view(what,False)
         a = default.get_dataset_location('F')
-        default.pick(what,[])
+        default.view(what,[])
         b = default.get_dataset_location('F')
 
         assert a == b == []
 
     @pytest.mark.parametrize('what',['increments','times','phases']) # ToDo: discuss homogenizations
-    def test_pick_more(self,default,what):
-        default.pick(what,False)
-        default.pick_more(what,'*')
+    def test_view_more(self,default,what):
+        default.view(what,False)
+        default.view_more(what,'*')
         a = default.get_dataset_location('F')
 
-        default.pick(what,True)
+        default.view(what,True)
         b = default.get_dataset_location('F')
 
         assert a == b
 
     @pytest.mark.parametrize('what',['increments','times','phases']) # ToDo: discuss homogenizations
-    def test_pick_less(self,default,what):
-        default.pick(what,True)
-        default.pick_less(what,'*')
+    def test_view_less(self,default,what):
+        default.view(what,True)
+        default.view_less(what,'*')
         a = default.get_dataset_location('F')
 
-        default.pick(what,False)
+        default.view(what,False)
         b = default.get_dataset_location('F')
 
         assert a == b == []
 
-    def test_pick_invalid(self,default):
+    def test_view_invalid(self,default):
         with pytest.raises(AttributeError):
-            default.pick('invalid',True)
+            default.view('invalid',True)
 
     def test_add_absolute(self,default):
         default.add_absolute('F_e')
|
||||||
|
|
||||||
@pytest.mark.parametrize('overwrite',['off','on'])
|
@pytest.mark.parametrize('overwrite',['off','on'])
|
||||||
def test_add_overwrite(self,default,overwrite):
|
def test_add_overwrite(self,default,overwrite):
|
||||||
default.pick('times',default.times_in_range(0,np.inf)[-1])
|
default.view('times',default.times_in_range(0,np.inf)[-1])
|
||||||
|
|
||||||
default.add_stress_Cauchy()
|
default.add_stress_Cauchy()
|
||||||
loc = default.get_dataset_location('sigma')
|
loc = default.get_dataset_location('sigma')
|
||||||
|
|
|
@@ -32,7 +32,7 @@ contains
 subroutine parallelization_init
 
   integer :: err, typeSize
-!$ integer :: got_env, DAMASK_NUM_THREADS, threadLevel
+!$ integer :: got_env, OMP_NUM_THREADS, threadLevel
 !$ character(len=6) NumThreadsString
 #ifdef PETSc
   PetscErrorCode :: petsc_err
@@ -87,19 +87,19 @@ call MPI_Comm_rank(PETSC_COMM_WORLD,worldrank,err)
     open(OUTPUT_UNIT,file='/dev/null',status='replace') ! close() alone will leave some temp files in cwd
   endif
 
-!$ call get_environment_variable(name='DAMASK_NUM_THREADS',value=NumThreadsString,STATUS=got_env)
+!$ call get_environment_variable(name='OMP_NUM_THREADS',value=NumThreadsString,STATUS=got_env)
 !$ if(got_env /= 0) then
-!$   print*, 'Could not determine value of $DAMASK_NUM_THREADS'
-!$   DAMASK_NUM_THREADS = 1_pI32
+!$   print*, 'Could not determine value of $OMP_NUM_THREADS'
+!$   OMP_NUM_THREADS = 1_pI32
 !$ else
-!$   read(NumThreadsString,'(i6)') DAMASK_NUM_THREADS
-!$   if (DAMASK_NUM_THREADS < 1_pI32) then
-!$     print*, 'Invalid DAMASK_NUM_THREADS: '//trim(NumThreadsString)
-!$     DAMASK_NUM_THREADS = 1_pI32
+!$   read(NumThreadsString,'(i6)') OMP_NUM_THREADS
+!$   if (OMP_NUM_THREADS < 1_pI32) then
+!$     print*, 'Invalid OMP_NUM_THREADS: '//trim(NumThreadsString)
+!$     OMP_NUM_THREADS = 1_pI32
 !$   endif
 !$ endif
-!$ print'(a,i2)', ' DAMASK_NUM_THREADS: ',DAMASK_NUM_THREADS
-!$ call omp_set_num_threads(DAMASK_NUM_THREADS)
+!$ print'(a,i2)', ' OMP_NUM_THREADS: ',OMP_NUM_THREADS
+!$ call omp_set_num_threads(OMP_NUM_THREADS)
 
 end subroutine parallelization_init
 