Merge branch 'development' into 285-attributes-of-damask-result-should-depend-on-view

Philip Eisenlohr 2024-01-02 09:37:59 -05:00
commit 5da7ff56b9
42 changed files with 1076 additions and 1152 deletions


@@ -17,17 +17,16 @@ pkg_get_variable(CMAKE_Fortran_COMPILER PETSc fcompiler)
 pkg_get_variable(CMAKE_C_COMPILER PETSc ccompiler)
 # Solver determines name of project
-string(TOUPPER "${DAMASK_SOLVER}" DAMASK_SOLVER)
-if(DAMASK_SOLVER STREQUAL "GRID")
-project(damask-grid HOMEPAGE_URL https://damask.mpie.de LANGUAGES Fortran C)
-elseif(DAMASK_SOLVER STREQUAL "MESH")
-project(damask-mesh HOMEPAGE_URL https://damask.mpie.de LANGUAGES Fortran C)
-elseif(DAMASK_SOLVER STREQUAL "TEST")
-project(damask-test HOMEPAGE_URL https://damask.mpie.de LANGUAGES Fortran C)
+string(TOUPPER "${DAMASK_SOLVER}" DAMASK_SOLVER_UPPER)
+string(TOLOWER "${DAMASK_SOLVER}" DAMASK_SOLVER_LOWER)
+if("${DAMASK_SOLVER_UPPER}" MATCHES "^(GRID|MESH|TEST)$")
+project("damask-${DAMASK_SOLVER_LOWER}" HOMEPAGE_URL https://damask.mpie.de LANGUAGES Fortran C)
 else()
 message(FATAL_ERROR "Invalid solver: DAMASK_SOLVER=${DAMASK_SOLVER}")
 endif()
-add_definitions("-D${DAMASK_SOLVER}")
+add_definitions("-D${DAMASK_SOLVER_UPPER}")
+set(CMAKE_Fortran_PREPROCESS "ON") # works only for CMake >= 3.18
 # EXPERIMENTAL: This might help to detect HDF5 and FFTW3 in the future if PETSc is not aware of them
 set(ENV{PKG_CONFIG_PATH} "$ENV{PETSC_DIR}/$ENV{PETSC_ARCH}/externalpackages:$ENV{PKG_CONFIG_PATH}")
@@ -91,24 +90,24 @@ if(CMAKE_BUILD_TYPE STREQUAL "SYNTAXONLY")
 endif()
-list(APPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake)
 if (CMAKE_Fortran_COMPILER_ID STREQUAL "GNU")
-include(Compiler-GNU)
 set(Fortran_COMPILER_VERSION_MIN 9.1)
 elseif(CMAKE_Fortran_COMPILER_ID STREQUAL "Intel")
-include(Compiler-Intel)
 set(Fortran_COMPILER_VERSION_MIN 19)
 elseif(CMAKE_Fortran_COMPILER_ID STREQUAL "IntelLLVM")
-include(Compiler-IntelLLVM)
+set(Fortran_COMPILER_VERSION_MIN 19)
+elseif(CMAKE_Fortran_COMPILER_ID STREQUAL "LLVMFlang")
 set(Fortran_COMPILER_VERSION_MIN 19)
 else()
 message(FATAL_ERROR "Compiler '${CMAKE_Fortran_COMPILER_ID}' not supported")
 endif()
 if(CMAKE_Fortran_COMPILER_VERSION VERSION_LESS Fortran_COMPILER_VERSION_MIN)
 message(FATAL_ERROR "Version '${CMAKE_Fortran_COMPILER_VERSION}' of '${CMAKE_Fortran_COMPILER_ID}' is not supported")
 endif()
+list(APPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake)
+include("Compiler-${CMAKE_Fortran_COMPILER_ID}")
 file(STRINGS "$ENV{PETSC_DIR}/$ENV{PETSC_ARCH}/lib/petsc/conf/petscvariables" PETSC_EXTERNAL_LIB REGEX "PETSC_EXTERNAL_LIB_BASIC = .*$?")
 string(REPLACE "PETSC_EXTERNAL_LIB_BASIC = " "" PETSC_EXTERNAL_LIB "${PETSC_EXTERNAL_LIB}")
 message("PETSC_EXTERNAL_LIB:\n${PETSC_EXTERNAL_LIB}\n")

@@ -1 +1 @@
-Subproject commit 5a715996e6e8418a59fbcaf3715a2516ad05ed51
+Subproject commit 62df7f24f2a95fda255f7d20b130afcfeecb1b4a


@@ -1 +1 @@
-3.0.0-alpha8-172-gec624a86a
+3.0.0-alpha8-246-g112b5be1a


@@ -1,10 +1,6 @@
 ###################################################################################################
 # GNU Compiler
 ###################################################################################################
-if (CMAKE_Fortran_COMPILER_VERSION VERSION_LESS 9.0)
-message (FATAL_ERROR "GCC Compiler version: ${CMAKE_Fortran_COMPILER_VERSION} not supported")
-endif ()
 if (OPENMP)
 set (OPENMP_FLAGS "-fopenmp")
 endif ()
@@ -23,8 +19,7 @@ set (STANDARD_CHECK "-std=f2018 -pedantic-errors" )
 #------------------------------------------------------------------------------------------------
 # Fine tuning compilation options
-set (COMPILE_FLAGS "${COMPILE_FLAGS} -cpp")
-# preprocessor
+set (COMPILE_FLAGS "${COMPILE_FLAGS} -cpp") # preprocessor, needed for CMake < 3.18
 set (COMPILE_FLAGS "${COMPILE_FLAGS} -fPIE")
 # position independent code


@@ -1,10 +1,6 @@
 ###################################################################################################
 # Intel Compiler
 ###################################################################################################
-if (CMAKE_Fortran_COMPILER_VERSION VERSION_LESS 18.0)
-message (FATAL_ERROR "Intel Compiler version: ${CMAKE_Fortran_COMPILER_VERSION} not supported")
-endif ()
 if (OPENMP)
 set (OPENMP_FLAGS "-qopenmp -parallel")
 endif ()
@@ -26,8 +22,7 @@ set (LINKER_FLAGS "${LINKER_FLAGS} -shared-intel")
 #------------------------------------------------------------------------------------------------
 # Fine tuning compilation options
-set (COMPILE_FLAGS "${COMPILE_FLAGS} -fpp")
-# preprocessor
+set (COMPILE_FLAGS "${COMPILE_FLAGS} -fpp") # preprocessor, needed for CMake < 3.18
 set (COMPILE_FLAGS "${COMPILE_FLAGS} -no-ftz")
 # disable flush underflow to zero, will be set if -O[1,2,3]


@@ -1,10 +1,6 @@
 ###################################################################################################
-# Intel Compiler
+# IntelLLVM Compiler
 ###################################################################################################
-if (CMAKE_Fortran_COMPILER_VERSION VERSION_LESS 18.0)
-message (FATAL_ERROR "Intel Compiler version: ${CMAKE_Fortran_COMPILER_VERSION} not supported")
-endif ()
 if (OPENMP)
 set (OPENMP_FLAGS "-fiopenmp")
 endif ()
@@ -28,8 +24,7 @@ set (LINKER_FLAGS "${LINKER_FLAGS} -shared-intel -fc=ifx")
 #------------------------------------------------------------------------------------------------
 # Fine tuning compilation options
-set (COMPILE_FLAGS "${COMPILE_FLAGS} -fpp")
-# preprocessor
+set (COMPILE_FLAGS "${COMPILE_FLAGS} -fpp") # preprocessor, needed for CMake < 3.18
 set (COMPILE_FLAGS "${COMPILE_FLAGS} -no-ftz")
 # disable flush underflow to zero, will be set if -O[1,2,3]


@@ -0,0 +1,12 @@
###################################################################################################
# LLVM Compiler
###################################################################################################
if (OPENMP)
set (OPENMP_FLAGS "-fopenmp")
endif ()
set (STANDARD_CHECK "-std=f2018 -pedantic" )
#------------------------------------------------------------------------------------------------
# Fine tuning compilation options
set (COMPILE_FLAGS "${COMPILE_FLAGS} -cpp") # preprocessor, needed for CMake < 3.18


@@ -1,14 +0,0 @@
# initial elastic step
$Loadcase 1 t 0.0005 N 1 f_out 1
Face 3 Y -0.025
Face 4 X 0.0
Face 4 Y 0.0
Face 4 Z 0.0
$EndLoadcase
# plastic step
$Loadcase 2 t 1.0 N 10 f_out 2
Face 3 Y -0.025
Face 4 X 0.0
Face 4 Y 0.0
Face 4 Z 0.0
$EndLoadcase


@@ -0,0 +1,22 @@
---
loadstep:
- boundary_conditions:
mechanical:
- dot_u: ['x', -0.025, 'x']
tag: 3
- dot_u: [0.0, 0.0, 0.0]
tag: 4
discretization:
t: 0.0005
N: 1
f_out: 1
- boundary_conditions:
mechanical:
- dot_u: ['x', -0.025, 'x']
tag: 3
- dot_u: [0.0, 0.0, 0.0]
tag: 4
discretization:
t: 1.0
N: 10
f_out: 2


@@ -1,14 +0,0 @@
# initial elastic step
$Loadcase 1 t 0.0005 N 1 f_out 1
Face 1 Z 0.01
Face 2 X 0.0
Face 2 Y 0.0
Face 2 Z 0.0
$EndLoadcase
# plastic step
$Loadcase 2 t 1.0 N 10 f_out 2
Face 1 Z 0.01
Face 2 X 0.0
Face 2 Y 0.0
Face 2 Z 0.0
$EndLoadcase


@@ -0,0 +1,22 @@
---
loadstep:
- boundary_conditions:
mechanical:
- dot_u: ['x', 'x', 0.01]
tag: 1
- dot_u: [0.0, 0.0, 0.0]
tag: 2
discretization:
t: 0.0005
N: 1
f_out: 1
- boundary_conditions:
mechanical:
- dot_u: ['x', 'x', 0.01]
tag: 1
- dot_u: [0.0, 0.0, 0.0]
tag: 2
discretization:
t: 1.0
N: 10
f_out: 2


@@ -1,18 +0,0 @@
# initial elastic step
$Loadcase 1 t 0.0005 N 1 f_out 1
Face 1 X 0.0
Face 1 Y 0.0
Face 1 Z 0.0
Face 2 X 0.0
Face 2 Y 0.0
Face 2 Z 0.0025
$EndLoadcase
# plastic step
$Loadcase 2 t 1.0 N 10 f_out 2
Face 1 X 0.0
Face 1 Y 0.0
Face 1 Z 0.0
Face 2 X 0.0
Face 2 Y 0.0
Face 2 Z 0.0025
$EndLoadcase


@@ -0,0 +1,22 @@
---
loadstep:
- boundary_conditions:
mechanical:
- dot_u: [0.0, 0.0, 0.0]
tag: 1
- dot_u: [0.0, 0.0, 0.0025]
tag: 2
discretization:
t: 0.0005
N: 1
f_out: 1
- boundary_conditions:
mechanical:
- dot_u: [0.0, 0.0, 0.0]
tag: 1
- dot_u: [0.0, 0.0, 0.0025]
tag: 2
discretization:
t: 1.0
N: 10
f_out: 2
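
The three converted load cases above replace the plain-text $Loadcase/Face format of the mesh solver with YAML. As a quick illustration (not part of this commit), such a file can be read with plain PyYAML; the file name below is hypothetical.

    import yaml

    with open('shear.yaml') as f:               # hypothetical name for one of the load cases above
        load_case = yaml.safe_load(f)

    for step in load_case['loadstep']:
        for bc in step['boundary_conditions']['mechanical']:
            print(bc['tag'], bc['dot_u'])       # face tag and prescribed velocity; 'x' = unconstrained
        print(step['discretization'])           # {'t': ..., 'N': ..., 'f_out': ...}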


@@ -2,7 +2,7 @@ import os
 import json
 import functools
 import colorsys
-from typing import Optional, Union, TextIO
+from typing import Optional, Union
 from itertools import chain
 import numpy as np
@@ -344,30 +344,6 @@ class Colormap(mpl.colors.ListedColormap):
 return Colormap(np.array(rev.colors),rev.name[:-4] if rev.name.endswith('_r_r') else rev.name)
-def _get_file_handle(self,
-fname: Union[FileHandle, None],
-suffix: str = '') -> TextIO:
-"""
-Provide file handle.
-Parameters
-----------
-fname : file, str, pathlib.Path, or None
-Name or handle of file.
-If None, colormap name + suffix.
-suffix: str, optional
-Extension to use for colormap file.
-Defaults to empty.
-Returns
--------
-f : file object
-File handle with write access.
-"""
-return util.open_text(self.name.replace(' ','_')+suffix if fname is None else fname, 'w')
 def save_paraview(self,
 fname: Optional[FileHandle] = None):
 """
@@ -387,7 +363,7 @@ class Colormap(mpl.colors.ListedColormap):
 'RGBPoints':list(chain.from_iterable([(i,*c) for i,c in enumerate(self.colors.round(6))]))
 }]
-fhandle = self._get_file_handle(fname,'.json')
+with util.open_text(self.name.replace(' ','_')+'.json' if fname is None else fname, 'w') as fhandle:
 json.dump(out,fhandle,indent=4)
 fhandle.write('\n')
@@ -405,7 +381,9 @@
 """
 labels = {'RGBA':4} if self.colors.shape[1] == 4 else {'RGB': 3}
 t = Table(labels,self.colors,[f'Creator: {util.execution_stamp("Colormap")}'])
-t.save(self._get_file_handle(fname,'.txt'))
+with util.open_text(self.name.replace(' ','_')+'.txt' if fname is None else fname, 'w') as fhandle:
+t.save(fhandle)
 def save_GOM(self, fname: Optional[FileHandle] = None):
@@ -425,7 +403,8 @@
 + ' '.join([f' 0 {c[0]} {c[1]} {c[2]} 255 1' for c in reversed((self.colors*255).astype(np.int64))]) \
 + '\n'
-self._get_file_handle(fname,'.legend').write(GOM_str)
+with util.open_text(self.name.replace(' ','_')+'.legend' if fname is None else fname, 'w') as fhandle:
+fhandle.write(GOM_str)
 def save_gmsh(self,
@@ -443,7 +422,9 @@
 gmsh_str = 'View.ColorTable = {\n' \
 +'\n'.join([f'{c[0]},{c[1]},{c[2]},' for c in self.colors[:,:3]*255]) \
 +'\n}\n'
-self._get_file_handle(fname,'.msh').write(gmsh_str)
+with util.open_text(self.name.replace(' ','_')+'.msh' if fname is None else fname, 'w') as fhandle:
+fhandle.write(gmsh_str)
 @staticmethod
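
With the private _get_file_handle helper gone, every save_* method above opens its target through util.open_text used as a context manager, so the file is closed even when no explicit name is given. A minimal usage sketch (assuming a DAMASK Python installation; the colormap and file names are only examples):

    import damask

    cmap = damask.Colormap.from_predefined('stress', N=64)
    cmap.save_paraview()                 # defaults to '<name>.json', here 'stress.json'
    cmap.save_GOM('my_colors.legend')    # explicit file name overrides the default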


@@ -70,7 +70,7 @@ class LoadcaseGrid(YAML):
 if key not in kwargs:
 kwargs[key] = default
-fhandle = util.open_text(fname,'w')
+with util.open_text(fname,'w') as fhandle:
 try:
 fhandle.write(yaml.dump(self,Dumper=MaskedMatrixDumper,**kwargs))
 except TypeError: # compatibility with old pyyaml


@@ -277,7 +277,7 @@
 Table data from file.
 """
-f = util.open_text(fname)
+with util.open_text(fname) as f:
 f.seek(0)
 comments = []
@@ -339,9 +339,8 @@
 Table data from file.
 """
-f = util.open_text(fname)
+with util.open_text(fname) as f:
 f.seek(0)
 content = f.readlines()
 comments = [util.execution_stamp('Table','from_ang')]
@@ -605,8 +604,7 @@
 labels += [f'{util.srepr(self.shapes[l],"x")}:{i+1}_{l}' \
 for i in range(np.prod(self.shapes[l]))]
-f = util.open_text(fname,'w')
+with util.open_text(fname,'w') as f:
 f.write('\n'.join([f'# {c}' for c in self.comments] + [' '.join(labels)])+('\n' if labels else ''))
 try: # backward compatibility
 self.data.to_csv(f,sep=' ',na_rep='nan',index=False,header=False,lineterminator='\n')


@@ -197,7 +197,9 @@
 YAML from file.
 """
-return cls(yaml.load(util.open_text(fname), Loader=SafeLoader))
+with util.open_text(fname) as fhandle:
+return cls(yaml.load(fhandle, Loader=SafeLoader))
 def save(self,
 fname: FileHandle,
@@ -220,7 +222,7 @@
 if 'sort_keys' not in kwargs:
 kwargs['sort_keys'] = False
-fhandle = util.open_text(fname,'w')
+with util.open_text(fname,'w') as fhandle:
 try:
 fhandle.write(yaml.dump(self,Dumper=NiceDumper,**kwargs))
 except TypeError: # compatibility with old pyyaml


@@ -8,12 +8,13 @@ import shlex as _shlex
 import re as _re
 import signal as _signal
 import fractions as _fractions
+import contextlib as _contextlib
 from collections import abc as _abc, OrderedDict as _OrderedDict
 from functools import reduce as _reduce, partial as _partial, wraps as _wraps
 import inspect
 from typing import Optional as _Optional, Callable as _Callable, Union as _Union, Iterable as _Iterable, \
 Dict as _Dict, List as _List, Tuple as _Tuple, Literal as _Literal, \
-Any as _Any, TextIO as _TextIO
+Any as _Any, TextIO as _TextIO, Generator as _Generator
 from pathlib import Path as _Path
 import numpy as _np
@@ -193,11 +194,15 @@ def run(cmd: str,
 return stdout, stderr
+@_contextlib.contextmanager
 def open_text(fname: _FileHandle,
-mode: _Literal['r','w'] = 'r') -> _TextIO: # noqa
+mode: _Literal['r','w'] = 'r') -> _Generator[_TextIO, None, None]: # noqa
 """
-Open a text file.
+Open a text file with Unix line endings
+If a path or string is given, a context manager ensures that
+the file handle is closed.
+If a file handle is given, it remains unmodified.
 Parameters
 ----------
@@ -211,8 +216,12 @@ def open_text(fname: _FileHandle,
 f : file handle
 """
-return fname if not isinstance(fname, (str,_Path)) else \
-open(_Path(fname).expanduser(),mode,newline=('\n' if mode == 'w' else None))
+if isinstance(fname, (str,_Path)):
+fhandle = open(_Path(fname).expanduser(),mode,newline=('\n' if mode == 'w' else None))
+yield fhandle
+fhandle.close()
+else:
+yield fname
 def time_stamp() -> str:
 """Provide current time as formatted string."""


@@ -63,7 +63,7 @@ def h5py_dataset_iterator():
 """Iterate over all datasets in an HDF5 file."""
 def _h5py_dataset_iterator(g, prefix=''):
 for key,item in g.items():
-path = os.path.join(prefix, key)
+path = '/'.join([prefix, key])
 if isinstance(item, h5py.Dataset): # test for dataset
 yield (path, item)
 elif isinstance(item, h5py.Group): # test for group (go down)
@@ -489,7 +489,7 @@ class TestResult:
 c = [_.decode() for _ in cur[path]]
 r = ['Unknown Phase Type'] + result._phases
 assert c == r
-grp = os.path.split(path)[0]
+grp = str(path).rpartition('/')[0]
 for attr in ref[grp].attrs:
 assert np.array_equal(ref[grp].attrs[attr],cur[grp].attrs[attr])
 for attr in dset.attrs:
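
The switch from os.path.join/os.path.split to plain '/' handling reflects that HDF5 object paths are always '/'-separated, independent of the host OS. A one-line illustration (not from the commit):

    import os

    prefix, key = '/phase', 'Aluminum'
    print('/'.join([prefix, key]))    # '/phase/Aluminum' on every platform
    print(os.path.join(prefix, key))  # '/phase\\Aluminum' on Windows, '/phase/Aluminum' elsewhere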


@@ -1,16 +1,7 @@
 file(GLOB damask-sources CONFIGURE_DEPENDS *.f90 *.c)
-if(PROJECT_NAME STREQUAL "damask-grid")
-set(executable-name "DAMASK_grid")
-file(GLOB solver-sources CONFIGURE_DEPENDS grid/*.f90)
-elseif(PROJECT_NAME STREQUAL "damask-mesh")
-set(executable-name "DAMASK_mesh")
-file(GLOB solver-sources CONFIGURE_DEPENDS mesh/*.f90)
-elseif(PROJECT_NAME STREQUAL "damask-test")
-set(executable-name "DAMASK_test")
-file(GLOB solver-sources CONFIGURE_DEPENDS test/*.f90)
-endif()
+set(executable-name "DAMASK_${DAMASK_SOLVER_LOWER}")
+file(GLOB solver-sources CONFIGURE_DEPENDS ${DAMASK_SOLVER_LOWER}/*.f90)
 if(NOT CMAKE_BUILD_TYPE STREQUAL "SYNTAXONLY")
 add_executable(${executable-name} ${damask-sources} ${solver-sources})


@@ -1560,7 +1560,7 @@ subroutine initialize_read(dset_id, filespace_id, memspace_id, plist_id, aplist_
 call HDF5_chkerr(hdferr)
 call MPI_Allgather(int(localShape(ubound(localShape,1)),MPI_INTEGER_KIND),1_MPI_INTEGER_KIND,MPI_INTEGER,&
 readSize,1_MPI_INTEGER_KIND,MPI_INTEGER,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 end if
 #endif
 myStart = int(0,HSIZE_T)
@@ -1667,7 +1667,7 @@ subroutine initialize_write(dset_id, filespace_id, memspace_id, plist_id, &
 if (parallel) then
 call MPI_Allgather(int(localShape(ubound(localShape,1)),MPI_INTEGER_KIND),1_MPI_INTEGER_KIND,MPI_INTEGER,&
 writeSize,1_MPI_INTEGER_KIND,MPI_INTEGER,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 end if
 #endif
 myStart = int(0,HSIZE_T)


@@ -48,7 +48,8 @@ implicit none(type,external)
 IO_color, &
 IO_error, &
 IO_warning, &
-IO_STDOUT
+IO_STDOUT, &
+tokenize
 contains
@@ -381,16 +382,10 @@ integer function IO_strAsInt(str)
 character(len=*), intent(in) :: str !< string for conversion to int value
 integer :: readStatus
-character(len=*), parameter :: VALIDCHARS = '0123456789+- '
-valid: if (verify(str,VALIDCHARS) == 0) then
 read(str,*,iostat=readStatus) IO_strAsInt
-if (readStatus /= 0) call IO_error(111,str)
-else valid
-IO_strAsInt = 0
-call IO_error(111,str)
-end if valid
+if (readStatus /= 0) call IO_error(111,'cannot represent "'//str//'" as integer')
 end function IO_strAsInt
@@ -403,26 +398,22 @@ real(pREAL) function IO_strAsReal(str)
 character(len=*), intent(in) :: str !< string for conversion to real value
 integer :: readStatus
-character(len=*), parameter :: VALIDCHARS = '0123456789eE.+- '
-valid: if (verify(str,VALIDCHARS) == 0) then
 read(str,*,iostat=readStatus) IO_strAsReal
-if (readStatus /= 0) call IO_error(112,str)
-else valid
-IO_strAsReal = 0.0_pREAL
-call IO_error(112,str)
-end if valid
+if (readStatus /= 0) call IO_error(111,'cannot represent "'//str//'" as real')
 end function IO_strAsReal
 !--------------------------------------------------------------------------------------------------
 !> @brief Return logical value from given string.
-!> @details: 'True' and 'true' are converted to .true.
-!> @details: 'False' and 'false' are converted to .false.
 !--------------------------------------------------------------------------------------------------
 logical function IO_strAsBool(str)
-character(len=*), intent(in) :: str !< string for conversion to int value
+character(len=*), intent(in) :: str !< string for conversion to boolean
 if (trim(adjustl(str)) == 'True' .or. trim(adjustl(str)) == 'true') then
@@ -430,8 +421,7 @@ logical function IO_strAsBool(str)
 elseif (trim(adjustl(str)) == 'False' .or. trim(adjustl(str)) == 'false') then
 IO_strAsBool = .false.
 else
-IO_strAsBool = .false.
-call IO_error(113,str)
+call IO_error(111,'cannot represent "'//str//'" as boolean')
 end if
 end function IO_strAsBool
@@ -498,11 +488,7 @@ subroutine IO_error(error_ID,ext_msg,label1,ID1,label2,ID2)
 case (110)
 msg = 'invalid chunk selected'
 case (111)
-msg = 'invalid character for int:'
-case (112)
-msg = 'invalid character for real:'
-case (113)
-msg = 'invalid character for logical:'
+msg = 'invalid string for conversion'
 case (114)
 msg = 'cannot decode base64 string:'
@@ -742,6 +728,33 @@ pure function CRLF2LF(str)
 end function CRLF2LF
!--------------------------------------------------------------------------------------------------
!> @brief Fortran 2023 tokenize (first form).
!--------------------------------------------------------------------------------------------------
pure subroutine tokenize(string,set,tokens)
character(len=*), intent(in) :: string, set
character(len=:), dimension(:), allocatable, intent(out) :: tokens
integer, allocatable, dimension(:,:) :: pos
integer :: i, s, e
allocate(pos(2,0))
e = 0
do while (e < verify(string,set,back=.true.))
s = e + merge(verify(string(e+1:),set),1,scan(string(e+1:),set)/=0)
e = s + merge(scan(string(s:),set)-2,len(string(s:))-1,scan(string(s:),set)/=0)
pos = reshape([pos,[s,e]],[2,size(pos)/2+1])
end do
allocate(character(len=merge(maxval(pos(2,:)-pos(1,:))+1,0,size(pos)>0))::tokens(size(pos,2)))
do i = 1, size(pos,2)
tokens(i) = string(pos(1,i):pos(2,i))
end do
end subroutine tokenize
 !--------------------------------------------------------------------------------------------------
 !> @brief Write statements to standard error.
 !--------------------------------------------------------------------------------------------------
@@ -808,6 +821,7 @@ subroutine IO_selfTest()
 integer, dimension(:), allocatable :: chunkPos
 character(len=:), allocatable :: str,out
+character(len=:), dimension(:), allocatable :: tokens
 if (dNeq(1.0_pREAL, IO_strAsReal('1.0'))) error stop 'IO_strAsReal'
@@ -887,6 +901,54 @@
 if ('abc,'//IO_EOL//'xxdefg,'//IO_EOL//'xxhij' /= IO_wrapLines('abc,defg, hij',filler='xx',length=4)) &
 error stop 'IO_wrapLines/7'
call tokenize('','$',tokens)
if (size(tokens) /= 0 .or. len(tokens) /=0) error stop 'tokenize empty'
call tokenize('abcd','dcba',tokens)
if (size(tokens) /= 0 .or. len(tokens) /=0) error stop 'tokenize only separators'
tokens=['a']
call test_tokenize('a','#',tokens)
call test_tokenize('#a','#',tokens)
call test_tokenize('a#','#',tokens)
tokens=['aa']
call test_tokenize('aa','#',tokens)
call test_tokenize('$aa','$',tokens)
call test_tokenize('aa$','$',tokens)
tokens=['a','b']
call test_tokenize('a$b','$',tokens)
call test_tokenize('@a@$b@','$@',tokens)
tokens=['aa','bb']
call test_tokenize('aa$bb','$',tokens)
call test_tokenize('aa$$bb','$',tokens)
call test_tokenize('aa$bb$','$',tokens)
tokens=['aa ','bbb ','cccc']
call test_tokenize('aa$bbb$cccc','$',tokens)
call test_tokenize('$aa$bbb$cccc$','$',tokens)
call tokenize('#aa@@bbb!!!cccc#','#@!',tokens)
contains
subroutine test_tokenize(input,delimiter,solution)
character(len=*), intent(in) :: input, delimiter
character(len=*), dimension(:), intent(in) :: solution
character(len=:), dimension(:), allocatable :: tok
integer :: i
call tokenize(input,delimiter,tok)
do i = 1,size(tok)
!if (solution(i) /= tok(i)) error stop 'tokenize "'//solution(i)//'" vs. "'//tok(i)//'"' ! requires 2018 standard
if (solution(i) /= tok(i)) error stop 'tokenize'
end do
end subroutine test_tokenize
 end subroutine IO_selfTest
 end module IO
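
The new tokenize mirrors the first form of the Fortran 2023 intrinsic: it splits on any character of the separator set, skips empty tokens, and returns tokens padded to a common length. A small Python model of the semantics exercised by the self test above (illustrative only, not part of the commit):

    import re

    def tokenize(string: str, charset: str) -> list[str]:
        tokens = [t for t in re.split('[' + re.escape(charset) + ']', string) if t]
        width = max((len(t) for t in tokens), default=0)
        return [t.ljust(width) for t in tokens]        # Fortran character arrays share one length

    assert tokenize('aa$bbb$cccc', '$') == ['aa  ', 'bbb ', 'cccc']
    assert tokenize('#aa@@bbb!!!cccc#', '#@!') == ['aa  ', 'bbb ', 'cccc']
    assert tokenize('abcd', 'dcba') == []              # only separators -> no tokens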


@@ -30,7 +30,8 @@ module materialpoint_Marc
 real(pREAL), dimension (:,:,:), allocatable, private :: &
 materialpoint_cs !< Cauchy stress
 real(pREAL), dimension (:,:,:,:), allocatable, private :: &
-materialpoint_dcsdE !< Cauchy stress tangent
+materialpoint_dcsdE, & !< Cauchy stress tangent
+materialpoint_F !< deformation gradient
 real(pREAL), dimension (:,:,:,:), allocatable, private :: &
 materialpoint_dcsdE_knownGood !< known good tangent
@@ -95,6 +96,7 @@ subroutine materialpoint_init()
 print'(/,1x,a)', '<<<+- materialpoint init -+>>>'; flush(IO_STDOUT)
+allocate(materialpoint_F( 3,3,discretization_nIPs,discretization_Nelems), source= 0.0_pREAL)
 allocate(materialpoint_cs( 6,discretization_nIPs,discretization_Nelems), source= 0.0_pREAL)
 allocate(materialpoint_dcsdE( 6,6,discretization_nIPs,discretization_Nelems), source= 0.0_pREAL)
 allocate(materialpoint_dcsdE_knownGood(6,6,discretization_nIPs,discretization_Nelems), source= 0.0_pREAL)
@@ -140,10 +142,10 @@ subroutine materialpoint_general(mode, ffn, ffn1, temperature_inp, dt, elFE, ip,
 if (iand(mode, materialpoint_RESTOREJACOBIAN) /= 0) &
 materialpoint_dcsde = materialpoint_dcsde_knownGood
-if (iand(mode, materialpoint_AGERESULTS) /= 0) call materialpoint_forward
+if (iand(mode, materialpoint_AGERESULTS) /= 0) call materialpoint_forward()
-homogenization_F0(1:3,1:3,ce) = ffn
 homogenization_F(1:3,1:3,ce) = ffn1
+materialpoint_F(1:3,1:3,ip,elCP) = ffn1
 if (iand(mode, materialpoint_CALCRESULTS) /= 0) then
@@ -154,9 +156,8 @@
 materialpoint_dcsde(1:6,1:6,ip,elCP) = ODD_JACOBIAN * math_eye(6)
 else validCalculation
-call homogenization_mechanical_response(dt,(elCP-1)*discretization_nIPs + ip,(elCP-1)*discretization_nIPs + ip)
-if (.not. terminallyIll) &
-call homogenization_mechanical_response2(dt,[ip,ip],[elCP,elCP])
+call homogenization_mechanical_response(dt,(elCP-1)*discretization_nIPs + ip, &
+(elCP-1)*discretization_nIPs + ip)
 terminalIllness: if (terminallyIll) then
@@ -168,17 +169,17 @@
 else terminalIllness
 ! translate from P to sigma
-Kirchhoff = matmul(homogenization_P(1:3,1:3,ce), transpose(homogenization_F(1:3,1:3,ce)))
-J_inverse = 1.0_pREAL / math_det33(homogenization_F(1:3,1:3,ce))
+Kirchhoff = matmul(homogenization_P(1:3,1:3,ce), transpose(materialpoint_F(1:3,1:3,ip,elCP)))
+J_inverse = 1.0_pREAL / math_det33(materialpoint_F(1:3,1:3,ip,elCP))
 materialpoint_cs(1:6,ip,elCP) = math_sym33to6(J_inverse * Kirchhoff,weighted=.false.)
 ! translate from dP/dF to dCS/dE
 H = 0.0_pREAL
 do i=1,3; do j=1,3; do k=1,3; do l=1,3; do m=1,3; do n=1,3
 H(i,j,k,l) = H(i,j,k,l) &
-+ homogenization_F(j,m,ce) * homogenization_F(l,n,ce) &
++ materialpoint_F(j,m,ip,elCP) * materialpoint_F(l,n,ip,elCP) &
 * homogenization_dPdF(i,m,k,n,ce) &
-- math_delta(j,l) * homogenization_F(i,m,ce) * homogenization_P(k,m,ce) &
+- math_delta(j,l) * materialpoint_F(i,m,ip,elCP) * homogenization_P(k,m,ce) &
 + 0.5_pREAL * ( Kirchhoff(j,l)*math_delta(i,k) + Kirchhoff(i,k)*math_delta(j,l) &
 + Kirchhoff(j,k)*math_delta(i,l) + Kirchhoff(i,l)*math_delta(j,k))
 end do; end do; end do; end do; end do; end do


@@ -23,6 +23,7 @@ program DAMASK_grid
 use materialpoint
 use material
 use spectral_utilities
+use grid_mech_utilities
 use grid_mechanical_spectral_basic
 use grid_mechanical_spectral_polarization
 use grid_mechanical_FEM
@@ -362,7 +363,7 @@ program DAMASK_grid
 end if; flush(IO_STDOUT)
 call MPI_Allreduce(signal_SIGUSR1,sig,1_MPI_INTEGER_KIND,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 if (mod(inc,loadCases(l)%f_out) == 0 .or. sig) then
 print'(/,1x,a)', '... saving results ........................................................'
 flush(IO_STDOUT)
@@ -370,7 +371,7 @@ program DAMASK_grid
 end if
 if (sig) call signal_setSIGUSR1(.false.)
 call MPI_Allreduce(signal_SIGUSR2,sig,1_MPI_INTEGER_KIND,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 if (mod(inc,loadCases(l)%f_restart) == 0 .or. sig) then
 do field = 1, nActiveFields
 select case (ID(field))
@@ -386,7 +387,7 @@ program DAMASK_grid
 end if
 if (sig) call signal_setSIGUSR2(.false.)
 call MPI_Allreduce(signal_SIGINT,sig,1_MPI_INTEGER_KIND,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 if (sig) exit loadCaseLooping
 end if skipping


@@ -201,26 +201,26 @@ subroutine cellsSizeOrigin(c,s,o,header)
 real(pREAL), dimension(3), intent(out) :: s,o
 character(len=*), intent(in) :: header
-character(len=:), allocatable :: temp
+character(len=:), allocatable, dimension(:) :: temp
 real(pREAL), dimension(3) :: delta
 integer :: i
-temp = getXMLValue(header,'Direction')
-if (temp /= '1 0 0 0 1 0 0 0 1' .and. temp /= '') & ! https://discourse.vtk.org/t/vti-specification/6526
+temp = [getXMLValue(header,'Direction')]
+if (temp(1) /= '1 0 0 0 1 0 0 0 1' .and. temp(1) /= '') & ! https://discourse.vtk.org/t/vti-specification/6526
 call IO_error(error_ID = 844, ext_msg = 'coordinate order')
-temp = getXMLValue(header,'WholeExtent')
-if (any([(IO_intValue(temp,IO_strPos(temp),i),i=1,5,2)] /= 0)) &
+call tokenize(getXMLValue(header,'WholeExtent'),' ',temp)
+if (any([(IO_strAsInt(temp(i)),i=1,5,2)] /= 0)) &
 call IO_error(error_ID = 844, ext_msg = 'coordinate start')
-c = [(IO_intValue(temp,IO_strPos(temp),i),i=2,6,2)]
+c = [(IO_strAsInt(temp(i)),i=2,6,2)]
-temp = getXMLValue(header,'Spacing')
-delta = [(IO_realValue(temp,IO_strPos(temp),i),i=1,3)]
+call tokenize(getXMLValue(header,'Spacing'),' ',temp)
+delta = [(IO_strAsReal(temp(i)),i=1,3)]
 s = delta * real(c,pREAL)
-temp = getXMLValue(header,'Origin')
-o = [(IO_realValue(temp,IO_strPos(temp),i),i=1,3)]
+call tokenize(getXMLValue(header,'Origin'),' ',temp)
+o = [(IO_strAsReal(temp(i)),i=1,3)]
 end subroutine cellsSizeOrigin


@@ -97,12 +97,12 @@ subroutine discretization_grid_init(restart)
 call MPI_Bcast(cells,3_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD, err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 if (cells(1) < 2) call IO_error(844, ext_msg='cells(1) must be larger than 1')
 call MPI_Bcast(geomSize,3_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD, err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 call MPI_Bcast(origin,3_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD, err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 print'(/,1x,a,i0,a,i0,a,i0)', 'cells: ', cells(1), ' × ', cells(2), ' × ', cells(3)
 print '(1x,a,es8.2,a,es8.2,a,es8.2,a)', 'size: ', geomSize(1), ' × ', geomSize(2), ' × ', geomSize(3), ' m³'
@@ -126,15 +126,15 @@ subroutine discretization_grid_init(restart)
 call MPI_Gather(product(cells(1:2))*cells3Offset,1_MPI_INTEGER_KIND,MPI_INTEGER,displs,&
 1_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 call MPI_Gather(product(myGrid), 1_MPI_INTEGER_KIND,MPI_INTEGER,sendcounts,&
 1_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 allocate(materialAt(product(myGrid)))
 call MPI_Scatterv(materialAt_global,sendcounts,displs,MPI_INTEGER,materialAt,size(materialAt),&
 MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 call discretization_init(materialAt, &
 IPcoordinates0(myGrid,mySize,cells3Offset), &
@@ -318,10 +318,10 @@ function discretization_grid_getInitialCondition(label) result(ic)
 real(pREAL), dimension(:), allocatable :: ic_global, ic_local
 integer(MPI_INTEGER_KIND) :: err_MPI
 integer, dimension(worldsize) :: &
 displs, sendcounts
 if (worldrank == 0) then
 ic_global = VTI_readDataset_real(IO_read(CLI_geomFile),label)
 else
@@ -330,15 +330,15 @@ function discretization_grid_getInitialCondition(label) result(ic)
 call MPI_Gather(product(cells(1:2))*cells3Offset, 1_MPI_INTEGER_KIND,MPI_INTEGER,displs,&
 1_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 call MPI_Gather(product(cells(1:2))*cells3, 1_MPI_INTEGER_KIND,MPI_INTEGER,sendcounts,&
 1_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 allocate(ic_local(product(cells(1:2))*cells3))
 call MPI_Scatterv(ic_global,sendcounts,displs,MPI_DOUBLE,ic_local,size(ic_local),&
 MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 ic = reshape(ic_local,[cells(1),cells(2),cells3])


@@ -44,7 +44,6 @@ module grid_damage_spectral
 type(tNumerics) :: num
-type(tSolutionParams) :: params
 !--------------------------------------------------------------------------------------------------
 ! PETSc data
 SNES :: SNES_damage
@@ -57,7 +56,7 @@ module grid_damage_spectral
 ! reference diffusion tensor, mobility etc.
 integer :: totalIter = 0 !< total iteration in current increment
 real(pREAL), dimension(3,3) :: K_ref
-real(pREAL) :: mu_ref
+real(pREAL) :: mu_ref, Delta_t_
 public :: &
 grid_damage_spectral_init, &
@@ -130,7 +129,7 @@ subroutine grid_damage_spectral_init(num_grid)
 CHKERRQ(err_PETSc)
 call MPI_Allgather(int(cells3,MPI_INTEGER_KIND),1_MPI_INTEGER_KIND,MPI_INTEGER,&
 cells3_global,1_MPI_INTEGER_KIND,MPI_INTEGER,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 call DMDACreate3D(PETSC_COMM_WORLD, &
 DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, & ! cut off stencil at boundary
 DMDA_STENCIL_BOX, & ! Moore (26) neighborhood around central point
@@ -207,8 +206,7 @@ end subroutine grid_damage_spectral_init
 !--------------------------------------------------------------------------------------------------
 function grid_damage_spectral_solution(Delta_t) result(solution)
-real(pREAL), intent(in) :: &
-Delta_t !< increment in time for current solution
+real(pREAL), intent(in) :: Delta_t !< increment in time for current solution
 type(tSolutionState) :: solution
 PetscInt :: devNull
@@ -222,7 +220,7 @@ function grid_damage_spectral_solution(Delta_t) result(solution)
 !--------------------------------------------------------------------------------------------------
 ! set module wide availabe data
-params%Delta_t = Delta_t
+Delta_t_ = Delta_t
 call SNESSolve(SNES_damage,PETSC_NULL_VEC,phi_PETSc,err_PETSc)
 CHKERRQ(err_PETSc)
@@ -241,10 +239,10 @@
 phi_max = maxval(phi)
 stagNorm = maxval(abs(phi - phi_stagInc))
 call MPI_Allreduce(MPI_IN_PLACE,stagNorm,1_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_MAX,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 solution%stagConverged = stagNorm < max(num%eps_damage_atol, num%eps_damage_rtol*phi_max)
 call MPI_Allreduce(MPI_IN_PLACE,solution%stagConverged,1_MPI_INTEGER_KIND,MPI_LOGICAL,MPI_LAND,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 phi_stagInc = phi
 call homogenization_set_phi(reshape(phi,[product(cells(1:2))*cells3]))
@@ -350,12 +348,12 @@ subroutine formResidual(residual_subdomain,x_scal,r,dummy,err_PETSc)
 ce = 0
 do k = 1, cells3; do j = 1, cells(2); do i = 1,cells(1)
 ce = ce + 1
-r(i,j,k) = params%Delta_t*(r(i,j,k) + homogenization_f_phi(phi(i,j,k),ce)) &
+r(i,j,k) = Delta_t_*(r(i,j,k) + homogenization_f_phi(phi(i,j,k),ce)) &
 + homogenization_mu_phi(ce)*(phi_lastInc(i,j,k) - phi(i,j,k)) &
 + mu_ref*phi(i,j,k)
 end do; end do; end do
-r = max(min(utilities_GreenConvolution(r, K_ref, mu_ref, params%Delta_t),phi_lastInc),num%phi_min) &
+r = max(min(utilities_GreenConvolution(r, K_ref, mu_ref, Delta_t_),phi_lastInc),num%phi_min) &
 - phi
 end associate
 err_PETSc = 0
@@ -381,10 +379,10 @@ subroutine updateReference()
 K_ref = K_ref*wgt
 call MPI_Allreduce(MPI_IN_PLACE,K_ref,9_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 mu_ref = mu_ref*wgt
 call MPI_Allreduce(MPI_IN_PLACE,mu_ref,1_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 end subroutine updateReference


@@ -23,6 +23,7 @@ module grid_mechanical_FEM
 use math
 use rotations
 use spectral_utilities
+use grid_mech_utilities
 use config
 use homogenization
 use discretization
@@ -172,7 +173,7 @@ subroutine grid_mechanical_FEM_init(num_grid)
 CHKERRQ(err_PETSc)
 call MPI_Allgather(int(cells3,MPI_INTEGER_KIND),1_MPI_INTEGER_KIND,MPI_INTEGER,&
 cells3_global,1_MPI_INTEGER_KIND,MPI_INTEGER,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 call DMDACreate3d(PETSC_COMM_WORLD, &
 DM_BOUNDARY_PERIODIC, DM_BOUNDARY_PERIODIC, DM_BOUNDARY_PERIODIC, &
 DMDA_STENCIL_BOX, &
@@ -245,16 +246,16 @@ subroutine grid_mechanical_FEM_init(num_grid)
 call HDF5_read(P_aim,groupHandle,'P_aim',.false.)
 call MPI_Bcast(P_aim,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 call HDF5_read(F_aim,groupHandle,'F_aim',.false.)
 call MPI_Bcast(F_aim,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 call HDF5_read(F_aim_lastInc,groupHandle,'F_aim_lastInc',.false.)
 call MPI_Bcast(F_aim_lastInc,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 call HDF5_read(F_aimDot,groupHandle,'F_aimDot',.false.)
 call MPI_Bcast(F_aimDot,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 call HDF5_read(temp33n,groupHandle,'F')
 F = reshape(temp33n,[3,3,cells(1),cells(2),cells3])
 call HDF5_read(temp33n,groupHandle,'F_lastInc')
@@ -269,7 +270,6 @@ subroutine grid_mechanical_FEM_init(num_grid)
 F = spread(spread(spread(math_I3,3,cells(1)),4,cells(2)),5,cells3)
 end if restartRead
-homogenization_F0 = reshape(F_lastInc, [3,3,product(cells(1:2))*cells3]) ! set starting condition for homogenization_mechanical_response
 call utilities_updateCoords(F)
 call utilities_constitutiveResponse(P_current,P_av,C_volAvg,devNull, & ! stress field, stress avg, global average of stiffness and (min+max)/2
 F, & ! target F
@@ -283,10 +283,10 @@ subroutine grid_mechanical_FEM_init(num_grid)
 print'(1x,a,1x,i0)', 'loading additional restart data of increment', CLI_restartInc
 call HDF5_read(C_volAvg,groupHandle,'C_volAvg',.false.)
 call MPI_Bcast(C_volAvg,81_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 call HDF5_read(C_volAvgLastInc,groupHandle,'C_volAvgLastInc',.false.)
 call MPI_Bcast(C_volAvgLastInc,81_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 call HDF5_closeGroup(groupHandle)
 call HDF5_closeFile(fileHandle)
@@ -390,7 +390,6 @@ subroutine grid_mechanical_FEM_forward(cutBack,guess,Delta_t,Delta_t_old,t_remai
 F_lastInc = F
-homogenization_F0 = reshape(F, [3,3,product(cells(1:2))*cells3])
 end if
 !--------------------------------------------------------------------------------------------------
@@ -576,7 +575,7 @@ subroutine formResidual(da_local,x_local, &
 P_av,C_volAvg,devNull, &
 F,params%Delta_t,params%rotation_BC)
 call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1_MPI_INTEGER_KIND,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,err_MPI)
-if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
+call parallelization_chkerr(err_MPI)
 !--------------------------------------------------------------------------------------------------
 ! stress BC handling


@ -23,6 +23,7 @@ module grid_mechanical_spectral_basic
use math use math
use rotations use rotations
use spectral_utilities use spectral_utilities
use grid_mech_utilities
use homogenization use homogenization
use discretization_grid use discretization_grid
@ -168,7 +169,7 @@ subroutine grid_mechanical_spectral_basic_init(num_grid)
CHKERRQ(err_PETSc) CHKERRQ(err_PETSc)
call MPI_Allgather(int(cells3,MPI_INTEGER_KIND),1_MPI_INTEGER_KIND,MPI_INTEGER,& call MPI_Allgather(int(cells3,MPI_INTEGER_KIND),1_MPI_INTEGER_KIND,MPI_INTEGER,&
cells3_global,1_MPI_INTEGER_KIND,MPI_INTEGER,MPI_COMM_WORLD,err_MPI) cells3_global,1_MPI_INTEGER_KIND,MPI_INTEGER,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call parallelization_chkerr(err_MPI)
call DMDACreate3d(PETSC_COMM_WORLD, & call DMDACreate3d(PETSC_COMM_WORLD, &
DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, & ! cut off stencil at boundary DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, & ! cut off stencil at boundary
DMDA_STENCIL_BOX, & ! Moore (26) neighborhood around central point DMDA_STENCIL_BOX, & ! Moore (26) neighborhood around central point
@ -206,16 +207,16 @@ subroutine grid_mechanical_spectral_basic_init(num_grid)
call HDF5_read(P_aim,groupHandle,'P_aim',.false.) call HDF5_read(P_aim,groupHandle,'P_aim',.false.)
call MPI_Bcast(P_aim,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) call MPI_Bcast(P_aim,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call parallelization_chkerr(err_MPI)
call HDF5_read(F_aim,groupHandle,'F_aim',.false.) call HDF5_read(F_aim,groupHandle,'F_aim',.false.)
call MPI_Bcast(F_aim,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) call MPI_Bcast(F_aim,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call parallelization_chkerr(err_MPI)
call HDF5_read(F_aim_lastInc,groupHandle,'F_aim_lastInc',.false.) call HDF5_read(F_aim_lastInc,groupHandle,'F_aim_lastInc',.false.)
call MPI_Bcast(F_aim_lastInc,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) call MPI_Bcast(F_aim_lastInc,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call parallelization_chkerr(err_MPI)
call HDF5_read(F_aimDot,groupHandle,'F_aimDot',.false.) call HDF5_read(F_aimDot,groupHandle,'F_aimDot',.false.)
call MPI_Bcast(F_aimDot,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) call MPI_Bcast(F_aimDot,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call parallelization_chkerr(err_MPI)
call HDF5_read(temp33n,groupHandle,'F') call HDF5_read(temp33n,groupHandle,'F')
F = reshape(temp33n,[9,cells(1),cells(2),cells3]) F = reshape(temp33n,[9,cells(1),cells(2),cells3])
call HDF5_read(temp33n,groupHandle,'F_lastInc') call HDF5_read(temp33n,groupHandle,'F_lastInc')
@@ -226,7 +227,6 @@ subroutine grid_mechanical_spectral_basic_init(num_grid)
F = reshape(F_lastInc,[9,cells(1),cells(2),cells3]) F = reshape(F_lastInc,[9,cells(1),cells(2),cells3])
end if restartRead end if restartRead
homogenization_F0 = reshape(F_lastInc, [3,3,product(cells(1:2))*cells3]) ! set starting condition for homogenization_mechanical_response
call utilities_updateCoords(reshape(F,shape(F_lastInc))) call utilities_updateCoords(reshape(F,shape(F_lastInc)))
call utilities_constitutiveResponse(P,P_av,C_volAvg,C_minMaxAvg, & ! stress field, stress avg, global average of stiffness and (min+max)/2 call utilities_constitutiveResponse(P,P_av,C_volAvg,C_minMaxAvg, & ! stress field, stress avg, global average of stiffness and (min+max)/2
reshape(F,shape(F_lastInc)), & ! target F reshape(F,shape(F_lastInc)), & ! target F
@@ -238,13 +238,13 @@ subroutine grid_mechanical_spectral_basic_init(num_grid)
print'(1x,a,1x,i0)', 'loading additional restart data of increment', CLI_restartInc print'(1x,a,1x,i0)', 'loading additional restart data of increment', CLI_restartInc
call HDF5_read(C_volAvg,groupHandle,'C_volAvg',.false.) call HDF5_read(C_volAvg,groupHandle,'C_volAvg',.false.)
call MPI_Bcast(C_volAvg,81_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) call MPI_Bcast(C_volAvg,81_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call parallelization_chkerr(err_MPI)
call HDF5_read(C_volAvgLastInc,groupHandle,'C_volAvgLastInc',.false.) call HDF5_read(C_volAvgLastInc,groupHandle,'C_volAvgLastInc',.false.)
call MPI_Bcast(C_volAvgLastInc,81_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) call MPI_Bcast(C_volAvgLastInc,81_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call parallelization_chkerr(err_MPI)
call HDF5_read(C_minMaxAvg,groupHandle,'C_minMaxAvg',.false.) call HDF5_read(C_minMaxAvg,groupHandle,'C_minMaxAvg',.false.)
call MPI_Bcast(C_minMaxAvg,81_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) call MPI_Bcast(C_minMaxAvg,81_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call parallelization_chkerr(err_MPI)
call HDF5_closeGroup(groupHandle) call HDF5_closeGroup(groupHandle)
call HDF5_closeFile(fileHandle) call HDF5_closeFile(fileHandle)
@@ -347,8 +347,6 @@ subroutine grid_mechanical_spectral_basic_forward(cutBack,guess,Delta_t,Delta_t_
F_lastInc,reshape(F,[3,3,cells(1),cells(2),cells3]),Delta_t_old, & F_lastInc,reshape(F,[3,3,cells(1),cells(2),cells3]),Delta_t_old, &
rotation_BC%rotate(F_aimDot,active=.true.)) rotation_BC%rotate(F_aimDot,active=.true.))
F_lastInc = reshape(F,[3,3,cells(1),cells(2),cells3]) F_lastInc = reshape(F,[3,3,cells(1),cells(2),cells3])
homogenization_F0 = reshape(F,[3,3,product(cells(1:2))*cells3])
end if end if
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
@@ -521,7 +519,7 @@ subroutine formResidual(residual_subdomain, F, &
P_av,C_volAvg,C_minMaxAvg, & P_av,C_volAvg,C_minMaxAvg, &
F,params%Delta_t,params%rotation_BC) F,params%Delta_t,params%rotation_BC)
call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1_MPI_INTEGER_KIND,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,err_MPI) call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1_MPI_INTEGER_KIND,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call parallelization_chkerr(err_MPI)
err_div = utilities_divergenceRMS(P) err_div = utilities_divergenceRMS(P)
end associate end associate

@@ -23,6 +23,7 @@ module grid_mechanical_spectral_polarization
use math use math
use rotations use rotations
use spectral_utilities use spectral_utilities
use grid_mech_utilities
use config use config
use homogenization use homogenization
use discretization_grid use discretization_grid
@@ -189,7 +190,7 @@ subroutine grid_mechanical_spectral_polarization_init(num_grid)
CHKERRQ(err_PETSc) CHKERRQ(err_PETSc)
call MPI_Allgather(int(cells3,pPetscInt),1_MPI_INTEGER_KIND,MPI_INTEGER,& call MPI_Allgather(int(cells3,pPetscInt),1_MPI_INTEGER_KIND,MPI_INTEGER,&
cells3_global,1_MPI_INTEGER_KIND,MPI_INTEGER,MPI_COMM_WORLD,err_MPI) cells3_global,1_MPI_INTEGER_KIND,MPI_INTEGER,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call parallelization_chkerr(err_MPI)
call DMDACreate3d(PETSC_COMM_WORLD, & call DMDACreate3d(PETSC_COMM_WORLD, &
DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, & ! cut off stencil at boundary DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, & ! cut off stencil at boundary
DMDA_STENCIL_BOX, & ! Moore (26) neighborhood around central point DMDA_STENCIL_BOX, & ! Moore (26) neighborhood around central point
@@ -229,16 +230,16 @@ subroutine grid_mechanical_spectral_polarization_init(num_grid)
call HDF5_read(P_aim,groupHandle,'P_aim',.false.) call HDF5_read(P_aim,groupHandle,'P_aim',.false.)
call MPI_Bcast(P_aim,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) call MPI_Bcast(P_aim,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call parallelization_chkerr(err_MPI)
call HDF5_read(F_aim,groupHandle,'F_aim',.false.) call HDF5_read(F_aim,groupHandle,'F_aim',.false.)
call MPI_Bcast(F_aim,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) call MPI_Bcast(F_aim,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call parallelization_chkerr(err_MPI)
call HDF5_read(F_aim_lastInc,groupHandle,'F_aim_lastInc',.false.) call HDF5_read(F_aim_lastInc,groupHandle,'F_aim_lastInc',.false.)
call MPI_Bcast(F_aim_lastInc,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) call MPI_Bcast(F_aim_lastInc,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call parallelization_chkerr(err_MPI)
call HDF5_read(F_aimDot,groupHandle,'F_aimDot',.false.) call HDF5_read(F_aimDot,groupHandle,'F_aimDot',.false.)
call MPI_Bcast(F_aimDot,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) call MPI_Bcast(F_aimDot,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call parallelization_chkerr(err_MPI)
call HDF5_read(temp33n,groupHandle,'F') call HDF5_read(temp33n,groupHandle,'F')
F = reshape(temp33n,[9,cells(1),cells(2),cells3]) F = reshape(temp33n,[9,cells(1),cells(2),cells3])
call HDF5_read(temp33n,groupHandle,'F_lastInc') call HDF5_read(temp33n,groupHandle,'F_lastInc')
@@ -255,7 +256,6 @@ subroutine grid_mechanical_spectral_polarization_init(num_grid)
F_tau_lastInc = 2.0_pREAL*F_lastInc F_tau_lastInc = 2.0_pREAL*F_lastInc
end if restartRead end if restartRead
homogenization_F0 = reshape(F_lastInc, [3,3,product(cells(1:2))*cells3]) ! set starting condition for homogenization_mechanical_response
call utilities_updateCoords(reshape(F,shape(F_lastInc))) call utilities_updateCoords(reshape(F,shape(F_lastInc)))
call utilities_constitutiveResponse(P,P_av,C_volAvg,C_minMaxAvg, & ! stress field, stress avg, global average of stiffness and (min+max)/2 call utilities_constitutiveResponse(P,P_av,C_volAvg,C_minMaxAvg, & ! stress field, stress avg, global average of stiffness and (min+max)/2
reshape(F,shape(F_lastInc)), & ! target F reshape(F,shape(F_lastInc)), & ! target F
@@ -267,13 +267,13 @@ subroutine grid_mechanical_spectral_polarization_init(num_grid)
print '(1x,a,1x,i0)', 'loading additional restart data of increment', CLI_restartInc print '(1x,a,1x,i0)', 'loading additional restart data of increment', CLI_restartInc
call HDF5_read(C_volAvg,groupHandle,'C_volAvg',.false.) call HDF5_read(C_volAvg,groupHandle,'C_volAvg',.false.)
call MPI_Bcast(C_volAvg,81_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) call MPI_Bcast(C_volAvg,81_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call parallelization_chkerr(err_MPI)
call HDF5_read(C_volAvgLastInc,groupHandle,'C_volAvgLastInc',.false.) call HDF5_read(C_volAvgLastInc,groupHandle,'C_volAvgLastInc',.false.)
call MPI_Bcast(C_volAvgLastInc,81_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) call MPI_Bcast(C_volAvgLastInc,81_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call parallelization_chkerr(err_MPI)
call HDF5_read(C_minMaxAvg,groupHandle,'C_minMaxAvg',.false.) call HDF5_read(C_minMaxAvg,groupHandle,'C_minMaxAvg',.false.)
call MPI_Bcast(C_minMaxAvg,81_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) call MPI_Bcast(C_minMaxAvg,81_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call parallelization_chkerr(err_MPI)
call HDF5_closeGroup(groupHandle) call HDF5_closeGroup(groupHandle)
call HDF5_closeFile(fileHandle) call HDF5_closeFile(fileHandle)
@@ -391,7 +391,6 @@ subroutine grid_mechanical_spectral_polarization_forward(cutBack,guess,Delta_t,D
F_lastInc = reshape(F, [3,3,cells(1),cells(2),cells3]) F_lastInc = reshape(F, [3,3,cells(1),cells(2),cells3])
F_tau_lastInc = reshape(F_tau,[3,3,cells(1),cells(2),cells3]) F_tau_lastInc = reshape(F_tau,[3,3,cells(1),cells(2),cells3])
homogenization_F0 = reshape(F,[3,3,product(cells(1:2))*cells3])
end if end if
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
@@ -574,7 +573,7 @@ subroutine formResidual(residual_subdomain, FandF_tau, &
F_av = sum(sum(sum(F,dim=5),dim=4),dim=3) * wgt F_av = sum(sum(sum(F,dim=5),dim=4),dim=3) * wgt
call MPI_Allreduce(MPI_IN_PLACE,F_av,9_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI) call MPI_Allreduce(MPI_IN_PLACE,F_av,9_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call parallelization_chkerr(err_MPI)
call SNESGetNumberFunctionEvals(SNES_mech,nfuncs,err_PETSc) call SNESGetNumberFunctionEvals(SNES_mech,nfuncs,err_PETSc)
CHKERRQ(err_PETSc) CHKERRQ(err_PETSc)

@@ -0,0 +1,249 @@
!--------------------------------------------------------------------------------------------------
!> @author Martin Diehl, KU Leuven
!> @brief Utilities used by the mech grid solver variants
!--------------------------------------------------------------------------------------------------
module grid_mech_utilities
#include <petsc/finclude/petscsys.h>
use PETScSys
#if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY)
use MPI_f08
#endif
use prec
use parallelization
use math
use rotations
use IO
use discretization_grid
use discretization
use spectral_utilities
use homogenization
#if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY)
implicit none(type,external)
#else
implicit none
#endif
private
!--------------------------------------------------------------------------------------------------
! derived types
type, public :: tBoundaryCondition !< set of parameters defining a boundary condition
real(pREAL), dimension(3,3) :: values = 0.0_pREAL
logical, dimension(3,3) :: mask = .true.
character(len=:), allocatable :: myType
end type tBoundaryCondition
type, public :: tSolutionParams
real(pREAL), dimension(3,3) :: stress_BC
logical, dimension(3,3) :: stress_mask
type(tRotation) :: rotation_BC
real(pREAL) :: Delta_t
end type tSolutionParams
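!--------------------------------------------------------------------------------------------------
! Hedged usage sketch (illustrative only, not part of this file): the mechanical solver variants
! are expected to fill one tSolutionParams instance per SNESSolve call, along the lines of
!   params%stress_BC   = stress_BC%values    ! prescribed stress components
!   params%stress_mask = stress_BC%mask      ! .true. where stress rather than F is prescribed
!   params%rotation_BC = rotation_BC         ! rotation of the load frame
!   params%Delta_t     = Delta_t             ! time increment of the current (sub)step
!--------------------------------------------------------------------------------------------------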
public :: &
utilities_maskedCompliance, &
utilities_constitutiveResponse, &
utilities_calculateRate, &
utilities_forwardTensorField
contains
!--------------------------------------------------------------------------------------------------
!> @brief Calculate masked compliance tensor used to adjust F to fulfill stress BC.
!--------------------------------------------------------------------------------------------------
function utilities_maskedCompliance(rot_BC,mask_stress,C)
real(pREAL), dimension(3,3,3,3) :: utilities_maskedCompliance !< masked compliance
real(pREAL), intent(in), dimension(3,3,3,3) :: C !< current average stiffness
type(tRotation), intent(in) :: rot_BC !< rotation of load frame
logical, intent(in), dimension(3,3) :: mask_stress !< mask of stress BC
integer :: i, j
logical, dimension(9) :: mask_stressVector
logical, dimension(9,9) :: mask
real(pREAL), dimension(9,9) :: temp99_real
integer :: size_reduced = 0
real(pREAL), dimension(:,:), allocatable :: &
s_reduced, & !< reduced compliance matrix (depending on number of stress BC)
c_reduced, & !< reduced stiffness (depending on number of stress BC)
sTimesC !< temp variable to check inversion
logical :: errmatinv
character(len=pSTRLEN):: formatString
mask_stressVector = .not. reshape(transpose(mask_stress), [9])
size_reduced = count(mask_stressVector)
if (size_reduced > 0) then
temp99_real = math_3333to99(rot_BC%rotate(C))
do i = 1,9; do j = 1,9
mask(i,j) = mask_stressVector(i) .and. mask_stressVector(j)
end do; end do
c_reduced = reshape(pack(temp99_Real,mask),[size_reduced,size_reduced])
allocate(s_reduced,mold = c_reduced)
call math_invert(s_reduced, errmatinv, c_reduced) ! invert reduced stiffness
if (any(IEEE_is_NaN(s_reduced))) errmatinv = .true.
!--------------------------------------------------------------------------------------------------
! check if inversion was successful
sTimesC = matmul(c_reduced,s_reduced)
errmatinv = errmatinv .or. any(dNeq(sTimesC,math_eye(size_reduced),1.0e-12_pREAL))
if (errmatinv) then
write(formatString, '(i2)') size_reduced
formatString = '(/,1x,a,/,'//trim(formatString)//'('//trim(formatString)//'(2x,es9.2,1x)/))'
print trim(formatString), 'C * S (load) ', transpose(matmul(c_reduced,s_reduced))
print trim(formatString), 'C (load) ', transpose(c_reduced)
print trim(formatString), 'S (load) ', transpose(s_reduced)
if (errmatinv) error stop 'matrix inversion error'
end if
temp99_real = reshape(unpack(reshape(s_reduced,[size_reduced**2]),reshape(mask,[81]),0.0_pREAL),[9,9])
else
temp99_real = 0.0_pREAL
end if
utilities_maskedCompliance = math_99to3333(temp99_Real)
end function utilities_maskedCompliance
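!--------------------------------------------------------------------------------------------------
! Hedged usage sketch (illustrative, not part of this file): the masked compliance is typically
! contracted with the stress residual to correct the average deformation gradient, roughly
!   S     = utilities_maskedCompliance(params%rotation_BC,params%stress_mask,C_volAvg)
!   F_aim = F_aim - math_mul3333xx33(S, P_av - params%stress_BC)
! so that only the stress-controlled components of F_aim are adjusted.
!--------------------------------------------------------------------------------------------------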
!--------------------------------------------------------------------------------------------------
!> @brief Calculate constitutive response.
!--------------------------------------------------------------------------------------------------
subroutine utilities_constitutiveResponse(P,P_av,C_volAvg,C_minmaxAvg,&
F,Delta_t,rotation_BC)
real(pREAL), intent(out), dimension(3,3,3,3) :: C_volAvg, C_minmaxAvg !< average stiffness
real(pREAL), intent(out), dimension(3,3) :: P_av !< average PK stress
real(pREAL), intent(out), dimension(3,3,cells(1),cells(2),cells3) :: P !< PK stress
real(pREAL), intent(in), dimension(3,3,cells(1),cells(2),cells3) :: F !< deformation gradient target
real(pREAL), intent(in) :: Delta_t !< loading time
type(tRotation), intent(in), optional :: rotation_BC !< rotation of load frame
integer :: i
integer(MPI_INTEGER_KIND) :: err_MPI
real(pREAL), dimension(3,3,3,3) :: dPdF_max, dPdF_min
real(pREAL) :: dPdF_norm_max, dPdF_norm_min
real(pREAL), dimension(2) :: valueAndRank !< pair of min/max norm of dPdF to synchronize min/max of dPdF
print'(/,1x,a)', '... evaluating constitutive response ......................................'
flush(IO_STDOUT)
homogenization_F = reshape(F,[3,3,product(cells(1:2))*cells3]) ! set materialpoint target F to estimated field
call homogenization_mechanical_response(Delta_t,1,product(cells(1:2))*cells3) ! calculate P field
P = reshape(homogenization_P, [3,3,cells(1),cells(2),cells3])
P_av = sum(sum(sum(P,dim=5),dim=4),dim=3) * wgt
call MPI_Allreduce(MPI_IN_PLACE,P_av,9_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
call parallelization_chkerr(err_MPI)
if (present(rotation_BC)) then
if (any(dNeq(rotation_BC%asQuaternion(), real([1.0, 0.0, 0.0, 0.0],pREAL)))) &
print'(/,1x,a,/,2(3(2x,f12.4,1x)/),3(2x,f12.4,1x))', &
'Piola--Kirchhoff stress (lab) / MPa =', transpose(P_av)*1.e-6_pREAL
P_av = rotation_BC%rotate(P_av)
end if
print'(/,1x,a,/,2(3(2x,f12.4,1x)/),3(2x,f12.4,1x))', &
'Piola--Kirchhoff stress / MPa =', transpose(P_av)*1.e-6_pREAL
flush(IO_STDOUT)
dPdF_max = 0.0_pREAL
dPdF_norm_max = 0.0_pREAL
dPdF_min = huge(1.0_pREAL)
dPdF_norm_min = huge(1.0_pREAL)
do i = 1, product(cells(1:2))*cells3
if (dPdF_norm_max < sum(homogenization_dPdF(1:3,1:3,1:3,1:3,i)**2)) then
dPdF_max = homogenization_dPdF(1:3,1:3,1:3,1:3,i)
dPdF_norm_max = sum(homogenization_dPdF(1:3,1:3,1:3,1:3,i)**2)
end if
if (dPdF_norm_min > sum(homogenization_dPdF(1:3,1:3,1:3,1:3,i)**2)) then
dPdF_min = homogenization_dPdF(1:3,1:3,1:3,1:3,i)
dPdF_norm_min = sum(homogenization_dPdF(1:3,1:3,1:3,1:3,i)**2)
end if
end do
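! pair each rank's extreme norm with its rank number so that MPI_MAXLOC/MPI_MINLOC identify the
! process holding the global extremum; that process then broadcasts its full dPdF tensor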
valueAndRank = [dPdF_norm_max,real(worldrank,pREAL)]
call MPI_Allreduce(MPI_IN_PLACE,valueAndRank,1_MPI_INTEGER_KIND,MPI_2DOUBLE_PRECISION,MPI_MAXLOC,MPI_COMM_WORLD,err_MPI)
call parallelization_chkerr(err_MPI)
call MPI_Bcast(dPdF_max,81_MPI_INTEGER_KIND,MPI_DOUBLE,int(valueAndRank(2),MPI_INTEGER_KIND),MPI_COMM_WORLD,err_MPI)
call parallelization_chkerr(err_MPI)
valueAndRank = [dPdF_norm_min,real(worldrank,pREAL)]
call MPI_Allreduce(MPI_IN_PLACE,valueAndRank,1_MPI_INTEGER_KIND,MPI_2DOUBLE_PRECISION,MPI_MINLOC,MPI_COMM_WORLD,err_MPI)
call parallelization_chkerr(err_MPI)
call MPI_Bcast(dPdF_min,81_MPI_INTEGER_KIND,MPI_DOUBLE,int(valueAndRank(2),MPI_INTEGER_KIND),MPI_COMM_WORLD,err_MPI)
call parallelization_chkerr(err_MPI)
C_minmaxAvg = 0.5_pREAL*(dPdF_max + dPdF_min)
C_volAvg = sum(homogenization_dPdF,dim=5)
call MPI_Allreduce(MPI_IN_PLACE,C_volAvg,81_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
call parallelization_chkerr(err_MPI)
C_volAvg = C_volAvg * wgt
end subroutine utilities_constitutiveResponse
!--------------------------------------------------------------------------------------------------
!> @brief Calculate forward rate, either as local guess or as homogeneous add on.
!--------------------------------------------------------------------------------------------------
pure function utilities_calculateRate(heterogeneous,field0,field,dt,avRate)
real(pREAL), intent(in), dimension(3,3) :: &
avRate !< homogeneous addon
real(pREAL), intent(in) :: &
dt !< Delta_t between field0 and field
logical, intent(in) :: &
heterogeneous !< calculate field of rates
real(pREAL), intent(in), dimension(3,3,cells(1),cells(2),cells3) :: &
field0, & !< data of previous step
field !< data of current step
real(pREAL), dimension(3,3,cells(1),cells(2),cells3) :: &
utilities_calculateRate
utilities_calculateRate = merge((field-field0) / dt, &
spread(spread(spread(avRate,3,cells(1)),4,cells(2)),5,cells3), &
heterogeneous)
end function utilities_calculateRate
!--------------------------------------------------------------------------------------------------
!> @brief forwards a field with a pointwise given rate, if aim is given,
!> ensures that the average matches the aim
!--------------------------------------------------------------------------------------------------
function utilities_forwardTensorField(Delta_t,field_lastInc,rate,aim)
real(pREAL), intent(in) :: &
Delta_t !< Delta_t of current step
real(pREAL), intent(in), dimension(3,3,cells(1),cells(2),cells3) :: &
field_lastInc, & !< initial field
rate !< rate by which to forward
real(pREAL), intent(in), optional, dimension(3,3) :: &
aim !< average field value aim
real(pREAL), dimension(3,3,cells(1),cells(2),cells3) :: &
utilities_forwardTensorField
real(pREAL), dimension(3,3) :: fieldDiff !< <a + adot*t> - aim
integer(MPI_INTEGER_KIND) :: err_MPI
utilities_forwardTensorField = field_lastInc + rate*Delta_t
if (present(aim)) then !< correct to match average
fieldDiff = sum(sum(sum(utilities_forwardTensorField,dim=5),dim=4),dim=3)*wgt
call MPI_Allreduce(MPI_IN_PLACE,fieldDiff,9_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
call parallelization_chkerr(err_MPI)
fieldDiff = fieldDiff - aim
utilities_forwardTensorField = utilities_forwardTensorField &
- spread(spread(spread(fieldDiff,3,cells(1)),4,cells(2)),5,cells3)
end if
end function utilities_forwardTensorField
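!--------------------------------------------------------------------------------------------------
! Hedged sketch of how the two helpers above cooperate in the *_forward routines of the solver
! variants (variable names illustrative, not taken from this diff):
!   Fdot = utilities_calculateRate(guess,F_lastInc,F,Delta_t_old, &
!                                  rotation_BC%rotate(F_aimDot,active=.true.))
!   F    = utilities_forwardTensorField(Delta_t,F_lastInc,Fdot, &
!                                       rotation_BC%rotate(F_aim,active=.true.))
! The rate estimated over the previous increment forwards the field; the optional aim argument
! pins its volume average to the rotated target.
!--------------------------------------------------------------------------------------------------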
end module grid_mech_utilities

@@ -43,7 +43,6 @@ module grid_thermal_spectral
type(tNumerics) :: num type(tNumerics) :: num
type(tSolutionParams) :: params
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
! PETSc data ! PETSc data
SNES :: SNES_thermal SNES :: SNES_thermal
@@ -56,7 +55,7 @@ module grid_thermal_spectral
! reference diffusion tensor, mobility etc. ! reference diffusion tensor, mobility etc.
integer :: totalIter = 0 !< total iteration in current increment integer :: totalIter = 0 !< total iteration in current increment
real(pREAL), dimension(3,3) :: K_ref real(pREAL), dimension(3,3) :: K_ref
real(pREAL) :: mu_ref real(pREAL) :: mu_ref, Delta_t_
public :: & public :: &
grid_thermal_spectral_init, & grid_thermal_spectral_init, &
@@ -124,7 +123,7 @@ subroutine grid_thermal_spectral_init(num_grid)
CHKERRQ(err_PETSc) CHKERRQ(err_PETSc)
call MPI_Allgather(int(cells3,pPETSCINT),1_MPI_INTEGER_KIND,MPI_INTEGER,& call MPI_Allgather(int(cells3,pPETSCINT),1_MPI_INTEGER_KIND,MPI_INTEGER,&
cells3_global,1_MPI_INTEGER_KIND,MPI_INTEGER,MPI_COMM_WORLD,err_MPI) cells3_global,1_MPI_INTEGER_KIND,MPI_INTEGER,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call parallelization_chkerr(err_MPI)
call DMDACreate3D(PETSC_COMM_WORLD, & call DMDACreate3D(PETSC_COMM_WORLD, &
DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, & ! cut off stencil at boundary DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, & ! cut off stencil at boundary
DMDA_STENCIL_BOX, & ! Moore (26) neighborhood around central point DMDA_STENCIL_BOX, & ! Moore (26) neighborhood around central point
@@ -186,8 +185,7 @@ end subroutine grid_thermal_spectral_init
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
function grid_thermal_spectral_solution(Delta_t) result(solution) function grid_thermal_spectral_solution(Delta_t) result(solution)
real(pREAL), intent(in) :: & real(pREAL), intent(in) :: Delta_t !< increment in time for current solution
Delta_t !< increment in time for current solution
type(tSolutionState) :: solution type(tSolutionState) :: solution
PetscInt :: devNull PetscInt :: devNull
@@ -201,7 +199,7 @@ function grid_thermal_spectral_solution(Delta_t) result(solution)
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
! set module wide available data ! set module wide available data
params%Delta_t = Delta_t Delta_t_ = Delta_t
call SNESSolve(SNES_thermal,PETSC_NULL_VEC,T_PETSc,err_PETSc) call SNESSolve(SNES_thermal,PETSC_NULL_VEC,T_PETSc,err_PETSc)
CHKERRQ(err_PETSc) CHKERRQ(err_PETSc)
@@ -220,14 +218,14 @@ function grid_thermal_spectral_solution(Delta_t) result(solution)
T_max = maxval(T) T_max = maxval(T)
stagNorm = maxval(abs(T - T_stagInc)) stagNorm = maxval(abs(T - T_stagInc))
call MPI_Allreduce(MPI_IN_PLACE,stagNorm,1_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_MAX,MPI_COMM_WORLD,err_MPI) call MPI_Allreduce(MPI_IN_PLACE,stagNorm,1_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_MAX,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call parallelization_chkerr(err_MPI)
solution%stagConverged = stagNorm < max(num%eps_thermal_atol, num%eps_thermal_rtol*T_max) solution%stagConverged = stagNorm < max(num%eps_thermal_atol, num%eps_thermal_rtol*T_max)
call MPI_Allreduce(MPI_IN_PLACE,solution%stagConverged,1_MPI_INTEGER_KIND,MPI_LOGICAL,MPI_LAND,MPI_COMM_WORLD,err_MPI) call MPI_Allreduce(MPI_IN_PLACE,solution%stagConverged,1_MPI_INTEGER_KIND,MPI_LOGICAL,MPI_LAND,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call parallelization_chkerr(err_MPI)
T_stagInc = T T_stagInc = T
call homogenization_thermal_setField(reshape(T,[product(cells(1:2))*cells3]), & call homogenization_thermal_setField(reshape(T,[product(cells(1:2))*cells3]), &
reshape(T-T_lastInc,[product(cells(1:2))*cells3])/params%Delta_t) reshape(T-T_lastInc,[product(cells(1:2))*cells3])/Delta_t_)
call DMDAVecRestoreArrayF90(DM_thermal,T_PETSc,T,err_PETSc) call DMDAVecRestoreArrayF90(DM_thermal,T_PETSc,T,err_PETSc)
CHKERRQ(err_PETSc) CHKERRQ(err_PETSc)
@@ -264,7 +262,7 @@ subroutine grid_thermal_spectral_forward(cutBack)
T = T_lastInc T = T_lastInc
T_stagInc = T_lastInc T_stagInc = T_lastInc
else else
dotT_lastInc = (T - T_lastInc)/params%Delta_t dotT_lastInc = (T - T_lastInc)/Delta_t_
T_lastInc = T T_lastInc = T
call updateReference() call updateReference()
end if end if
@@ -325,6 +323,8 @@ subroutine formResidual(residual_subdomain,x_scal,r,dummy,err_PETSc)
real(pREAL), dimension(3,cells(1),cells(2),cells3) :: vectorField real(pREAL), dimension(3,cells(1),cells(2),cells3) :: vectorField
call homogenization_thermal_response(Delta_t_,1,product(cells(1:2))*cells3)
associate(T => x_scal) associate(T => x_scal)
vectorField = utilities_ScalarGradient(T) vectorField = utilities_ScalarGradient(T)
ce = 0 ce = 0
@@ -336,13 +336,13 @@ subroutine formResidual(residual_subdomain,x_scal,r,dummy,err_PETSc)
ce = 0 ce = 0
do k = 1, cells3; do j = 1, cells(2); do i = 1,cells(1) do k = 1, cells3; do j = 1, cells(2); do i = 1,cells(1)
ce = ce + 1 ce = ce + 1
r(i,j,k) = params%Delta_t*(r(i,j,k) + homogenization_f_T(ce)) & r(i,j,k) = Delta_t_*(r(i,j,k) + homogenization_f_T(ce)) &
+ homogenization_mu_T(ce) * (T_lastInc(i,j,k) - T(i,j,k)) & + homogenization_mu_T(ce) * (T_lastInc(i,j,k) - T(i,j,k)) &
+ mu_ref*T(i,j,k) + mu_ref*T(i,j,k)
end do; end do; end do end do; end do; end do
r = T & r = T &
- utilities_GreenConvolution(r, K_ref, mu_ref, params%Delta_t) - utilities_GreenConvolution(r, K_ref, mu_ref, Delta_t_)
end associate end associate
err_PETSc = 0 err_PETSc = 0
@@ -367,10 +367,10 @@ subroutine updateReference()
K_ref = K_ref*wgt K_ref = K_ref*wgt
call MPI_Allreduce(MPI_IN_PLACE,K_ref,9_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI) call MPI_Allreduce(MPI_IN_PLACE,K_ref,9_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call parallelization_chkerr(err_MPI)
mu_ref = mu_ref*wgt mu_ref = mu_ref*wgt
call MPI_Allreduce(MPI_IN_PLACE,mu_ref,1_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI) call MPI_Allreduce(MPI_IN_PLACE,mu_ref,1_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call parallelization_chkerr(err_MPI)
end subroutine updateReference end subroutine updateReference

@@ -75,22 +75,9 @@ module spectral_utilities
termIll = .false. termIll = .false.
end type tSolutionState end type tSolutionState
type, public :: tBoundaryCondition !< set of parameters defining a boundary condition
real(pREAL), dimension(3,3) :: values = 0.0_pREAL
logical, dimension(3,3) :: mask = .true.
character(len=:), allocatable :: myType
end type tBoundaryCondition
type, public :: tSolutionParams
real(pREAL), dimension(3,3) :: stress_BC
logical, dimension(3,3) :: stress_mask
type(tRotation) :: rotation_BC
real(pREAL) :: Delta_t
end type tSolutionParams
type :: tNumerics type :: tNumerics
integer :: & integer :: &
divergence_correction !< scale divergence/curl calculation: [0: no correction, 1: size scaled to 1, 2: size scaled to Npoints] divergence_correction !< scale divergence/curl calculation
logical :: & logical :: &
memory_efficient !< calculate gamma operator on the fly memory_efficient !< calculate gamma operator on the fly
end type tNumerics end type tNumerics
@@ -121,10 +108,6 @@ module spectral_utilities
utilities_curlRMS, & utilities_curlRMS, &
utilities_scalarGradient, & utilities_scalarGradient, &
utilities_vectorDivergence, & utilities_vectorDivergence, &
utilities_maskedCompliance, &
utilities_constitutiveResponse, &
utilities_calculateRate, &
utilities_forwardTensorField, &
utilities_updateCoords utilities_updateCoords
contains contains
@@ -580,7 +563,7 @@ real(pREAL) function utilities_divergenceRMS(tensorField)
conjg(-xi1st(1:3,cells1Red,k,j))*rescaledGeom))**2) conjg(-xi1st(1:3,cells1Red,k,j))*rescaledGeom))**2)
end do; end do end do; end do
call MPI_Allreduce(MPI_IN_PLACE,utilities_divergenceRMS,1_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI) call MPI_Allreduce(MPI_IN_PLACE,utilities_divergenceRMS,1_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call parallelization_chkerr(err_MPI)
utilities_divergenceRMS = sqrt(utilities_divergenceRMS) * wgt ! RMS in real space calculated with Parsevals theorem from Fourier space utilities_divergenceRMS = sqrt(utilities_divergenceRMS) * wgt ! RMS in real space calculated with Parsevals theorem from Fourier space
if (cells(1) == 1) utilities_divergenceRMS = utilities_divergenceRMS * 0.5_pREAL ! counted twice in case of cells(1) == 1 if (cells(1) == 1) utilities_divergenceRMS = utilities_divergenceRMS * 0.5_pREAL ! counted twice in case of cells(1) == 1
@@ -646,72 +629,13 @@ real(pREAL) function utilities_curlRMS(tensorField)
end do; end do end do; end do
call MPI_Allreduce(MPI_IN_PLACE,utilities_curlRMS,1_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI) call MPI_Allreduce(MPI_IN_PLACE,utilities_curlRMS,1_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call parallelization_chkerr(err_MPI)
utilities_curlRMS = sqrt(utilities_curlRMS) * wgt ! RMS in real space calculated with Parsevals theorem from Fourier space utilities_curlRMS = sqrt(utilities_curlRMS) * wgt ! RMS in real space calculated with Parsevals theorem from Fourier space
if (cells(1) == 1) utilities_curlRMS = utilities_curlRMS * 0.5_pREAL ! counted twice in case of cells(1) == 1 if (cells(1) == 1) utilities_curlRMS = utilities_curlRMS * 0.5_pREAL ! counted twice in case of cells(1) == 1
end function utilities_curlRMS end function utilities_curlRMS
!--------------------------------------------------------------------------------------------------
!> @brief Calculate masked compliance tensor used to adjust F to fulfill stress BC.
!--------------------------------------------------------------------------------------------------
function utilities_maskedCompliance(rot_BC,mask_stress,C)
real(pREAL), dimension(3,3,3,3) :: utilities_maskedCompliance !< masked compliance
real(pREAL), intent(in), dimension(3,3,3,3) :: C !< current average stiffness
type(tRotation), intent(in) :: rot_BC !< rotation of load frame
logical, intent(in), dimension(3,3) :: mask_stress !< mask of stress BC
integer :: i, j
logical, dimension(9) :: mask_stressVector
logical, dimension(9,9) :: mask
real(pREAL), dimension(9,9) :: temp99_real
integer :: size_reduced = 0
real(pREAL), dimension(:,:), allocatable :: &
s_reduced, & !< reduced compliance matrix (depending on number of stress BC)
c_reduced, & !< reduced stiffness (depending on number of stress BC)
sTimesC !< temp variable to check inversion
logical :: errmatinv
character(len=pSTRLEN):: formatString
mask_stressVector = .not. reshape(transpose(mask_stress), [9])
size_reduced = count(mask_stressVector)
if (size_reduced > 0) then
temp99_real = math_3333to99(rot_BC%rotate(C))
do i = 1,9; do j = 1,9
mask(i,j) = mask_stressVector(i) .and. mask_stressVector(j)
end do; end do
c_reduced = reshape(pack(temp99_Real,mask),[size_reduced,size_reduced])
allocate(s_reduced,mold = c_reduced)
call math_invert(s_reduced, errmatinv, c_reduced) ! invert reduced stiffness
if (any(IEEE_is_NaN(s_reduced))) errmatinv = .true.
!--------------------------------------------------------------------------------------------------
! check if inversion was successful
sTimesC = matmul(c_reduced,s_reduced)
errmatinv = errmatinv .or. any(dNeq(sTimesC,math_eye(size_reduced),1.0e-12_pREAL))
if (errmatinv) then
write(formatString, '(i2)') size_reduced
formatString = '(/,1x,a,/,'//trim(formatString)//'('//trim(formatString)//'(2x,es9.2,1x)/))'
print trim(formatString), 'C * S (load) ', transpose(matmul(c_reduced,s_reduced))
print trim(formatString), 'C (load) ', transpose(c_reduced)
print trim(formatString), 'S (load) ', transpose(s_reduced)
if (errmatinv) error stop 'matrix inversion error'
end if
temp99_real = reshape(unpack(reshape(s_reduced,[size_reduced**2]),reshape(mask,[81]),0.0_pREAL),[9,9])
else
temp99_real = 0.0_pREAL
end if
utilities_maskedCompliance = math_99to3333(temp99_Real)
end function utilities_maskedCompliance
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
!> @brief Calculate gradient of scalar field. !> @brief Calculate gradient of scalar field.
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
@@ -755,147 +679,6 @@ function utilities_vectorDivergence(field) result(div)
end function utilities_vectorDivergence end function utilities_vectorDivergence
!--------------------------------------------------------------------------------------------------
!> @brief calculate constitutive response from homogenization_F0 to F during Delta_t
!--------------------------------------------------------------------------------------------------
subroutine utilities_constitutiveResponse(P,P_av,C_volAvg,C_minmaxAvg,&
F,Delta_t,rotation_BC)
real(pREAL), intent(out), dimension(3,3,3,3) :: C_volAvg, C_minmaxAvg !< average stiffness
real(pREAL), intent(out), dimension(3,3) :: P_av !< average PK stress
real(pREAL), intent(out), dimension(3,3,cells(1),cells(2),cells3) :: P !< PK stress
real(pREAL), intent(in), dimension(3,3,cells(1),cells(2),cells3) :: F !< deformation gradient target
real(pREAL), intent(in) :: Delta_t !< loading time
type(tRotation), intent(in), optional :: rotation_BC !< rotation of load frame
integer :: i
integer(MPI_INTEGER_KIND) :: err_MPI
real(pREAL), dimension(3,3,3,3) :: dPdF_max, dPdF_min
real(pREAL) :: dPdF_norm_max, dPdF_norm_min
real(pREAL), dimension(2) :: valueAndRank !< pair of min/max norm of dPdF to synchronize min/max of dPdF
print'(/,1x,a)', '... evaluating constitutive response ......................................'
flush(IO_STDOUT)
homogenization_F = reshape(F,[3,3,product(cells(1:2))*cells3]) ! set materialpoint target F to estimated field
call homogenization_mechanical_response(Delta_t,1,product(cells(1:2))*cells3) ! calculate P field
if (.not. terminallyIll) &
call homogenization_thermal_response(Delta_t,1,product(cells(1:2))*cells3)
if (.not. terminallyIll) &
call homogenization_mechanical_response2(Delta_t,[1,1],[1,product(cells(1:2))*cells3])
P = reshape(homogenization_P, [3,3,cells(1),cells(2),cells3])
P_av = sum(sum(sum(P,dim=5),dim=4),dim=3) * wgt
call MPI_Allreduce(MPI_IN_PLACE,P_av,9_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
if (present(rotation_BC)) then
if (any(dNeq(rotation_BC%asQuaternion(), real([1.0, 0.0, 0.0, 0.0],pREAL)))) &
print'(/,1x,a,/,2(3(2x,f12.4,1x)/),3(2x,f12.4,1x))', &
'Piola--Kirchhoff stress (lab) / MPa =', transpose(P_av)*1.e-6_pREAL
P_av = rotation_BC%rotate(P_av)
end if
print'(/,1x,a,/,2(3(2x,f12.4,1x)/),3(2x,f12.4,1x))', &
'Piola--Kirchhoff stress / MPa =', transpose(P_av)*1.e-6_pREAL
flush(IO_STDOUT)
dPdF_max = 0.0_pREAL
dPdF_norm_max = 0.0_pREAL
dPdF_min = huge(1.0_pREAL)
dPdF_norm_min = huge(1.0_pREAL)
do i = 1, product(cells(1:2))*cells3
if (dPdF_norm_max < sum(homogenization_dPdF(1:3,1:3,1:3,1:3,i)**2)) then
dPdF_max = homogenization_dPdF(1:3,1:3,1:3,1:3,i)
dPdF_norm_max = sum(homogenization_dPdF(1:3,1:3,1:3,1:3,i)**2)
end if
if (dPdF_norm_min > sum(homogenization_dPdF(1:3,1:3,1:3,1:3,i)**2)) then
dPdF_min = homogenization_dPdF(1:3,1:3,1:3,1:3,i)
dPdF_norm_min = sum(homogenization_dPdF(1:3,1:3,1:3,1:3,i)**2)
end if
end do
valueAndRank = [dPdF_norm_max,real(worldrank,pREAL)]
call MPI_Allreduce(MPI_IN_PLACE,valueAndRank,1_MPI_INTEGER_KIND,MPI_2DOUBLE_PRECISION,MPI_MAXLOC,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
call MPI_Bcast(dPdF_max,81_MPI_INTEGER_KIND,MPI_DOUBLE,int(valueAndRank(2),MPI_INTEGER_KIND),MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
valueAndRank = [dPdF_norm_min,real(worldrank,pREAL)]
call MPI_Allreduce(MPI_IN_PLACE,valueAndRank,1_MPI_INTEGER_KIND,MPI_2DOUBLE_PRECISION,MPI_MINLOC,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
call MPI_Bcast(dPdF_min,81_MPI_INTEGER_KIND,MPI_DOUBLE,int(valueAndRank(2),MPI_INTEGER_KIND),MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
C_minmaxAvg = 0.5_pREAL*(dPdF_max + dPdF_min)
C_volAvg = sum(homogenization_dPdF,dim=5)
call MPI_Allreduce(MPI_IN_PLACE,C_volAvg,81_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
C_volAvg = C_volAvg * wgt
end subroutine utilities_constitutiveResponse
!--------------------------------------------------------------------------------------------------
!> @brief Calculate forward rate, either as local guess or as homogeneous add on.
!--------------------------------------------------------------------------------------------------
pure function utilities_calculateRate(heterogeneous,field0,field,dt,avRate)
real(pREAL), intent(in), dimension(3,3) :: &
avRate !< homogeneous addon
real(pREAL), intent(in) :: &
dt !< Delta_t between field0 and field
logical, intent(in) :: &
heterogeneous !< calculate field of rates
real(pREAL), intent(in), dimension(3,3,cells(1),cells(2),cells3) :: &
field0, & !< data of previous step
field !< data of current step
real(pREAL), dimension(3,3,cells(1),cells(2),cells3) :: &
utilities_calculateRate
utilities_calculateRate = merge((field-field0) / dt, &
spread(spread(spread(avRate,3,cells(1)),4,cells(2)),5,cells3), &
heterogeneous)
end function utilities_calculateRate
!--------------------------------------------------------------------------------------------------
!> @brief forwards a field with a pointwise given rate, if aim is given,
!> ensures that the average matches the aim
!--------------------------------------------------------------------------------------------------
function utilities_forwardTensorField(Delta_t,field_lastInc,rate,aim)
real(pREAL), intent(in) :: &
Delta_t !< Delta_t of current step
real(pREAL), intent(in), dimension(3,3,cells(1),cells(2),cells3) :: &
field_lastInc, & !< initial field
rate !< rate by which to forward
real(pREAL), intent(in), optional, dimension(3,3) :: &
aim !< average field value aim
real(pREAL), dimension(3,3,cells(1),cells(2),cells3) :: &
utilities_forwardTensorField
real(pREAL), dimension(3,3) :: fieldDiff !< <a + adot*t> - aim
integer(MPI_INTEGER_KIND) :: err_MPI
utilities_forwardTensorField = field_lastInc + rate*Delta_t
if (present(aim)) then !< correct to match average
fieldDiff = sum(sum(sum(utilities_forwardTensorField,dim=5),dim=4),dim=3)*wgt
call MPI_Allreduce(MPI_IN_PLACE,fieldDiff,9_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
fieldDiff = fieldDiff - aim
utilities_forwardTensorField = utilities_forwardTensorField &
- spread(spread(spread(fieldDiff,3,cells(1)),4,cells(2)),5,cells3)
end if
end function utilities_forwardTensorField
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
!> @brief Calculate Filter for Fourier convolution. !> @brief Calculate Filter for Fourier convolution.
!> @details this is the full operator to calculate derivatives, i.e. 2 \pi i k for the !> @details this is the full operator to calculate derivatives, i.e. 2 \pi i k for the
@@ -995,7 +778,7 @@ subroutine utilities_updateCoords(F)
! average F ! average F
if (cells3Offset == 0) Favg = tensorField_fourier(1:3,1:3,1,1,1)%re*wgt if (cells3Offset == 0) Favg = tensorField_fourier(1:3,1:3,1,1,1)%re*wgt
call MPI_Bcast(Favg,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) call MPI_Bcast(Favg,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call parallelization_chkerr(err_MPI)
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
! integration in Fourier space to get fluctuations of cell center displacements ! integration in Fourier space to get fluctuations of cell center displacements
@@ -1021,18 +804,18 @@ subroutine utilities_updateCoords(F)
! send bottom layer to process below ! send bottom layer to process below
call MPI_Isend(u_tilde_p_padded(:,:,:,1), c,MPI_DOUBLE,rank_b,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,request(1),err_MPI) call MPI_Isend(u_tilde_p_padded(:,:,:,1), c,MPI_DOUBLE,rank_b,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,request(1),err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call parallelization_chkerr(err_MPI)
call MPI_Irecv(u_tilde_p_padded(:,:,:,cells3+1),c,MPI_DOUBLE,rank_t,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,request(2),err_MPI) call MPI_Irecv(u_tilde_p_padded(:,:,:,cells3+1),c,MPI_DOUBLE,rank_t,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,request(2),err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call parallelization_chkerr(err_MPI)
! send top layer to process above ! send top layer to process above
call MPI_Isend(u_tilde_p_padded(:,:,:,cells3) ,c,MPI_DOUBLE,rank_t,1_MPI_INTEGER_KIND,MPI_COMM_WORLD,request(3),err_MPI) call MPI_Isend(u_tilde_p_padded(:,:,:,cells3) ,c,MPI_DOUBLE,rank_t,1_MPI_INTEGER_KIND,MPI_COMM_WORLD,request(3),err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call parallelization_chkerr(err_MPI)
call MPI_Irecv(u_tilde_p_padded(:,:,:,0), c,MPI_DOUBLE,rank_b,1_MPI_INTEGER_KIND,MPI_COMM_WORLD,request(4),err_MPI) call MPI_Irecv(u_tilde_p_padded(:,:,:,0), c,MPI_DOUBLE,rank_b,1_MPI_INTEGER_KIND,MPI_COMM_WORLD,request(4),err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call parallelization_chkerr(err_MPI)
call MPI_Waitall(4,request,status,err_MPI) call MPI_Waitall(4,request,status,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call parallelization_chkerr(err_MPI)
#if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY) #if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY)
! ToDo ! ToDo
#else #else
@@ -1085,7 +868,7 @@ subroutine selfTest()
call fftw_mpi_execute_dft_r2c(planTensorForth,tensorField_real,tensorField_fourier) call fftw_mpi_execute_dft_r2c(planTensorForth,tensorField_real,tensorField_fourier)
call MPI_Allreduce(sum(sum(sum(tensorField_real_,dim=5),dim=4),dim=3),tensorSum,9_MPI_INTEGER_KIND, & call MPI_Allreduce(sum(sum(sum(tensorField_real_,dim=5),dim=4),dim=3),tensorSum,9_MPI_INTEGER_KIND, &
MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI) MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call parallelization_chkerr(err_MPI)
if (worldrank==0) then if (worldrank==0) then
if (any(dNeq(tensorSum/tensorField_fourier(:,:,1,1,1)%re,1.0_pREAL,1.0e-12_pREAL))) & if (any(dNeq(tensorSum/tensorField_fourier(:,:,1,1,1)%re,1.0_pREAL,1.0e-12_pREAL))) &
error stop 'mismatch avg tensorField FFT <-> real' error stop 'mismatch avg tensorField FFT <-> real'
@@ -1101,7 +884,7 @@ subroutine selfTest()
call fftw_mpi_execute_dft_r2c(planVectorForth,vectorField_real,vectorField_fourier) call fftw_mpi_execute_dft_r2c(planVectorForth,vectorField_real,vectorField_fourier)
call MPI_Allreduce(sum(sum(sum(vectorField_real_,dim=4),dim=3),dim=2),vectorSum,3_MPI_INTEGER_KIND, & call MPI_Allreduce(sum(sum(sum(vectorField_real_,dim=4),dim=3),dim=2),vectorSum,3_MPI_INTEGER_KIND, &
MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI) MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call parallelization_chkerr(err_MPI)
if (worldrank==0) then if (worldrank==0) then
if (any(dNeq(vectorSum/vectorField_fourier(:,1,1,1)%re,1.0_pREAL,1.0e-12_pREAL))) & if (any(dNeq(vectorSum/vectorField_fourier(:,1,1,1)%re,1.0_pREAL,1.0e-12_pREAL))) &
error stop 'mismatch avg vectorField FFT <-> real' error stop 'mismatch avg vectorField FFT <-> real'
@@ -1117,7 +900,7 @@ subroutine selfTest()
call fftw_mpi_execute_dft_r2c(planScalarForth,scalarField_real,scalarField_fourier) call fftw_mpi_execute_dft_r2c(planScalarForth,scalarField_real,scalarField_fourier)
call MPI_Allreduce(sum(sum(sum(scalarField_real_,dim=3),dim=2),dim=1),scalarSum,1_MPI_INTEGER_KIND, & call MPI_Allreduce(sum(sum(sum(scalarField_real_,dim=3),dim=2),dim=1),scalarSum,1_MPI_INTEGER_KIND, &
MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI) MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call parallelization_chkerr(err_MPI)
if (worldrank==0) then if (worldrank==0) then
if (dNeq(scalarSum/scalarField_fourier(1,1,1)%re,1.0_pREAL,1.0e-12_pREAL)) & if (dNeq(scalarSum/scalarField_fourier(1,1,1)%re,1.0_pREAL,1.0e-12_pREAL)) &
error stop 'mismatch avg scalarField FFT <-> real' error stop 'mismatch avg scalarField FFT <-> real'
@@ -1129,7 +912,7 @@ subroutine selfTest()
call random_number(r) call random_number(r)
call MPI_Bcast(r,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) call MPI_Bcast(r,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call parallelization_chkerr(err_MPI)
scalarField_real_ = r(1,1) scalarField_real_ = r(1,1)
if (maxval(abs(utilities_scalarGradient(scalarField_real_)))>5.0e-9_pREAL) error stop 'non-zero grad(const)' if (maxval(abs(utilities_scalarGradient(scalarField_real_)))>5.0e-9_pREAL) error stop 'non-zero grad(const)'

@@ -52,7 +52,6 @@ module homogenization
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
! General variables for the homogenization at a material point ! General variables for the homogenization at a material point
real(pREAL), dimension(:,:,:), allocatable, public :: & real(pREAL), dimension(:,:,:), allocatable, public :: &
homogenization_F0, & !< def grad of IP at start of FE increment
homogenization_F !< def grad of IP to be reached at end of FE increment homogenization_F !< def grad of IP to be reached at end of FE increment
real(pREAL), dimension(:,:,:), allocatable, public :: & !, protected :: & Issue with ifort real(pREAL), dimension(:,:,:), allocatable, public :: & !, protected :: & Issue with ifort
homogenization_P !< first P--K stress of IP homogenization_P !< first P--K stress of IP
@@ -169,7 +168,6 @@ module homogenization
public :: & public :: &
homogenization_init, & homogenization_init, &
homogenization_mechanical_response, & homogenization_mechanical_response, &
homogenization_mechanical_response2, &
homogenization_thermal_response, & homogenization_thermal_response, &
homogenization_thermal_active, & homogenization_thermal_active, &
homogenization_mu_T, & homogenization_mu_T, &
@@ -228,7 +226,8 @@ subroutine homogenization_mechanical_response(Delta_t,cell_start,cell_end)
doneAndHappy doneAndHappy
!$OMP PARALLEL DO PRIVATE(en,ho,co,converged,doneAndHappy) !$OMP PARALLEL
!$OMP DO PRIVATE(en,ho,co,converged,doneAndHappy)
do ce = cell_start, cell_end do ce = cell_start, cell_end
en = material_entry_homogenization(ce) en = material_entry_homogenization(ce)
@@ -261,7 +260,18 @@ subroutine homogenization_mechanical_response(Delta_t,cell_start,cell_end)
terminallyIll = .true. terminallyIll = .true.
end if end if
end do end do
!$OMP END PARALLEL DO !$OMP END DO
!$OMP DO PRIVATE(ho)
do ce = cell_start, cell_end
ho = material_ID_homogenization(ce)
do co = 1, homogenization_Nconstituents(ho)
call crystallite_orientations(co,ce)
end do
call mechanical_homogenize(Delta_t,ce)
end do
!$OMP END DO
!$OMP END PARALLEL
end subroutine homogenization_mechanical_response end subroutine homogenization_mechanical_response
@@ -274,6 +284,7 @@ subroutine homogenization_thermal_response(Delta_t,cell_start,cell_end)
real(pREAL), intent(in) :: Delta_t !< time increment real(pREAL), intent(in) :: Delta_t !< time increment
integer, intent(in) :: & integer, intent(in) :: &
cell_start, cell_end cell_start, cell_end
integer :: & integer :: &
co, ce, ho co, ce, ho
@@ -294,40 +305,10 @@ subroutine homogenization_thermal_response(Delta_t,cell_start,cell_end)
end subroutine homogenization_thermal_response end subroutine homogenization_thermal_response
!--------------------------------------------------------------------------------------------------
!> @brief
!--------------------------------------------------------------------------------------------------
subroutine homogenization_mechanical_response2(Delta_t,FEsolving_execIP,FEsolving_execElem)
real(pREAL), intent(in) :: Delta_t !< time increment
integer, dimension(2), intent(in) :: FEsolving_execElem, FEsolving_execIP
integer :: &
ip, & !< integration point number
el, & !< element number
co, ce, ho
!$OMP PARALLEL DO PRIVATE(ho,ce)
elementLooping3: do el = FEsolving_execElem(1),FEsolving_execElem(2)
IpLooping3: do ip = FEsolving_execIP(1),FEsolving_execIP(2)
ce = (el-1)*discretization_nIPs + ip
ho = material_ID_homogenization(ce)
do co = 1, homogenization_Nconstituents(ho)
call crystallite_orientations(co,ip,el)
end do
call mechanical_homogenize(Delta_t,ce)
end do IpLooping3
end do elementLooping3
!$OMP END PARALLEL DO
end subroutine homogenization_mechanical_response2
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
!> @brief writes homogenization results to HDF5 output file !> @brief writes homogenization results to HDF5 output file
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
subroutine homogenization_result subroutine homogenization_result()
integer :: ho integer :: ho
character(len=:), allocatable :: group_base,group character(len=:), allocatable :: group_base,group
@@ -362,7 +343,7 @@ end subroutine homogenization_result
!> @brief Forward data after successful increment. !> @brief Forward data after successful increment.
! ToDo: Any guessing for the current states possible? ! ToDo: Any guessing for the current states possible?
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
subroutine homogenization_forward subroutine homogenization_forward()
integer :: ho integer :: ho

@@ -77,8 +77,7 @@ module subroutine mechanical_init()
call parseMechanical() call parseMechanical()
allocate(homogenization_dPdF(3,3,3,3,discretization_Ncells), source=0.0_pREAL) allocate(homogenization_dPdF(3,3,3,3,discretization_Ncells), source=0.0_pREAL)
homogenization_F0 = spread(math_I3,3,discretization_Ncells) homogenization_F = spread(math_I3,3,discretization_Ncells)
homogenization_F = homogenization_F0
allocate(homogenization_P(3,3,discretization_Ncells),source=0.0_pREAL) allocate(homogenization_P(3,3,discretization_Ncells),source=0.0_pREAL)
if (any(mechanical_type == MECHANICAL_PASS_ID)) call pass_init() if (any(mechanical_type == MECHANICAL_PASS_ID)) call pass_init()

@@ -23,43 +23,32 @@ program DAMASK_mesh
implicit none(type,external) implicit none(type,external)
type :: tLoadCase type :: tLoadCase
real(pREAL) :: time = 0.0_pREAL !< length of increment real(pREAL) :: t = 0.0_pREAL !< length of increment
integer :: incs = 0, & !< number of increments integer :: N = 0, & !< number of increments
outputfrequency = 1 !< frequency of result writes f_out = 1 !< frequency of result writes
logical :: followFormerTrajectory = .true. !< follow trajectory of former loadcase logical :: estimate_rate = .true. !< follow trajectory of former loadcase
integer, allocatable, dimension(:) :: faceID type(tMechBC), allocatable, dimension(:) :: mechBC
type(tFieldBC), allocatable, dimension(:) :: fieldBC
end type tLoadCase end type tLoadCase
!--------------------------------------------------------------------------------------------------
! variables related to information from load case and geom file
integer, allocatable, dimension(:) :: chunkPos ! this is longer than needed for geometry parsing
integer :: &
N_def = 0 !< # of rate of deformation specifiers found in load case file
character(len=:), allocatable :: &
line
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
! loop variables, convergence etc. ! loop variables, convergence etc.
integer, parameter :: & integer, parameter :: &
subStepFactor = 2 !< for each substep, divide the last time increment by 2.0 subStepFactor = 2 !< for each substep, divide the last time increment by 2.0
real(pREAL) :: & real(pREAL) :: &
time = 0.0_pREAL, & !< elapsed time t = 0.0_pREAL, & !< elapsed time
time0 = 0.0_pREAL, & !< begin of interval t_0 = 0.0_pREAL, & !< begin of interval
timeinc = 0.0_pREAL, & !< current time interval Delta_t = 0.0_pREAL, & !< current time interval
timeIncOld = 0.0_pREAL, & !< previous time interval Delta_t_prev = 0.0_pREAL !< previous time interval
remainingLoadCaseTime = 0.0_pREAL !< remaining time of current load case
logical :: & logical :: &
guess, & !< guess along former trajectory guess, & !< guess along former trajectory
stagIterate stagIterate
integer :: & integer :: &
l, & l, &
i, & m, &
errorID, & errorID, &
cutBackLevel = 0, & !< cut back level \f$ t = \frac{t_{inc}}{2^l} \f$ cutBackLevel = 0, & !< cut back level \f$ t = \frac{t_{inc}}{2^l} \f$
stepFraction = 0, & !< fraction of current time interval stepFraction = 0, & !< fraction of current time interval
currentLoadcase = 0, & !< current load case
currentFace = 0, &
inc, & !< current increment in current load case inc, & !< current increment in current load case
totalIncsCounter = 0, & !< total # of increments totalIncsCounter = 0, & !< total # of increments
statUnit = 0, & !< file unit for statistics output statUnit = 0, & !< file unit for statistics output
@@ -67,8 +56,16 @@ program DAMASK_mesh
component component
type(tDict), pointer :: & type(tDict), pointer :: &
num_solver, & num_solver, &
num_mesh num_mesh, &
character(len=pSTRLEN), dimension(:), allocatable :: fileContent load, &
load_step, &
step_bc, &
mech_BC, &
step_discretization
type(tList), pointer :: &
load_steps, &
mech_u, &
step_mech
character(len=pSTRLEN) :: & character(len=pSTRLEN) :: &
incInfo, & incInfo, &
loadcase_string loadcase_string
@ -80,9 +77,11 @@ program DAMASK_mesh
type(tSolutionState), allocatable, dimension(:) :: solres type(tSolutionState), allocatable, dimension(:) :: solres
PetscInt :: faceSet, currentFaceSet, dimPlex PetscInt :: faceSet, currentFaceSet, dimPlex
PetscErrorCode :: err_PETSc PetscErrorCode :: err_PETSc
integer(kind(COMPONENT_UNDEFINED_ID)) :: ID
external :: & external :: &
quit quit
character(len=:), allocatable :: &
fileContent, fname
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
! init DAMASK (all modules) ! init DAMASK (all modules)
@ -104,135 +103,92 @@ program DAMASK_mesh
CHKERRA(err_PETSc) CHKERRA(err_PETSc)
allocate(solres(1)) allocate(solres(1))
!-------------------------------------------------------------------------------------------------- if (worldrank == 0) then
! reading basic information from load case file and allocate data structure containing load cases fileContent = IO_read(CLI_loadFile)
fileContent = IO_readlines(trim(CLI_loadFile)) fname = CLI_loadFile
do l = 1, size(fileContent) if (scan(fname,'/') /= 0) fname = fname(scan(fname,'/',.true.)+1:)
line = fileContent(l) call result_openJobFile(parallel=.false.)
if (IO_isBlank(line)) cycle ! skip empty lines call result_addSetupFile(fileContent,fname,'load case definition (mesh solver)')
call result_closeJobFile()
end if
chunkPos = IO_strPos(line) call parallelization_bcast_str(fileContent)
do i = 1, chunkPos(1) ! reading compulsory parameters for loadcase load => YAML_parse_str_asDict(fileContent)
select case (IO_strValue(line,chunkPos,i)) load_steps => load%get_list('loadstep')
case('$Loadcase')
N_def = N_def + 1 allocate(loadCases(load_steps%length))
end select
end do ! count all identifiers to allocate memory and do sanity check
do l = 1, load_steps%length
load_step => load_steps%get_dict(l)
step_bc => load_step%get_dict('boundary_conditions')
step_mech => step_bc%get_list('mechanical')
allocate(loadCases(l)%mechBC(mesh_Nboundaries))
loadCases(l)%mechBC(:)%nComponents = dimPlex !< X, Y (, Z) displacements
do faceSet = 1, mesh_Nboundaries
allocate(loadCases(l)%mechBC(faceSet)%Value(dimPlex), source = 0.0_pREAL)
allocate(loadCases(l)%mechBC(faceSet)%Mask(dimPlex), source = .false.)
end do end do
if (N_def < 1) call IO_error(error_ID = 837) do m = 1, step_mech%length
allocate(loadCases(N_def)) mech_BC => step_mech%get_dict(m)
do i = 1, size(loadCases)
allocate(loadCases(i)%fieldBC(1))
loadCases(i)%fieldBC(1)%ID = FIELD_MECH_ID
end do
do i = 1, size(loadCases)
loadCases(i)%fieldBC(1)%nComponents = dimPlex !< X, Y (, Z) displacements
allocate(loadCases(i)%fieldBC(1)%componentBC(loadCases(i)%fieldBC(1)%nComponents))
do component = 1, loadCases(i)%fieldBC(1)%nComponents
select case (component)
case (1)
loadCases(i)%fieldBC(1)%componentBC(component)%ID = COMPONENT_MECH_X_ID
case (2)
loadCases(i)%fieldBC(1)%componentBC(component)%ID = COMPONENT_MECH_Y_ID
case (3)
loadCases(i)%fieldBC(1)%componentBC(component)%ID = COMPONENT_MECH_Z_ID
end select
end do
do component = 1, loadCases(i)%fieldBC(1)%nComponents
allocate(loadCases(i)%fieldBC(1)%componentBC(component)%Value(mesh_Nboundaries), source = 0.0_pREAL)
allocate(loadCases(i)%fieldBC(1)%componentBC(component)%Mask (mesh_Nboundaries), source = .false.)
end do
end do
!--------------------------------------------------------------------------------------------------
! reading the load case and assign values to the allocated data structure
do l = 1, size(fileContent)
line = fileContent(l)
if (IO_isBlank(line)) cycle ! skip empty lines
chunkPos = IO_strPos(line)
do i = 1, chunkPos(1)
select case (IO_strValue(line,chunkPos,i))
!--------------------------------------------------------------------------------------------------
! loadcase information
case('$Loadcase')
currentLoadCase = IO_intValue(line,chunkPos,i+1)
case('Face')
currentFace = IO_intValue(line,chunkPos,i+1)
currentFaceSet = -1 currentFaceSet = -1
do faceSet = 1, mesh_Nboundaries do faceSet = 1, mesh_Nboundaries
if (mesh_boundaries(faceSet) == currentFace) currentFaceSet = faceSet if (mesh_boundaries(faceSet) == mech_BC%get_asInt('tag')) currentFaceSet = faceSet
end do end do
if (currentFaceSet < 0) call IO_error(error_ID = 837, ext_msg = 'invalid BC') if (currentFaceSet < 0) call IO_error(error_ID = 837, ext_msg = 'invalid BC')
case('t') mech_u => mech_BC%get_list('dot_u')
loadCases(currentLoadCase)%time = IO_realValue(line,chunkPos,i+1) do component = 1, dimPlex
case('N') if (mech_u%get_asStr(component) /= 'x') then
loadCases(currentLoadCase)%incs = IO_intValue(line,chunkPos,i+1) loadCases(l)%mechBC(currentFaceSet)%Mask(component) = .true.
case('f_out') loadCases(l)%mechBC(currentFaceSet)%Value(component) = mech_u%get_asReal(component)
loadCases(currentLoadCase)%outputfrequency = IO_intValue(line,chunkPos,i+1)
case('estimate_rate')
loadCases(currentLoadCase)%followFormerTrajectory = .false. ! do not continue to predict deformation along former trajectory
!--------------------------------------------------------------------------------------------------
! boundary condition information
case('X','Y','Z')
select case(IO_strValue(line,chunkPos,i))
case('X')
ID = COMPONENT_MECH_X_ID
case('Y')
ID = COMPONENT_MECH_Y_ID
case('Z')
ID = COMPONENT_MECH_Z_ID
end select
do component = 1, loadcases(currentLoadCase)%fieldBC(1)%nComponents
if (loadCases(currentLoadCase)%fieldBC(1)%componentBC(component)%ID == ID) then
loadCases(currentLoadCase)%fieldBC(1)%componentBC(component)%Mask (currentFaceSet) = &
.true.
loadCases(currentLoadCase)%fieldBC(1)%componentBC(component)%Value(currentFaceSet) = &
IO_realValue(line,chunkPos,i+1)
end if end if
end do end do
end select
end do end do
step_discretization => load_step%get_dict('discretization')
loadCases(l)%t = step_discretization%get_asReal('t')
loadCases(l)%N = step_discretization%get_asInt ('N')
if (load_step%get_asStr('f_out',defaultVal='n/a') == 'none') then
loadCases(l)%f_out = huge(0)
else
loadCases(l)%f_out = load_step%get_asInt('f_out', defaultVal=1)
end if
loadCases(l)%estimate_rate = (load_step%get_asBool('estimate_rate',defaultVal=.true.) .and. l>1)
end do end do
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
! consistency checks and output of load case ! consistency checks and output of load case
loadCases(1)%followFormerTrajectory = .false. ! cannot guess along trajectory for first inc of first currentLoadCase
errorID = 0 errorID = 0
checkLoadcases: do currentLoadCase = 1, size(loadCases) checkLoadcases: do l = 1, load_steps%length
write (loadcase_string, '(i0)' ) currentLoadCase write (loadcase_string, '(i0)' ) l
print'(/,1x,a,1x,i0)', 'load case:', currentLoadCase print'(/,1x,a,1x,i0)', 'load case:', l
if (.not. loadCases(currentLoadCase)%followFormerTrajectory) & if (.not. loadCases(l)%estimate_rate) &
print'(2x,a)', 'drop guessing along trajectory' print'(2x,a)', 'drop guessing along trajectory'
print'(2x,a)', 'Field '//trim(FIELD_MECH_label) print'(2x,a)', 'Field '//trim(FIELD_MECH_label)
do faceSet = 1, mesh_Nboundaries do faceSet = 1, mesh_Nboundaries
do component = 1, loadCases(currentLoadCase)%fieldBC(1)%nComponents do component = 1, dimPlex
if (loadCases(currentLoadCase)%fieldBC(1)%componentBC(component)%Mask(faceSet)) & if (loadCases(l)%mechBC(faceSet)%Mask(component)) &
print'(a,i2,a,i2,a,f12.7)', & print'(a,i2,a,i2,a,f12.7)', &
' Face ', mesh_boundaries(faceSet), & ' Face ', mesh_boundaries(faceSet), &
' Component ', component, & ' Component ', component, &
' Value ', loadCases(currentLoadCase)%fieldBC(1)%componentBC(component)%Value(faceSet) ' Value ', loadCases(l)%mechBC(faceSet)%Value(component)
end do end do
end do end do
print'(2x,a,f12.6)', 'time: ', loadCases(currentLoadCase)%time print'(2x,a,f12.6)', 'time: ', loadCases(l)%t
if (loadCases(currentLoadCase)%incs < 1) errorID = 835 ! non-positive incs count if (loadCases(l)%N < 1) errorID = 835 ! non-positive incs count
print'(2x,a,i5)', 'increments: ', loadCases(currentLoadCase)%incs print'(2x,a,i5)', 'increments: ', loadCases(l)%N
if (loadCases(currentLoadCase)%outputfrequency < 1) errorID = 836 ! non-positive result frequency if (loadCases(l)%f_out < 1) errorID = 836 ! non-positive result frequency
print'(2x,a,i5)', 'output frequency: ', & print'(2x,a,i5)', 'output frequency: ', &
loadCases(currentLoadCase)%outputfrequency loadCases(l)%f_out
if (errorID > 0) call IO_error(error_ID = errorID, ext_msg = loadcase_string) ! exit with error message if (errorID > 0) call IO_error(error_ID = errorID, ext_msg = loadcase_string) ! exit with error message
end do checkLoadcases end do checkLoadcases
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
! doing initialization depending on active solvers ! doing initialization depending on active solvers
call FEM_Utilities_init(num_mesh) call FEM_Utilities_init(num_mesh)
call FEM_mechanical_init(loadCases(1)%fieldBC(1),num_mesh) call FEM_mechanical_init(loadCases(1)%mechBC,num_mesh)
call config_numerics_deallocate() call config_numerics_deallocate()
if (worldrank == 0) then if (worldrank == 0) then
@ -244,46 +200,45 @@ program DAMASK_mesh
flush(IO_STDOUT) flush(IO_STDOUT)
call materialpoint_result(0,0.0_pREAL) call materialpoint_result(0,0.0_pREAL)
loadCaseLooping: do currentLoadCase = 1, size(loadCases) loadCaseLooping: do l = 1, load_steps%length
time0 = time ! load case start time t_0 = t ! load case start time
guess = loadCases(currentLoadCase)%followFormerTrajectory ! change of load case? homogeneous guess for the first inc guess = loadCases(l)%estimate_rate ! change of load case? homogeneous guess for the first inc
incLooping: do inc = 1, loadCases(currentLoadCase)%incs incLooping: do inc = 1, loadCases(l)%N
totalIncsCounter = totalIncsCounter + 1 totalIncsCounter = totalIncsCounter + 1
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
! forwarding time ! forwarding time
timeIncOld = timeinc ! last timeinc that brought former inc to an end Delta_t_prev = Delta_t ! last timeinc that brought former inc to an end
timeinc = loadCases(currentLoadCase)%time/real(loadCases(currentLoadCase)%incs,pREAL) Delta_t = loadCases(l)%t/real(loadCases(l)%N,pREAL)
timeinc = timeinc * real(subStepFactor,pREAL)**real(-cutBackLevel,pREAL) ! depending on cut back level, decrease time step Delta_t = Delta_t * real(subStepFactor,pREAL)**real(-cutBackLevel,pREAL) ! depending on cut back level, decrease time step
stepFraction = 0 ! fraction scaled by stepFactor**cutLevel stepFraction = 0 ! fraction scaled by stepFactor**cutLevel
subStepLooping: do while (stepFraction < subStepFactor**cutBackLevel) subStepLooping: do while (stepFraction < subStepFactor**cutBackLevel)
remainingLoadCaseTime = loadCases(currentLoadCase)%time+time0 - time t = t + Delta_t ! forward target time
time = time + timeinc ! forward target time
stepFraction = stepFraction + 1 ! count step stepFraction = stepFraction + 1 ! count step
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
! report begin of new step ! report begin of new step
print'(/,1x,a)', '###########################################################################' print'(/,1x,a)', '###########################################################################'
print'(1x,a,es12.5,6(a,i0))',& print'(1x,a,es12.5,6(a,i0))',&
'Time', time, & 'Time', t, &
's: Increment ', inc, '/', loadCases(currentLoadCase)%incs,& 's: Increment ', inc, '/', loadCases(l)%N,&
'-', stepFraction, '/', subStepFactor**cutBackLevel,& '-', stepFraction, '/', subStepFactor**cutBackLevel,&
' of load case ', currentLoadCase,'/',size(loadCases) ' of load case ', l,'/',load_steps%length
write(incInfo,'(4(a,i0))') & write(incInfo,'(4(a,i0))') &
'Increment ',totalIncsCounter,'/',sum(loadCases%incs),& 'Increment ',totalIncsCounter,'/',sum(loadCases%N),&
'-',stepFraction, '/', subStepFactor**cutBackLevel '-',stepFraction, '/', subStepFactor**cutBackLevel
flush(IO_STDOUT) flush(IO_STDOUT)
call FEM_mechanical_forward(guess,timeinc,timeIncOld,loadCases(currentLoadCase)%fieldBC(1)) call FEM_mechanical_forward(guess,Delta_t,Delta_t_prev,loadCases(l)%mechBC)
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
! solve fields ! solve fields
stagIter = 0 stagIter = 0
stagIterate = .true. stagIterate = .true.
do while (stagIterate) do while (stagIterate)
solres(1) = FEM_mechanical_solution(incInfo,timeinc,timeIncOld,loadCases(currentLoadCase)%fieldBC(1)) solres(1) = FEM_mechanical_solution(incInfo,Delta_t,Delta_t_prev,loadCases(l)%mechBC)
if (.not. solres(1)%converged) exit if (.not. solres(1)%converged) exit
stagIter = stagIter + 1 stagIter = stagIter + 1
@ -299,8 +254,8 @@ program DAMASK_mesh
cutBack = .True. cutBack = .True.
stepFraction = (stepFraction - 1) * subStepFactor ! adjust to new denominator stepFraction = (stepFraction - 1) * subStepFactor ! adjust to new denominator
cutBackLevel = cutBackLevel + 1 cutBackLevel = cutBackLevel + 1
time = time - timeinc ! rewind time t = t - Delta_t ! rewind time
timeinc = timeinc/2.0_pREAL Delta_t = Delta_t/2.0_pREAL
print'(/,1x,a)', 'cutting back' print'(/,1x,a)', 'cutting back'
else ! default behavior, exit if spectral solver does not converge else ! default behavior, exit if spectral solver does not converge
if (worldrank == 0) close(statUnit) if (worldrank == 0) close(statUnit)
@ -308,10 +263,10 @@ program DAMASK_mesh
end if end if
else else
guess = .true. ! start guessing after first converged (sub)inc guess = .true. ! start guessing after first converged (sub)inc
timeIncOld = timeinc Delta_t_prev = Delta_t
end if end if
if (.not. cutBack .and. worldrank == 0) then if (.not. cutBack .and. worldrank == 0) then
write(statUnit,*) totalIncsCounter, time, cutBackLevel, & write(statUnit,*) totalIncsCounter, t, cutBackLevel, &
solres%converged, solres%iterationsNeeded ! write statistics about accepted solution solres%converged, solres%iterationsNeeded ! write statistics about accepted solution
flush(statUnit) flush(statUnit)
end if end if
@ -325,10 +280,10 @@ program DAMASK_mesh
print'(/,1x,a,1x,i0,1x,a)', 'increment', totalIncsCounter, 'NOT converged' print'(/,1x,a,1x,i0,1x,a)', 'increment', totalIncsCounter, 'NOT converged'
end if; flush(IO_STDOUT) end if; flush(IO_STDOUT)
if (mod(inc,loadCases(currentLoadCase)%outputFrequency) == 0) then ! at output frequency if (mod(inc,loadCases(l)%f_out) == 0) then ! at output frequency
print'(/,1x,a)', '... saving results ........................................................' print'(/,1x,a)', '... saving results ........................................................'
call FEM_mechanical_updateCoords() call FEM_mechanical_updateCoords()
call materialpoint_result(totalIncsCounter,time) call materialpoint_result(totalIncsCounter,t)
end if end if
@ -344,4 +299,5 @@ program DAMASK_mesh
call quit(0) ! no complains ;) call quit(0) ! no complains ;)
end program DAMASK_mesh end program DAMASK_mesh
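
The hunks above replace the keyword-based load case parser ($Loadcase, Face, t, N, f_out, estimate_rate read via IO_strPos/IO_strValue) with YAML parsing through the keys loadstep, boundary_conditions, mechanical (with tag and dot_u), discretization (with t and N), f_out, and estimate_rate. For orientation, a minimal load case in the new form might look like the following sketch; the face tags and numerical values are placeholders and not taken from this commit, and dot_u holds one entry per spatial dimension (dimPlex):

loadstep:
  - boundary_conditions:
      mechanical:
        - tag: 1                    # face set the condition applies to
          dot_u: [0.0, 'x', 'x']    # prescribed displacement rate; 'x' leaves a component unconstrained
        - tag: 2
          dot_u: ['x', 'x', 1.0e-3]
    discretization:
      t: 10.0                       # duration of the step
      N: 40                         # number of increments
    f_out: 4                        # write results every 4th increment; 'none' maps to huge(0), i.e. never
    estimate_rate: false            # do not extrapolate along the former trajectory

As parsed above, a missing f_out defaults to 1, and estimate_rate defaults to true but is forced off for the first load step (the ".and. l>1" condition).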

View File

@ -18,6 +18,7 @@ module FEM_utilities
use math use math
use misc use misc
use IO use IO
use parallelization
use discretization_mesh use discretization_mesh
use homogenization use homogenization
use FEM_quadrature use FEM_quadrature
@ -38,18 +39,6 @@ module FEM_utilities
character(len=*), parameter, public :: & character(len=*), parameter, public :: &
FIELD_MECH_label = 'mechanical' FIELD_MECH_label = 'mechanical'
enum, bind(c); enumerator :: &
FIELD_UNDEFINED_ID, &
FIELD_MECH_ID
end enum
enum, bind(c); enumerator :: &
COMPONENT_UNDEFINED_ID, &
COMPONENT_MECH_X_ID, &
COMPONENT_MECH_Y_ID, &
COMPONENT_MECH_Z_ID
end enum
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
! derived types ! derived types
type, public :: tSolutionState !< return type of solution from FEM solver variants type, public :: tSolutionState !< return type of solution from FEM solver variants
@ -58,17 +47,11 @@ module FEM_utilities
PetscInt :: iterationsNeeded = 0_pPETSCINT PetscInt :: iterationsNeeded = 0_pPETSCINT
end type tSolutionState end type tSolutionState
type, public :: tComponentBC type, public :: tMechBC
integer(kind(COMPONENT_UNDEFINED_ID)) :: ID integer :: nComponents = 0
real(pREAL), allocatable, dimension(:) :: Value real(pREAL), allocatable, dimension(:) :: Value
logical, allocatable, dimension(:) :: Mask logical, allocatable, dimension(:) :: Mask
end type tComponentBC end type tMechBC
type, public :: tFieldBC
integer(kind(FIELD_UNDEFINED_ID)) :: ID
integer :: nComponents = 0
type(tComponentBC), allocatable, dimension(:) :: componentBC
end type tFieldBC
external :: & ! ToDo: write interfaces external :: & ! ToDo: write interfaces
PetscSectionGetFieldComponents, & PetscSectionGetFieldComponents, &
@ -78,12 +61,7 @@ module FEM_utilities
public :: & public :: &
FEM_utilities_init, & FEM_utilities_init, &
utilities_constitutiveResponse, & utilities_constitutiveResponse, &
utilities_projectBCValues, & utilities_projectBCValues
FIELD_MECH_ID, &
COMPONENT_UNDEFINED_ID, &
COMPONENT_MECH_X_ID, &
COMPONENT_MECH_Y_ID, &
COMPONENT_MECH_Z_ID
contains contains
@ -142,24 +120,23 @@ end subroutine FEM_utilities_init
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
!> @brief calculates constitutive response !> @brief calculates constitutive response
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
subroutine utilities_constitutiveResponse(timeinc,P_av,forwardData) subroutine utilities_constitutiveResponse(Delta_t,P_av,forwardData)
real(pREAL), intent(in) :: timeinc !< loading time real(pREAL), intent(in) :: Delta_t !< loading time
logical, intent(in) :: forwardData !< age results logical, intent(in) :: forwardData !< age results
real(pREAL),intent(out), dimension(3,3) :: P_av !< average PK stress real(pREAL),intent(out), dimension(3,3) :: P_av !< average PK stress
integer(MPI_INTEGER_KIND) :: err_MPI integer(MPI_INTEGER_KIND) :: err_MPI
print'(/,1x,a)', '... evaluating constitutive response ......................................' print'(/,1x,a)', '... evaluating constitutive response ......................................'
call homogenization_mechanical_response(timeinc,1,mesh_maxNips*mesh_NcpElems) ! calculate P field call homogenization_mechanical_response(Delta_t,1,mesh_maxNips*mesh_NcpElems) ! calculate P field
if (.not. terminallyIll) &
call homogenization_mechanical_response2(timeinc,[1,mesh_maxNips],[1,mesh_NcpElems])
cutBack = .false. cutBack = .false.
P_av = sum(homogenization_P,dim=3) * wgt P_av = sum(homogenization_P,dim=3) * wgt
call MPI_Allreduce(MPI_IN_PLACE,P_av,9_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI) call MPI_Allreduce(MPI_IN_PLACE,P_av,9_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call parallelization_chkerr(err_MPI)
end subroutine utilities_constitutiveResponse end subroutine utilities_constitutiveResponse
@ -168,7 +145,7 @@ end subroutine utilities_constitutiveResponse
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
!> @brief Project BC values to local vector !> @brief Project BC values to local vector
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
subroutine utilities_projectBCValues(localVec,section,field,comp,bcPointsIS,BCValue,BCDotValue,timeinc) subroutine utilities_projectBCValues(localVec,section,field,comp,bcPointsIS,BCValue,BCDotValue,Delta_t)
Vec :: localVec Vec :: localVec
PetscInt :: field, comp, nBcPoints, point, dof, numDof, numComp, offset PetscInt :: field, comp, nBcPoints, point, dof, numDof, numComp, offset
@ -176,7 +153,7 @@ subroutine utilities_projectBCValues(localVec,section,field,comp,bcPointsIS,BCVa
IS :: bcPointsIS IS :: bcPointsIS
PetscInt, pointer :: bcPoints(:) PetscInt, pointer :: bcPoints(:)
real(pREAL), pointer :: localArray(:) real(pREAL), pointer :: localArray(:)
real(pREAL) :: BCValue,BCDotValue,timeinc real(pREAL) :: BCValue,BCDotValue,Delta_t
PetscErrorCode :: err_PETSc PetscErrorCode :: err_PETSc
@ -193,7 +170,7 @@ subroutine utilities_projectBCValues(localVec,section,field,comp,bcPointsIS,BCVa
call PetscSectionGetFieldOffset(section,bcPoints(point),field,offset,err_PETSc) call PetscSectionGetFieldOffset(section,bcPoints(point),field,offset,err_PETSc)
CHKERRQ(err_PETSc) CHKERRQ(err_PETSc)
do dof = offset+comp+1, offset+numDof, numComp do dof = offset+comp+1, offset+numDof, numComp
localArray(dof) = localArray(dof) + BCValue + BCDotValue*timeinc localArray(dof) = localArray(dof) + BCValue + BCDotValue*Delta_t
end do end do
end do end do
call VecRestoreArrayF90(localVec,localArray,err_PETSc) call VecRestoreArrayF90(localVec,localArray,err_PETSc)
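
A note on the projection above: the call sites in mesh_mech_FEM.f90 further down pass BCValue = 0.0_pREAL and BCDotValue = mechBC(face)%Value(component), i.e. the dot_u entry from the load case, so each constrained DOF is advanced by the prescribed rate times the time increment. With the argument values substituted into the update line above (a reading aid only, not code from this commit):

  localArray(dof) = localArray(dof) + 0.0_pREAL + mechBC(face)%Value(component)*Delta_t

The load case values are therefore rates rather than total displacements.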

View File

@ -121,13 +121,13 @@ subroutine discretization_mesh_init(restart)
CHKERRQ(err_PETSc) CHKERRQ(err_PETSc)
mesh_Nboundaries = int(Nboundaries) mesh_Nboundaries = int(Nboundaries)
call MPI_Bcast(mesh_Nboundaries,1_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) call MPI_Bcast(mesh_Nboundaries,1_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call parallelization_chkerr(err_MPI)
call MPI_Bcast(mesh_NcpElemsGlobal,1_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) call MPI_Bcast(mesh_NcpElemsGlobal,1_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call parallelization_chkerr(err_MPI)
dim = int(dimPlex) dim = int(dimPlex)
call MPI_Bcast(dim,1_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) call MPI_Bcast(dim,1_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
dimPlex = int(dim,pPETSCINT) dimPlex = int(dim,pPETSCINT)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call parallelization_chkerr(err_MPI)
if (worldsize == 1) then if (worldsize == 1) then
call DMClone(globalMesh,geomMesh,err_PETSc) call DMClone(globalMesh,geomMesh,err_PETSc)
@ -149,7 +149,7 @@ subroutine discretization_mesh_init(restart)
call ISRestoreIndicesF90(faceSetIS,pFaceSets,err_PETSc) call ISRestoreIndicesF90(faceSetIS,pFaceSets,err_PETSc)
end if end if
call MPI_Bcast(mesh_boundaries,mesh_Nboundaries,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI) call MPI_Bcast(mesh_boundaries,mesh_Nboundaries,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call parallelization_chkerr(err_MPI)
call DMDestroy(globalMesh,err_PETSc) call DMDestroy(globalMesh,err_PETSc)
CHKERRQ(err_PETSc) CHKERRQ(err_PETSc)
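
Here and in the other files of this commit, the inline pattern "if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'" is replaced by calls to parallelization_chkerr (note the added "use parallelization" in FEM_utilities above). The helper itself is not part of this excerpt; a plausible shape, inferred purely from the checks it replaces, would be:

  subroutine parallelization_chkerr(e)
    ! sketch only: centralizes the MPI return-code check that was previously inlined;
    ! MPI_INTEGER_KIND as provided by the MPI module already used in these files
    integer(MPI_INTEGER_KIND), intent(in) :: e
    if (e /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
  end subroutine parallelization_chkerr

Centralizing the check keeps the call sites to a single line and allows the error reporting to be improved in one place later.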

View File

@ -36,8 +36,8 @@ module mesh_mechanical_FEM
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
! derived types ! derived types
type tSolutionParams type tSolutionParams
type(tFieldBC) :: fieldBC type(tMechBC),allocatable, dimension(:) :: mechBC
real(pREAL) :: timeinc real(pREAL) :: Delta_t
end type tSolutionParams end type tSolutionParams
type(tSolutionParams) :: params type(tSolutionParams) :: params
@ -97,9 +97,9 @@ contains
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
!> @brief allocates all neccessary fields and fills them with data !> @brief allocates all neccessary fields and fills them with data
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
subroutine FEM_mechanical_init(fieldBC,num_mesh) subroutine FEM_mechanical_init(mechBC,num_mesh)
type(tFieldBC), intent(in) :: fieldBC type(tMechBC), dimension(:), intent(in):: mechBC
type(tDict), pointer, intent(in) :: num_mesh type(tDict), pointer, intent(in) :: num_mesh
DM :: mechanical_mesh DM :: mechanical_mesh
@ -112,7 +112,7 @@ subroutine FEM_mechanical_init(fieldBC,num_mesh)
PetscInt, dimension(:), pointer :: pNumComp, pNumDof, pBcField, pBcPoint PetscInt, dimension(:), pointer :: pNumComp, pNumDof, pBcField, pBcPoint
PetscInt :: numBC, bcSize, nc, & PetscInt :: numBC, bcSize, nc, &
field, faceSet, topologDim, nNodalPoints, & component, faceSet, topologDim, nNodalPoints, &
cellStart, cellEnd, cell, basis cellStart, cellEnd, cell, basis
IS :: bcPoint IS :: bcPoint
@ -208,17 +208,17 @@ subroutine FEM_mechanical_init(fieldBC,num_mesh)
CHKERRQ(err_PETSc) CHKERRQ(err_PETSc)
end do end do
numBC = 0 numBC = 0
do field = 1, dimPlex; do faceSet = 1, mesh_Nboundaries do faceSet = 1, mesh_Nboundaries; do component = 1, dimPlex
if (fieldBC%componentBC(field)%Mask(faceSet)) numBC = numBC + 1 if (mechBC(faceSet)%Mask(component)) numBC = numBC + 1
end do; end do end do; end do
allocate(pbcField(numBC), source=0_pPETSCINT) allocate(pbcField(numBC), source=0_pPETSCINT)
allocate(pbcComps(numBC)) allocate(pbcComps(numBC))
allocate(pbcPoints(numBC)) allocate(pbcPoints(numBC))
numBC = 0 numBC = 0
do field = 1, dimPlex; do faceSet = 1, mesh_Nboundaries do faceSet = 1, mesh_Nboundaries; do component = 1, dimPlex
if (fieldBC%componentBC(field)%Mask(faceSet)) then if (mechBC(faceSet)%Mask(component)) then
numBC = numBC + 1 numBC = numBC + 1
call ISCreateGeneral(PETSC_COMM_WORLD,1_pPETSCINT,[field-1],PETSC_COPY_VALUES,pbcComps(numBC),err_PETSc) call ISCreateGeneral(PETSC_COMM_WORLD,1_pPETSCINT,[component-1],PETSC_COPY_VALUES,pbcComps(numBC),err_PETSc)
CHKERRQ(err_PETSc) CHKERRQ(err_PETSc)
call DMGetStratumSize(mechanical_mesh,'Face Sets',mesh_boundaries(faceSet),bcSize,err_PETSc) call DMGetStratumSize(mechanical_mesh,'Face Sets',mesh_boundaries(faceSet),bcSize,err_PETSc)
CHKERRQ(err_PETSc) CHKERRQ(err_PETSc)
@ -320,15 +320,15 @@ end subroutine FEM_mechanical_init
!> @brief solution for the FEM load step !> @brief solution for the FEM load step
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
type(tSolutionState) function FEM_mechanical_solution( & type(tSolutionState) function FEM_mechanical_solution( &
incInfoIn,timeinc,timeinc_old,fieldBC) incInfoIn,Delta_t,Delta_t_prev,mechBC)
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
! input data for solution ! input data for solution
real(pREAL), intent(in) :: & real(pREAL), intent(in) :: &
timeinc, & !< increment in time for current solution Delta_t, & !< increment in time for current solution
timeinc_old !< increment in time of last increment Delta_t_prev !< increment in time of last increment
type(tFieldBC), intent(in) :: & type(tMechBC), dimension(:),intent(in) :: &
fieldBC mechBC
character(len=*), intent(in) :: & character(len=*), intent(in) :: &
incInfoIn incInfoIn
@ -339,8 +339,8 @@ type(tSolutionState) function FEM_mechanical_solution( &
FEM_mechanical_solution%converged = .false. FEM_mechanical_solution%converged = .false.
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
! set module wide availabe data ! set module wide availabe data
params%timeinc = timeinc params%Delta_t = Delta_t
params%fieldBC = fieldBC params%mechBC = mechBC
call SNESSolve(mechanical_snes,PETSC_NULL_VEC,solution,err_PETSc) ! solve mechanical_snes based on solution guess (result in solution) call SNESSolve(mechanical_snes,PETSC_NULL_VEC,solution,err_PETSc) ! solve mechanical_snes based on solution guess (result in solution)
CHKERRQ(err_PETSc) CHKERRQ(err_PETSc)
@ -380,7 +380,7 @@ subroutine FEM_mechanical_formResidual(dm_local,xx_local,f_local,dummy,err_PETSc
real(pREAL), dimension(cellDof), target :: f_scal real(pREAL), dimension(cellDof), target :: f_scal
PetscReal :: IcellJMat(dimPlex,dimPlex) PetscReal :: IcellJMat(dimPlex,dimPlex)
PetscReal, dimension(:),pointer :: pV0, pCellJ, pInvcellJ, basisField, basisFieldDer PetscReal, dimension(:),pointer :: pV0, pCellJ, pInvcellJ, basisField, basisFieldDer
PetscInt :: cellStart, cellEnd, cell, field, face, & PetscInt :: cellStart, cellEnd, cell, component, face, &
qPt, basis, comp, cidx, & qPt, basis, comp, cidx, &
numFields, & numFields, &
bcSize,m,i bcSize,m,i
@ -406,14 +406,14 @@ subroutine FEM_mechanical_formResidual(dm_local,xx_local,f_local,dummy,err_PETSc
CHKERRQ(err_PETSc) CHKERRQ(err_PETSc)
call VecWAXPY(x_local,1.0_pREAL,xx_local,solution_local,err_PETSc) call VecWAXPY(x_local,1.0_pREAL,xx_local,solution_local,err_PETSc)
CHKERRQ(err_PETSc) CHKERRQ(err_PETSc)
do field = 1_pPETSCINT, dimPlex; do face = 1_pPETSCINT, mesh_Nboundaries do face = 1_pPETSCINT, mesh_Nboundaries; do component = 1_pPETSCINT, dimPlex
if (params%fieldBC%componentBC(field)%Mask(face)) then if (params%mechBC(face)%Mask(component)) then
call DMGetStratumSize(dm_local,'Face Sets',mesh_boundaries(face),bcSize,err_PETSc) call DMGetStratumSize(dm_local,'Face Sets',mesh_boundaries(face),bcSize,err_PETSc)
if (bcSize > 0) then if (bcSize > 0) then
call DMGetStratumIS(dm_local,'Face Sets',mesh_boundaries(face),bcPoints,err_PETSc) call DMGetStratumIS(dm_local,'Face Sets',mesh_boundaries(face),bcPoints,err_PETSc)
CHKERRQ(err_PETSc) CHKERRQ(err_PETSc)
call utilities_projectBCValues(x_local,section,0_pPETSCINT,field-1,bcPoints, & call utilities_projectBCValues(x_local,section,0_pPETSCINT,component-1,bcPoints, &
0.0_pREAL,params%fieldBC%componentBC(field)%Value(face),params%timeinc) 0.0_pREAL,params%mechBC(face)%Value(component),params%Delta_t)
call ISDestroy(bcPoints,err_PETSc) call ISDestroy(bcPoints,err_PETSc)
CHKERRQ(err_PETSc) CHKERRQ(err_PETSc)
end if end if
@ -459,9 +459,9 @@ subroutine FEM_mechanical_formResidual(dm_local,xx_local,f_local,dummy,err_PETSc
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
! evaluate constitutive response ! evaluate constitutive response
call utilities_constitutiveResponse(params%timeinc,P_av,ForwardData) call utilities_constitutiveResponse(params%Delta_t,P_av,ForwardData)
call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1_MPI_INTEGER_KIND,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,err_MPI) call MPI_Allreduce(MPI_IN_PLACE,terminallyIll,1_MPI_INTEGER_KIND,MPI_LOGICAL,MPI_LOR,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error' call parallelization_chkerr(err_MPI)
ForwardData = .false. ForwardData = .false.
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
@ -529,7 +529,7 @@ subroutine FEM_mechanical_formJacobian(dm_local,xx_local,Jac_pre,Jac,dummy,err_P
real(pREAL),dimension(cellDOF,cellDOF), target :: K_e real(pREAL),dimension(cellDOF,cellDOF), target :: K_e
real(pREAL),dimension(cellDOF,cellDOF) :: K_eA, K_eB real(pREAL),dimension(cellDOF,cellDOF) :: K_eA, K_eB
PetscInt :: cellStart, cellEnd, cell, field, face, & PetscInt :: cellStart, cellEnd, cell, component, face, &
qPt, basis, comp, cidx,bcSize, m, i qPt, basis, comp, cidx,bcSize, m, i
IS :: bcPoints IS :: bcPoints
@ -556,14 +556,14 @@ subroutine FEM_mechanical_formJacobian(dm_local,xx_local,Jac_pre,Jac,dummy,err_P
CHKERRQ(err_PETSc) CHKERRQ(err_PETSc)
call VecWAXPY(x_local,1.0_pREAL,xx_local,solution_local,err_PETSc) call VecWAXPY(x_local,1.0_pREAL,xx_local,solution_local,err_PETSc)
CHKERRQ(err_PETSc) CHKERRQ(err_PETSc)
do field = 1, dimPlex; do face = 1, mesh_Nboundaries do face = 1, mesh_Nboundaries; do component = 1, dimPlex
if (params%fieldBC%componentBC(field)%Mask(face)) then if (params%mechBC(face)%Mask(component)) then
call DMGetStratumSize(dm_local,'Face Sets',mesh_boundaries(face),bcSize,err_PETSc) call DMGetStratumSize(dm_local,'Face Sets',mesh_boundaries(face),bcSize,err_PETSc)
if (bcSize > 0) then if (bcSize > 0) then
call DMGetStratumIS(dm_local,'Face Sets',mesh_boundaries(face),bcPoints,err_PETSc) call DMGetStratumIS(dm_local,'Face Sets',mesh_boundaries(face),bcPoints,err_PETSc)
CHKERRQ(err_PETSc) CHKERRQ(err_PETSc)
call utilities_projectBCValues(x_local,section,0_pPETSCINT,field-1,bcPoints, & call utilities_projectBCValues(x_local,section,0_pPETSCINT,component-1,bcPoints, &
0.0_pREAL,params%fieldBC%componentBC(field)%Value(face),params%timeinc) 0.0_pREAL,params%mechBC(face)%Value(component),params%Delta_t)
call ISDestroy(bcPoints,err_PETSc) call ISDestroy(bcPoints,err_PETSc)
CHKERRQ(err_PETSc) CHKERRQ(err_PETSc)
end if end if
@ -665,17 +665,17 @@ end subroutine FEM_mechanical_formJacobian
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
!> @brief forwarding routine !> @brief forwarding routine
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
subroutine FEM_mechanical_forward(guess,timeinc,timeinc_old,fieldBC) subroutine FEM_mechanical_forward(guess,Delta_t,Delta_t_prev,mechBC)
type(tFieldBC), intent(in) :: & type(tMechBC), dimension(:), intent(in) :: &
fieldBC mechBC
real(pREAL), intent(in) :: & real(pREAL), intent(in) :: &
timeinc_old, & Delta_t_prev, &
timeinc Delta_t
logical, intent(in) :: & logical, intent(in) :: &
guess guess
PetscInt :: field, face, bcSize PetscInt :: component, face, bcSize
DM :: dm_local DM :: dm_local
Vec :: x_local Vec :: x_local
PetscSection :: section PetscSection :: section
@ -686,7 +686,6 @@ subroutine FEM_mechanical_forward(guess,timeinc,timeinc_old,fieldBC)
! forward last inc ! forward last inc
if (guess .and. .not. cutBack) then if (guess .and. .not. cutBack) then
ForwardData = .True. ForwardData = .True.
homogenization_F0 = homogenization_F
call SNESGetDM(mechanical_snes,dm_local,err_PETSc) !< retrieve mesh info from mechanical_snes into dm_local call SNESGetDM(mechanical_snes,dm_local,err_PETSc) !< retrieve mesh info from mechanical_snes into dm_local
CHKERRQ(err_PETSc) CHKERRQ(err_PETSc)
call DMGetSection(dm_local,section,err_PETSc) call DMGetSection(dm_local,section,err_PETSc)
@ -701,14 +700,14 @@ subroutine FEM_mechanical_forward(guess,timeinc,timeinc_old,fieldBC)
CHKERRQ(err_PETSc) CHKERRQ(err_PETSc)
call VecAXPY(solution_local,1.0_pREAL,x_local,err_PETSc) call VecAXPY(solution_local,1.0_pREAL,x_local,err_PETSc)
CHKERRQ(err_PETSc) CHKERRQ(err_PETSc)
do field = 1, dimPlex; do face = 1, mesh_Nboundaries do face = 1, mesh_Nboundaries; do component = 1, dimPlex
if (fieldBC%componentBC(field)%Mask(face)) then if (mechBC(face)%Mask(component)) then
call DMGetStratumSize(dm_local,'Face Sets',mesh_boundaries(face),bcSize,err_PETSc) call DMGetStratumSize(dm_local,'Face Sets',mesh_boundaries(face),bcSize,err_PETSc)
if (bcSize > 0) then if (bcSize > 0) then
call DMGetStratumIS(dm_local,'Face Sets',mesh_boundaries(face),bcPoints,err_PETSc) call DMGetStratumIS(dm_local,'Face Sets',mesh_boundaries(face),bcPoints,err_PETSc)
CHKERRQ(err_PETSc) CHKERRQ(err_PETSc)
call utilities_projectBCValues(solution_local,section,0_pPETSCINT,field-1,bcPoints, & call utilities_projectBCValues(solution_local,section,0_pPETSCINT,component-1,bcPoints, &
0.0_pREAL,fieldBC%componentBC(field)%Value(face),timeinc_old) 0.0_pREAL,mechBC(face)%Value(component),Delta_t_prev)
call ISDestroy(bcPoints,err_PETSc) call ISDestroy(bcPoints,err_PETSc)
CHKERRQ(err_PETSc) CHKERRQ(err_PETSc)
end if end if
@ -721,12 +720,12 @@ subroutine FEM_mechanical_forward(guess,timeinc,timeinc_old,fieldBC)
! update rate and forward last inc ! update rate and forward last inc
call VecCopy(solution,solution_rate,err_PETSc) call VecCopy(solution,solution_rate,err_PETSc)
CHKERRQ(err_PETSc) CHKERRQ(err_PETSc)
call VecScale(solution_rate,timeinc_old**(-1),err_PETSc) call VecScale(solution_rate,Delta_t_prev**(-1),err_PETSc)
CHKERRQ(err_PETSc) CHKERRQ(err_PETSc)
end if end if
call VecCopy(solution_rate,solution,err_PETSc) call VecCopy(solution_rate,solution,err_PETSc)
CHKERRQ(err_PETSc) CHKERRQ(err_PETSc)
call VecScale(solution,timeinc,err_PETSc) call VecScale(solution,Delta_t,err_PETSc)
CHKERRQ(err_PETSc) CHKERRQ(err_PETSc)
end subroutine FEM_mechanical_forward end subroutine FEM_mechanical_forward

View File

@ -326,14 +326,6 @@ module phase
real(pREAL) :: f real(pREAL) :: f
end function phase_f_T end function phase_f_T
module subroutine plastic_nonlocal_updateCompatibility(orientation,ph,ip,el)
integer, intent(in) :: &
ph, &
ip, &
el
type(tRotationContainer), dimension(:), intent(in) :: orientation
end subroutine plastic_nonlocal_updateCompatibility
module subroutine plastic_dependentState(ph,en) module subroutine plastic_dependentState(ph,en)
integer, intent(in) :: & integer, intent(in) :: &
ph, & ph, &
@ -366,7 +358,6 @@ module phase
phase_allocateState, & phase_allocateState, &
phase_forward, & phase_forward, &
phase_restore, & phase_restore, &
plastic_nonlocal_updateCompatibility, &
converged, & converged, &
phase_mechanical_constitutive, & phase_mechanical_constitutive, &
phase_thermal_constitutive, & phase_thermal_constitutive, &
@ -387,7 +378,7 @@ contains
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
!> @brief Initialize constitutive models for individual physics !> @brief Initialize constitutive models for individual physics
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
subroutine phase_init subroutine phase_init()
integer :: & integer :: &
ph, ce, co, ma ph, ce, co, ma
@ -544,27 +535,18 @@ subroutine crystallite_init()
integer :: & integer :: &
ce, & ce, &
co, & !< counter in integration point component loop co, & !< counter in integration point component loop
ip, & !< counter in integration point loop
el, & !< counter in element loop
en, ph en, ph
type(tDict), pointer :: &
num_phase, &
phases
phases => config_material%get_dict('phase')
!$OMP PARALLEL DO PRIVATE(ce,ph,en) !$OMP PARALLEL DO PRIVATE(ph,en)
do el = 1, discretization_Nelems do ce = 1, size(material_ID_homogenization)
do ip = 1, discretization_nIPs
ce = (el-1)*discretization_nIPs + ip
do co = 1,homogenization_Nconstituents(material_ID_homogenization(ce)) do co = 1,homogenization_Nconstituents(material_ID_homogenization(ce))
en = material_entry_phase(co,ce)
ph = material_ID_phase(co,ce) ph = material_ID_phase(co,ce)
call crystallite_orientations(co,ip,el) en = material_entry_phase(co,ce)
call crystallite_orientations(co,ce)
call plastic_dependentState(ph,en) ! update dependent state variables to be consistent with basic states call plastic_dependentState(ph,en) ! update dependent state variables to be consistent with basic states
end do end do
end do end do
end do
!$OMP END PARALLEL DO !$OMP END PARALLEL DO
@ -572,32 +554,27 @@ end subroutine crystallite_init
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
!> @brief calculates orientations !> @brief Update orientations and, if needed, compatibility.
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
subroutine crystallite_orientations(co,ip,el) subroutine crystallite_orientations(co,ce)
integer, intent(in) :: & integer, intent(in) :: &
co, & !< counter in integration point component loop co, &
ip, & !< counter in integration point loop ce
el !< counter in element loop
integer :: ph, en integer :: ph, en
ph = material_ID_phase(co,(el-1)*discretization_nIPs + ip) ph = material_ID_phase(co,ce)
en = material_entry_phase(co,(el-1)*discretization_nIPs + ip) en = material_entry_phase(co,ce)
call phase_O(ph)%data(en)%fromMatrix(transpose(math_rotationalPart(mechanical_F_e(ph,en)))) call phase_O(ph)%data(en)%fromMatrix(transpose(math_rotationalPart(mechanical_F_e(ph,en))))
if (plasticState(material_ID_phase(1,(el-1)*discretization_nIPs + ip))%nonlocal) &
call plastic_nonlocal_updateCompatibility(phase_O,material_ID_phase(1,(el-1)*discretization_nIPs + ip),ip,el)
end subroutine crystallite_orientations end subroutine crystallite_orientations
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
!> @brief Map 2nd order tensor to reference config !> @brief Map 2nd order tensor to reference configuration.
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
function crystallite_push33ToRef(co,ce, tensor33) function crystallite_push33ToRef(co,ce, tensor33)
@ -621,15 +598,17 @@ end function crystallite_push33ToRef
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
!> @brief determines whether a point is converged !> @brief Determine whether a point is converged.
!-------------------------------------------------------------------------------------------------- !--------------------------------------------------------------------------------------------------
logical pure function converged(residuum,state,atol) logical pure function converged(residuum,state,atol)
real(pREAL), intent(in), dimension(:) :: & real(pREAL), intent(in), dimension(:) :: &
residuum, state, atol residuum, state, atol
real(pREAL) :: & real(pREAL) :: &
rTol rTol
rTol = num%rTol_crystalliteState rTol = num%rTol_crystalliteState
converged = all(abs(residuum) <= max(atol, rtol*abs(state))) converged = all(abs(residuum) <= max(atol, rtol*abs(state)))
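
Two related simplifications appear in this file: crystallite_init now loops directly over the linear cell index ce = 1, size(material_ID_homogenization) instead of the nested (el,ip) pair, and crystallite_orientations takes (co,ce) while the nonlocal compatibility update moves into plastic_dependentState (see the next file). A hypothetical legacy caller still holding (ip,el) indices would translate them with the same formula the removed lines used:

  ce = (el-1)*discretization_nIPs + ip   ! mapping used by the deleted code
  call crystallite_orientations(co,ce)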

View File

@ -204,6 +204,11 @@ submodule(phase:mechanical) plastic
en en
end subroutine plastic_nonlocal_deltaState end subroutine plastic_nonlocal_deltaState
module subroutine plastic_nonlocal_updateCompatibility(orientation,ph,en)
integer, intent(in) :: ph, en
type(tRotationContainer), dimension(:), intent(in) :: orientation
end subroutine plastic_nonlocal_updateCompatibility
end interface end interface
contains contains
@ -359,6 +364,7 @@ module subroutine plastic_dependentState(ph,en)
case (MECHANICAL_PLASTICITY_NONLOCAL) plasticType case (MECHANICAL_PLASTICITY_NONLOCAL) plasticType
call nonlocal_dependentState(ph,en) call nonlocal_dependentState(ph,en)
if (plasticState(ph)%nonlocal) call plastic_nonlocal_updateCompatibility(phase_O,ph,en)
end select plasticType end select plasticType

File diff suppressed because it is too large