Merge branch 'Fortran-polishing' into 'development'

A number of small improvements.

See merge request damask/DAMASK!516

Commit e0ed668ce0
@@ -42,7 +42,7 @@ string(TOUPPER "${CMAKE_BUILD_TYPE}" CMAKE_BUILD_TYPE)
 if(CMAKE_BUILD_TYPE STREQUAL "DEBUG" OR CMAKE_BUILD_TYPE STREQUAL "SYNTAXONLY")
 set(DEBUG_FLAGS "${DEBUG_FLAGS} -DDEBUG")
 set(PARALLEL "OFF")
-set(OPTI "OFF")
+set(OPTI "DEBUG")
 elseif(CMAKE_BUILD_TYPE STREQUAL "RELEASE")
 set(PARALLEL "ON")
 set(OPTI "DEFENSIVE")
PRIVATE
@@ -1 +1 @@
-Subproject commit ebb7f0ce78d11275020af0ba60f929f95b446932
+Subproject commit 5774122bf48d637704bb4afb10b87c34a4dbcaba
@@ -9,26 +9,24 @@ if (OPENMP)
 set (OPENMP_FLAGS "-fopenmp")
 endif ()

-if (OPTIMIZATION STREQUAL "OFF")
+if (OPTIMIZATION STREQUAL "DEBUG")
+set (OPTIMIZATION_FLAGS "-Og")
+elseif (OPTIMIZATION STREQUAL "OFF")
 set (OPTIMIZATION_FLAGS "-O0")
 elseif (OPTIMIZATION STREQUAL "DEFENSIVE")
-set (OPTIMIZATION_FLAGS "-O2 -mtune=generic")
+set (OPTIMIZATION_FLAGS "-O2 -mtune=native")
 elseif (OPTIMIZATION STREQUAL "AGGRESSIVE")
-set (OPTIMIZATION_FLAGS "-O3 -march=native -ffast-math -funroll-loops -ftree-vectorize")
+set (OPTIMIZATION_FLAGS "-O3 -march=native -funroll-loops -ftree-vectorize -flto")
 endif ()

 set (STANDARD_CHECK "-std=f2018 -pedantic-errors" )
 set (LINKER_FLAGS "${LINKER_FLAGS} -Wl")
 # options parsed directly to the linker
 set (LINKER_FLAGS "${LINKER_FLAGS},-undefined,dynamic_lookup" )
 # ensure to link against dynamic libraries

 #------------------------------------------------------------------------------------------------
 # Fine tuning compilation options
 set (COMPILE_FLAGS "${COMPILE_FLAGS} -cpp")
 # preprocessor

-set (COMPILE_FLAGS "${COMPILE_FLAGS} -fPIC -fPIE")
+set (COMPILE_FLAGS "${COMPILE_FLAGS} -fPIE")
 # position independent code

 set (COMPILE_FLAGS "${COMPILE_FLAGS} -ffree-line-length-132")
@@ -123,6 +121,9 @@ set (DEBUG_FLAGS "${DEBUG_FLAGS} -ffpe-trap=invalid,zero,overflow")
 set (DEBUG_FLAGS "${DEBUG_FLAGS} -g")
 # Generate symbolic debugging information in the object file

+set (DEBUG_FLAGS "${DEBUG_FLAGS} -Og")
+# Optimize debugging experience
+
 set (DEBUG_FLAGS "${DEBUG_FLAGS} -fbacktrace")
 set (DEBUG_FLAGS "${DEBUG_FLAGS} -fdump-core")
 set (DEBUG_FLAGS "${DEBUG_FLAGS} -fcheck=all")
@@ -9,12 +9,12 @@ if (OPENMP)
 set (OPENMP_FLAGS "-qopenmp -parallel")
 endif ()

-if (OPTIMIZATION STREQUAL "OFF")
+if (OPTIMIZATION STREQUAL "OFF" OR OPTIMIZATION STREQUAL "DEBUG")
 set (OPTIMIZATION_FLAGS "-O0 -no-ip")
 elseif (OPTIMIZATION STREQUAL "DEFENSIVE")
 set (OPTIMIZATION_FLAGS "-O2")
 elseif (OPTIMIZATION STREQUAL "AGGRESSIVE")
-set (OPTIMIZATION_FLAGS "-ipo -O3 -no-prec-div -fp-model fast=2 -xHost")
+set (OPTIMIZATION_FLAGS "-ipo -O3 -fp-model fast=2 -xHost")
 # -fast = -ipo, -O3, -no-prec-div, -static, -fp-model fast=2, and -xHost"
 endif ()
@@ -110,6 +110,9 @@ set (DEBUG_FLAGS "${DEBUG_FLAGS} -fpe-all=0")
 # generate debug information for parameters
+# Disabled due to ICE when compiling phase_damage.f90 (not understandable, there is no parameter in there)

 set (DEBUG_FLAGS "${DEBUG_FLAGS} -debug all")
 # generate complete debugging information

 # Additional options
 # -heap-arrays: Should not be done for OpenMP, but set "ulimit -s unlimited" on shell. Probably it helps also to unlimit other limits
 # -check: Checks at runtime, where
@@ -9,7 +9,7 @@ if (OPENMP)
 set (OPENMP_FLAGS "-qopenmp")
 endif ()

-if (OPTIMIZATION STREQUAL "OFF")
+if (OPTIMIZATION STREQUAL "OFF" OR OPTIMIZATION STREQUAL "DEBUG")
 set (OPTIMIZATION_FLAGS "-O0")
 elseif (OPTIMIZATION STREQUAL "DEFENSIVE")
 set (OPTIMIZATION_FLAGS "-O2")
@@ -109,6 +109,9 @@ set (DEBUG_FLAGS "${DEBUG_FLAGS} -fpe-all=0")
 set (DEBUG_FLAGS "${DEBUG_FLAGS} -debug-parameters all")
 # generate debug information for parameters

 set (DEBUG_FLAGS "${DEBUG_FLAGS} -debug all")
 # generate complete debugging information

 # Additional options
 # -heap-arrays: Should not be done for OpenMP, but set "ulimit -s unlimited" on shell. Probably it helps also to unlimit other limits
 # -check: Checks at runtime, where
@@ -62,8 +62,8 @@ class Grid:
 mat_max = np.nanmax(self.material)
 mat_N = self.N_materials
 return util.srepr([
-f'cells : {util.srepr(self.cells, " x ")}',
-f'size : {util.srepr(self.size, " x ")} / m³',
+f'cells: {util.srepr(self.cells, " × ")}',
+f'size: {util.srepr(self.size, " × ")} / m³',
 f'origin: {util.srepr(self.origin," ")} / m',
 f'# materials: {mat_N}' + ('' if mat_min == 0 and mat_max+1 == mat_N else
 f' (min: {mat_min}, max: {mat_max})')
@@ -90,7 +90,6 @@ subroutine CPFEM_initAll
 call material_init(.false.)
 call phase_init
 call homogenization_init
 call crystallite_init
 call CPFEM_init
 call config_deallocate
@@ -68,7 +68,6 @@ subroutine CPFEM_initAll
 call material_init(restart=interface_restartInc>0)
 call phase_init
 call homogenization_init
 call crystallite_init
 call CPFEM_init
 call config_deallocate
@@ -10,7 +10,8 @@ module HDF5_utilities
 #include <petsc/finclude/petscsys.h>
 use PETScSys
 #if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY)
-use MPI
+use MPI_f08
+use MPI, only: MPI_INFO_NULL_F90 => MPI_INFO_NULL
 #endif
 #endif
@@ -162,9 +163,9 @@ integer(HID_T) function HDF5_openFile(fileName,mode,parallel)
 character, intent(in), optional :: mode
 logical, intent(in), optional :: parallel

 character :: m
 integer(HID_T) :: plist_id
 integer :: hdferr

 if (present(mode)) then
@@ -178,9 +179,15 @@ integer(HID_T) function HDF5_openFile(fileName,mode,parallel)

 #ifdef PETSC
 if (present(parallel)) then
+#if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY)
+if (parallel) call H5Pset_fapl_mpio_f(plist_id, PETSC_COMM_WORLD, MPI_INFO_NULL_F90, hdferr)
+else
+call H5Pset_fapl_mpio_f(plist_id, PETSC_COMM_WORLD, MPI_INFO_NULL_F90, hdferr)
+#else
 if (parallel) call H5Pset_fapl_mpio_f(plist_id, PETSC_COMM_WORLD, MPI_INFO_NULL, hdferr)
 else
 call H5Pset_fapl_mpio_f(plist_id, PETSC_COMM_WORLD, MPI_INFO_NULL, hdferr)
+#endif
 end if
 if(hdferr < 0) error stop 'HDF5 error'
 #endif
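The branch added above relies on importing the legacy integer-valued MPI_INFO_NULL under a new name next to the mpi_f08 interface. A minimal sketch of that rename-on-use idiom (a hypothetical toy module, not DAMASK code; whether both modules coexist depends on the MPI installation):

module mpi_compat_sketch
use MPI_f08                                           ! modern interface, handles are derived types
use MPI, only: MPI_INFO_NULL_F90 => MPI_INFO_NULL     ! legacy integer-valued constant under a new name
implicit none
end module mpi_compat_sketch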
@@ -1860,7 +1867,7 @@ subroutine initialize_read(dset_id, filespace_id, memspace_id, plist_id, aplist_
 globalShape !< shape of the dataset (all processes)
 integer(HID_T), intent(out) :: dset_id, filespace_id, memspace_id, plist_id, aplist_id

-integer, dimension(worldsize) :: &
+integer(MPI_INTEGER_KIND), dimension(worldsize) :: &
 readSize !< contribution of all processes
 integer :: hdferr
 integer(MPI_INTEGER_KIND) :: err_MPI
@@ -1871,13 +1878,13 @@ subroutine initialize_read(dset_id, filespace_id, memspace_id, plist_id, aplist_
 if(hdferr < 0) error stop 'HDF5 error'

 !--------------------------------------------------------------------------------------------------
-readSize = 0
-readSize(worldrank+1) = int(localShape(ubound(localShape,1)))
+readSize = 0_MPI_INTEGER_KIND
+readSize(worldrank+1) = int(localShape(ubound(localShape,1)),MPI_INTEGER_KIND)
 #ifdef PETSC
 if (parallel) then
 call H5Pset_dxpl_mpio_f(plist_id, H5FD_MPIO_COLLECTIVE_F, hdferr)
 if(hdferr < 0) error stop 'HDF5 error'
-call MPI_allreduce(MPI_IN_PLACE,readSize,worldsize,MPI_INTEGER,MPI_SUM,PETSC_COMM_WORLD,err_MPI) ! get total output size over each process
+call MPI_Allreduce(MPI_IN_PLACE,readSize,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,err_MPI) ! get total output size over each process
 if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
 end if
 #endif
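The pattern changed above, where every rank fills its own slot of the size table and an in-place MPI_Allreduce distributes the complete table to all ranks, can be sketched in isolation as follows (hypothetical standalone program, default integer kinds instead of MPI_INTEGER_KIND):

program allreduce_sketch
use MPI_f08
implicit none
integer :: rank, nprocs
integer, allocatable :: readSize(:)

call MPI_Init()
call MPI_Comm_rank(MPI_COMM_WORLD, rank)
call MPI_Comm_size(MPI_COMM_WORLD, nprocs)

allocate(readSize(nprocs), source=0)
readSize(rank+1) = 10*(rank+1)                        ! local contribution of this rank

call MPI_Allreduce(MPI_IN_PLACE, readSize, nprocs, MPI_INTEGER, MPI_SUM, MPI_COMM_WORLD) ! every rank gets the full table

if (rank == 0) print '(*(i0,1x))', readSize
call MPI_Finalize()
end program allreduce_sketch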
@@ -1954,8 +1961,8 @@ subroutine initialize_write(dset_id, filespace_id, memspace_id, plist_id, &
 totalShape !< shape of the dataset (all processes)
 integer(HID_T), intent(out) :: dset_id, filespace_id, memspace_id, plist_id

-integer, dimension(worldsize) :: writeSize !< contribution of all processes
+integer(MPI_INTEGER_KIND), dimension(worldsize) :: writeSize !< contribution of all processes
 integer(HID_T) :: dcpl
 integer :: hdferr
 integer(MPI_INTEGER_KIND) :: err_MPI
 integer(HSIZE_T), parameter :: chunkSize = 1024_HSIZE_T**2/8_HSIZE_T
@@ -1974,11 +1981,11 @@ subroutine initialize_write(dset_id, filespace_id, memspace_id, plist_id, &

 !--------------------------------------------------------------------------------------------------
 ! determine the global data layout among all processes
-writeSize = 0
-writeSize(worldrank+1) = int(myShape(ubound(myShape,1)))
+writeSize = 0_MPI_INTEGER_KIND
+writeSize(worldrank+1) = int(myShape(ubound(myShape,1)),MPI_INTEGER_KIND)
 #ifdef PETSC
 if (parallel) then
-call MPI_allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INTEGER,MPI_SUM,PETSC_COMM_WORLD,err_MPI) ! get total output size over each process
+call MPI_Allreduce(MPI_IN_PLACE,writeSize,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,err_MPI) ! get total output size over each process
 if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
 end if
 #endif
@@ -2009,7 +2016,7 @@ subroutine initialize_write(dset_id, filespace_id, memspace_id, plist_id, &
 if (hdferr < 0) error stop 'HDF5 error'
 end if

 !--------------------------------------------------------------------------------------------------
 ! create dataspace in memory (local shape) and in file (global shape)
 call H5Screate_simple_f(size(myShape), myShape, memspace_id, hdferr, myShape)
@@ -584,8 +584,6 @@ subroutine IO_warning(warning_ID,el,ip,g,ext_msg)
 character(len=pStringLen) :: formatString

 select case (warning_ID)
-case (42)
-msg = 'parameter has no effect'
 case (47)
 msg = 'no valid parameter for FFTW, using FFTW_PATIENT'
 case (207)
@@ -594,8 +592,6 @@ subroutine IO_warning(warning_ID,el,ip,g,ext_msg)
 msg = 'crystallite responds elastically'
 case (601)
 msg = 'stiffness close to zero'
-case (700)
-msg = 'unknown crystal symmetry'
 case (709)
 msg = 'read only the first document'
 case default
@@ -9,8 +9,8 @@ module constants
 public

 real(pReal), parameter :: &
-T_ROOM = 300.0_pReal, & !< Room temperature in K. ToDo: IUPAC: 298.15
-K_B = 1.38e-23_pReal, & !< Boltzmann constant in J/Kelvin
-N_A = 6.02214076e23_pReal !< Avogadro constant in 1/mol
+T_ROOM = 293.15_pReal, & !< Room temperature in K (20°C)
+K_B = 1.380649e-23_pReal, & !< Boltzmann constant in J/Kelvin (https://doi.org/10.1351/goldbook)
+N_A = 6.02214076e23_pReal !< Avogadro constant in 1/mol (https://doi.org/10.1351/goldbook)

 end module constants
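The updated values follow the CODATA/IUPAC definitions referenced in the new comments. A minimal usage sketch (hypothetical standalone program compiled within the DAMASK source tree, not part of this merge request):

program constants_demo
use constants, only: T_ROOM, K_B, N_A
implicit none

print '(a,es12.5)', 'thermal energy K_B*T_ROOM / J:    ', K_B*T_ROOM   ! about 4.05e-21 J at 293.15 K
print '(a,es12.5)', 'gas constant N_A*K_B / J/(mol K): ', N_A*K_B      ! about 8.314 J/(mol K)
end program constants_demo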
@@ -32,7 +32,7 @@ program DAMASK_grid
 implicit none

 type :: tLoadCase
-type(rotation) :: rot !< rotation of BC
+type(tRotation) :: rot !< rotation of BC
 type(tBoundaryCondition) :: stress, & !< stress BC
 deformation !< deformation BC (dot_F, F, or L)
 real(pReal) :: t, & !< length of increment
@@ -27,10 +27,10 @@ module discretization_grid
 private

 integer, dimension(3), public, protected :: &
-grid !< (global) grid
+cells !< (global) cells
 integer, public, protected :: &
-grid3, & !< (local) grid in 3rd direction
-grid3Offset !< (local) grid offset in 3rd direction
+cells3, & !< (local) cells in 3rd direction
+cells3Offset !< (local) cells offset in 3rd direction
 real(pReal), dimension(3), public, protected :: &
 geomSize !< (global) physical size
 real(pReal), public, protected :: &
@@ -55,7 +55,7 @@ subroutine discretization_grid_init(restart)
 mySize, & !< domain size of this process
 origin !< (global) distance to origin
 integer, dimension(3) :: &
-myGrid !< domain grid of this process
+myGrid !< domain cells of this process

 integer, dimension(:), allocatable :: &
 materialAt, materialAt_global
@@ -77,7 +77,7 @@ subroutine discretization_grid_init(restart)

 if (worldrank == 0) then
 fileContent = IO_read(interface_geomFile)
-call readVTI(grid,geomSize,origin,materialAt_global,fileContent)
+call readVTI(cells,geomSize,origin,materialAt_global,fileContent)
 fname = interface_geomFile
 if (scan(fname,'/') /= 0) fname = fname(scan(fname,'/',.true.)+1:)
 call results_openJobFile(parallel=.false.)
@@ -88,37 +88,37 @@ subroutine discretization_grid_init(restart)
 end if

-call MPI_Bcast(grid,3_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD, err_MPI)
+call MPI_Bcast(cells,3_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD, err_MPI)
 if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
-if (grid(1) < 2) call IO_error(844, ext_msg='cells(1) must be larger than 1')
+if (cells(1) < 2) call IO_error(844, ext_msg='cells(1) must be larger than 1')
 call MPI_Bcast(geomSize,3_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD, err_MPI)
 if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
 call MPI_Bcast(origin,3_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD, err_MPI)
 if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'

-print'(/,1x,a,3(i12,1x))', 'cells a b c: ', grid
-print '(1x,a,3(es12.5,1x))', 'size x y z: ', geomSize
-print '(1x,a,3(es12.5,1x))', 'origin x y z: ', origin
+print'(/,1x,a,i0,a,i0,a,i0)', 'cells: ', cells(1), ' × ', cells(2), ' × ', cells(3)
+print '(1x,a,es8.2,a,es8.2,a,es8.2,a)', 'size: ', geomSize(1), ' × ', geomSize(2), ' × ', geomSize(3), ' / m³'
+print '(1x,a,es8.2,a,es8.2,a,es8.2,a)', 'origin: ', origin(1), ' ', origin(2), ' ', origin(3), ' / m'

-if (worldsize>grid(3)) call IO_error(894, ext_msg='number of processes exceeds grid(3)')
+if (worldsize>cells(3)) call IO_error(894, ext_msg='number of processes exceeds cells(3)')

 call fftw_mpi_init
-devNull = fftw_mpi_local_size_3d(int(grid(3),C_INTPTR_T), &
-int(grid(2),C_INTPTR_T), &
-int(grid(1),C_INTPTR_T)/2+1, &
+devNull = fftw_mpi_local_size_3d(int(cells(3),C_INTPTR_T), &
+int(cells(2),C_INTPTR_T), &
+int(cells(1),C_INTPTR_T)/2+1, &
 PETSC_COMM_WORLD, &
-z, & ! domain grid size along z
-z_offset) ! domain grid offset along z
+z, & ! domain cells size along z
+z_offset) ! domain cells offset along z
 if (z==0_C_INTPTR_T) call IO_error(894, ext_msg='Cannot distribute MPI processes')

-grid3 = int(z)
-grid3Offset = int(z_offset)
-size3 = geomSize(3)*real(grid3,pReal) /real(grid(3),pReal)
-size3Offset = geomSize(3)*real(grid3Offset,pReal)/real(grid(3),pReal)
-myGrid = [grid(1:2),grid3]
+cells3 = int(z)
+cells3Offset = int(z_offset)
+size3 = geomSize(3)*real(cells3,pReal) /real(cells(3),pReal)
+size3Offset = geomSize(3)*real(cells3Offset,pReal)/real(cells(3),pReal)
+myGrid = [cells(1:2),cells3]
 mySize = [geomSize(1:2),size3]

-call MPI_Gather(product(grid(1:2))*grid3Offset, 1_MPI_INTEGER_KIND,MPI_INTEGER,displs,&
+call MPI_Gather(product(cells(1:2))*cells3Offset, 1_MPI_INTEGER_KIND,MPI_INTEGER,displs,&
 1_MPI_INTEGER_KIND,MPI_INTEGER,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
 if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
 call MPI_Gather(product(myGrid), 1_MPI_INTEGER_KIND,MPI_INTEGER,sendcounts,&
@@ -131,10 +131,10 @@ subroutine discretization_grid_init(restart)
 if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'

 call discretization_init(materialAt, &
-IPcoordinates0(myGrid,mySize,grid3Offset), &
-Nodes0(myGrid,mySize,grid3Offset),&
-merge((grid(1)+1) * (grid(2)+1) * (grid3+1),& ! write top layer...
-(grid(1)+1) * (grid(2)+1) * grid3,& ! ...unless not last process
+IPcoordinates0(myGrid,mySize,cells3Offset), &
+Nodes0(myGrid,mySize,cells3Offset),&
+merge((cells(1)+1) * (cells(2)+1) * (cells3+1),& ! write top layer...
+(cells(1)+1) * (cells(2)+1) * cells3,& ! ...unless not last process
 worldrank+1==worldsize))

 !--------------------------------------------------------------------------------------------------
@@ -142,7 +142,7 @@ subroutine discretization_grid_init(restart)
 if (.not. restart) then
 call results_openJobFile
 call results_closeGroup(results_addGroup('geometry'))
-call results_addAttribute('cells', grid, '/geometry')
+call results_addAttribute('cells', cells, '/geometry')
 call results_addAttribute('size', geomSize,'/geometry')
 call results_addAttribute('origin',origin, '/geometry')
 call results_closeJobFile
@@ -170,13 +170,13 @@ end subroutine discretization_grid_init
 !> @brief Parse vtk image data (.vti)
 !> @details https://vtk.org/Wiki/VTK_XML_Formats
 !--------------------------------------------------------------------------------------------------
-subroutine readVTI(grid,geomSize,origin,material, &
+subroutine readVTI(cells,geomSize,origin,material, &
 fileContent)

 integer, dimension(3), intent(out) :: &
-grid ! grid (across all processes!)
+cells ! cells (across all processes!)
 real(pReal), dimension(3), intent(out) :: &
 geomSize, & ! size (across all processes!)
 origin ! origin (across all processes!)
 integer, dimension(:), intent(out), allocatable :: &
 material
@@ -190,7 +190,7 @@ subroutine readVTI(grid,geomSize,origin,material, &
 s

-grid = -1
+cells = -1
 geomSize = -1.0_pReal

 inFile = .false.
@@ -215,7 +215,7 @@ subroutine readVTI(grid,geomSize,origin,material, &
 if (.not. inImage) then
 if (index(fileContent(startPos:endPos),'<ImageData',kind=pI64) /= 0_pI64) then
 inImage = .true.
-call cellsSizeOrigin(grid,geomSize,origin,fileContent(startPos:endPos))
+call cellsSizeOrigin(cells,geomSize,origin,fileContent(startPos:endPos))
 end if
 else
 if (index(fileContent(startPos:endPos),'<CellData',kind=pI64) /= 0_pI64) then
@@ -246,10 +246,10 @@ subroutine readVTI(grid,geomSize,origin,material, &

 end do

-if (.not. allocated(material)) call IO_error(error_ID = 844, ext_msg='material data not found')
-if (size(material) /= product(grid)) call IO_error(error_ID = 844, ext_msg='size(material)')
-if (any(geomSize<=0)) call IO_error(error_ID = 844, ext_msg='size')
-if (any(grid<1)) call IO_error(error_ID = 844, ext_msg='grid')
+if (.not. allocated(material)) call IO_error(error_ID = 844, ext_msg='material data not found')
+if (size(material) /= product(cells)) call IO_error(error_ID = 844, ext_msg='size(material)')
+if (any(geomSize<=0)) call IO_error(error_ID = 844, ext_msg='size')
+if (any(cells<1)) call IO_error(error_ID = 844, ext_msg='cells')
 material = material + 1
 if (any(material<1)) call IO_error(error_ID = 844, ext_msg='material ID < 0')
@@ -502,13 +502,13 @@ end subroutine readVTI
 !---------------------------------------------------------------------------------------------------
 !> @brief Calculate undeformed position of IPs/cell centers (pretend to be an element)
 !---------------------------------------------------------------------------------------------------
-function IPcoordinates0(grid,geomSize,grid3Offset)
+function IPcoordinates0(cells,geomSize,cells3Offset)

-integer, dimension(3), intent(in) :: grid ! grid (for this process!)
+integer, dimension(3), intent(in) :: cells ! cells (for this process!)
 real(pReal), dimension(3), intent(in) :: geomSize ! size (for this process!)
-integer, intent(in) :: grid3Offset ! grid(3) offset
+integer, intent(in) :: cells3Offset ! cells(3) offset

-real(pReal), dimension(3,product(grid)) :: ipCoordinates0
+real(pReal), dimension(3,product(cells)) :: ipCoordinates0

 integer :: &
 a,b,c, &
@@ -516,9 +516,9 @@ function IPcoordinates0(grid,geomSize,grid3Offset)

 i = 0
-do c = 1, grid(3); do b = 1, grid(2); do a = 1, grid(1)
+do c = 1, cells(3); do b = 1, cells(2); do a = 1, cells(1)
 i = i + 1
-IPcoordinates0(1:3,i) = geomSize/real(grid,pReal) * (real([a,b,grid3Offset+c],pReal) -0.5_pReal)
+IPcoordinates0(1:3,i) = geomSize/real(cells,pReal) * (real([a,b,cells3Offset+c],pReal) -0.5_pReal)
 end do; end do; end do

 end function IPcoordinates0
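The renamed loop places cell centers at (index - 1/2)*size/cells along each direction. A toy serial sketch of that formula (hypothetical standalone program, ignoring the cells3Offset domain decomposition):

program cell_centers
implicit none
integer, parameter :: pReal = selected_real_kind(15)            ! stand-in for DAMASK's pReal kind
integer, dimension(3), parameter :: cells = [2,2,1]
real(pReal), dimension(3), parameter :: geomSize = [1.0_pReal,1.0_pReal,1.0_pReal]
integer :: a,b,c

do c = 1, cells(3); do b = 1, cells(2); do a = 1, cells(1)
print '(3(f6.3,1x))', geomSize/real(cells,pReal) * (real([a,b,c],pReal)-0.5_pReal)   ! cell-center coordinates
end do; end do; end do
end program cell_centers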
@@ -527,22 +527,22 @@ end function IPcoordinates0
 !---------------------------------------------------------------------------------------------------
 !> @brief Calculate position of undeformed nodes (pretend to be an element)
 !---------------------------------------------------------------------------------------------------
-pure function nodes0(grid,geomSize,grid3Offset)
+pure function nodes0(cells,geomSize,cells3Offset)

-integer, dimension(3), intent(in) :: grid ! grid (for this process!)
+integer, dimension(3), intent(in) :: cells ! cells (for this process!)
 real(pReal), dimension(3), intent(in) :: geomSize ! size (for this process!)
-integer, intent(in) :: grid3Offset ! grid(3) offset
+integer, intent(in) :: cells3Offset ! cells(3) offset

-real(pReal), dimension(3,product(grid+1)) :: nodes0
+real(pReal), dimension(3,product(cells+1)) :: nodes0

 integer :: &
 a,b,c, &
 n

 n = 0
-do c = 0, grid3; do b = 0, grid(2); do a = 0, grid(1)
+do c = 0, cells3; do b = 0, cells(2); do a = 0, cells(1)
 n = n + 1
-nodes0(1:3,n) = geomSize/real(grid,pReal) * real([a,b,grid3Offset+c],pReal)
+nodes0(1:3,n) = geomSize/real(cells,pReal) * real([a,b,cells3Offset+c],pReal)
 end do; end do; end do

 end function nodes0
@@ -551,17 +551,17 @@ end function nodes0
 !--------------------------------------------------------------------------------------------------
 !> @brief Calculate IP interface areas
 !--------------------------------------------------------------------------------------------------
-pure function cellSurfaceArea(geomSize,grid)
+pure function cellSurfaceArea(geomSize,cells)

 real(pReal), dimension(3), intent(in) :: geomSize ! size (for this process!)
-integer, dimension(3), intent(in) :: grid ! grid (for this process!)
+integer, dimension(3), intent(in) :: cells ! cells (for this process!)

-real(pReal), dimension(6,1,product(grid)) :: cellSurfaceArea
+real(pReal), dimension(6,1,product(cells)) :: cellSurfaceArea

-cellSurfaceArea(1:2,1,:) = geomSize(2)/real(grid(2)) * geomSize(3)/real(grid(3))
-cellSurfaceArea(3:4,1,:) = geomSize(3)/real(grid(3)) * geomSize(1)/real(grid(1))
-cellSurfaceArea(5:6,1,:) = geomSize(1)/real(grid(1)) * geomSize(2)/real(grid(2))
+cellSurfaceArea(1:2,1,:) = geomSize(2)/real(cells(2)) * geomSize(3)/real(cells(3))
+cellSurfaceArea(3:4,1,:) = geomSize(3)/real(cells(3)) * geomSize(1)/real(cells(1))
+cellSurfaceArea(5:6,1,:) = geomSize(1)/real(cells(1)) * geomSize(2)/real(cells(2))

 end function cellSurfaceArea
@@ -588,42 +588,42 @@ end function cellSurfaceNormal
 !--------------------------------------------------------------------------------------------------
 !> @brief Build IP neighborhood relations
 !--------------------------------------------------------------------------------------------------
-pure function IPneighborhood(grid)
+pure function IPneighborhood(cells)

-integer, dimension(3), intent(in) :: grid ! grid (for this process!)
+integer, dimension(3), intent(in) :: cells ! cells (for this process!)

-integer, dimension(3,6,1,product(grid)) :: IPneighborhood !< 6 neighboring IPs as [element ID, IP ID, face ID]
+integer, dimension(3,6,1,product(cells)) :: IPneighborhood !< 6 neighboring IPs as [element ID, IP ID, face ID]

 integer :: &
 x,y,z, &
 e

 e = 0
-do z = 0,grid(3)-1; do y = 0,grid(2)-1; do x = 0,grid(1)-1
+do z = 0,cells(3)-1; do y = 0,cells(2)-1; do x = 0,cells(1)-1
 e = e + 1
 ! element ID
-IPneighborhood(1,1,1,e) = z * grid(1) * grid(2) &
-+ y * grid(1) &
-+ modulo(x+1,grid(1)) &
+IPneighborhood(1,1,1,e) = z * cells(1) * cells(2) &
++ y * cells(1) &
++ modulo(x+1,cells(1)) &
 + 1
-IPneighborhood(1,2,1,e) = z * grid(1) * grid(2) &
-+ y * grid(1) &
-+ modulo(x-1,grid(1)) &
+IPneighborhood(1,2,1,e) = z * cells(1) * cells(2) &
++ y * cells(1) &
++ modulo(x-1,cells(1)) &
 + 1
-IPneighborhood(1,3,1,e) = z * grid(1) * grid(2) &
-+ modulo(y+1,grid(2)) * grid(1) &
+IPneighborhood(1,3,1,e) = z * cells(1) * cells(2) &
++ modulo(y+1,cells(2)) * cells(1) &
 + x &
 + 1
-IPneighborhood(1,4,1,e) = z * grid(1) * grid(2) &
-+ modulo(y-1,grid(2)) * grid(1) &
+IPneighborhood(1,4,1,e) = z * cells(1) * cells(2) &
++ modulo(y-1,cells(2)) * cells(1) &
 + x &
 + 1
-IPneighborhood(1,5,1,e) = modulo(z+1,grid(3)) * grid(1) * grid(2) &
-+ y * grid(1) &
+IPneighborhood(1,5,1,e) = modulo(z+1,cells(3)) * cells(1) * cells(2) &
++ y * cells(1) &
 + x &
 + 1
-IPneighborhood(1,6,1,e) = modulo(z-1,grid(3)) * grid(1) * grid(2) &
-+ y * grid(1) &
+IPneighborhood(1,6,1,e) = modulo(z-1,cells(3)) * cells(1) * cells(2) &
++ y * cells(1) &
 + x &
 + 1
 ! IP ID
@@ -106,9 +106,9 @@ subroutine grid_damage_spectral_init()

 !--------------------------------------------------------------------------------------------------
 ! init fields
-allocate(phi_current(grid(1),grid(2),grid3), source=1.0_pReal)
-allocate(phi_lastInc(grid(1),grid(2),grid3), source=1.0_pReal)
-allocate(phi_stagInc(grid(1),grid(2),grid3), source=1.0_pReal)
+allocate(phi_current(cells(1),cells(2),cells3), source=1.0_pReal)
+allocate(phi_lastInc(cells(1),cells(2),cells3), source=1.0_pReal)
+allocate(phi_stagInc(cells(1),cells(2),cells3), source=1.0_pReal)

 !--------------------------------------------------------------------------------------------------
 ! initialize solver specific parts of PETSc
@@ -117,23 +117,23 @@ subroutine grid_damage_spectral_init()
 call SNESSetOptionsPrefix(SNES_damage,'damage_',err_PETSc)
 CHKERRQ(err_PETSc)
 localK = 0_pPetscInt
-localK(worldrank) = int(grid3,pPetscInt)
+localK(worldrank) = int(cells3,pPetscInt)
 call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,err_MPI)
 if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
 call DMDACreate3D(PETSC_COMM_WORLD, &
 DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, & ! cut off stencil at boundary
 DMDA_STENCIL_BOX, & ! Moore (26) neighborhood around central point
-int(grid(1),pPetscInt),int(grid(2),pPetscInt),int(grid(3),pPetscInt), & ! global grid
+int(cells(1),pPetscInt),int(cells(2),pPetscInt),int(cells(3),pPetscInt), & ! global cells
 1_pPetscInt, 1_pPetscInt, int(worldsize,pPetscInt), &
 1_pPetscInt, 0_pPetscInt, & ! #dof (phi, scalar), ghost boundary width (domain overlap)
-[int(grid(1),pPetscInt)],[int(grid(2),pPetscInt)],localK, & ! local grid
+[int(cells(1),pPetscInt)],[int(cells(2),pPetscInt)],localK, & ! local cells
 damage_grid,err_PETSc) ! handle, error
 CHKERRQ(err_PETSc)
 call DMsetFromOptions(damage_grid,err_PETSc)
 CHKERRQ(err_PETSc)
 call DMsetUp(damage_grid,err_PETSc)
 CHKERRQ(err_PETSc)
-call DMCreateGlobalVector(damage_grid,solution_vec,err_PETSc) ! global solution vector (grid x 1, i.e. every def grad tensor)
+call DMCreateGlobalVector(damage_grid,solution_vec,err_PETSc) ! global solution vector (cells x 1, i.e. every def grad tensor)
 CHKERRQ(err_PETSc)
 call DMDASNESSetFunctionLocal(damage_grid,INSERT_VALUES,formResidual,PETSC_NULL_SNES,err_PETSc) ! residual vector of same shape as solution vector
 CHKERRQ(err_PETSc)
@@ -213,7 +213,7 @@ function grid_damage_spectral_solution(Delta_t) result(solution)
 !--------------------------------------------------------------------------------------------------
 ! updating damage state
 ce = 0
-do k = 1, grid3; do j = 1, grid(2); do i = 1,grid(1)
+do k = 1, cells3; do j = 1, cells(2); do i = 1,cells(1)
 ce = ce + 1
 call homogenization_set_phi(phi_current(i,j,k),ce)
 end do; end do; end do
@@ -255,7 +255,7 @@ subroutine grid_damage_spectral_forward(cutBack)
 call DMDAVecRestoreArrayF90(dm_local,solution_vec,phi_PETSc,err_PETSc)
 CHKERRQ(err_PETSc)
 ce = 0
-do k = 1, grid3; do j = 1, grid(2); do i = 1,grid(1)
+do k = 1, cells3; do j = 1, cells(2); do i = 1,cells(1)
 ce = ce + 1
 call homogenization_set_phi(phi_current(i,j,k),ce)
 end do; end do; end do
@@ -289,12 +289,12 @@ subroutine formResidual(in,x_scal,r,dummy,err_PETSc)
 !--------------------------------------------------------------------------------------------------
 ! evaluate polarization field
 scalarField_real = 0.0_pReal
-scalarField_real(1:grid(1),1:grid(2),1:grid3) = phi_current
+scalarField_real(1:cells(1),1:cells(2),1:cells3) = phi_current
 call utilities_FFTscalarForward
 call utilities_fourierScalarGradient !< calculate gradient of damage field
 call utilities_FFTvectorBackward
 ce = 0
-do k = 1, grid3; do j = 1, grid(2); do i = 1,grid(1)
+do k = 1, cells3; do j = 1, cells(2); do i = 1,cells(1)
 ce = ce + 1
 vectorField_real(1:3,i,j,k) = matmul(homogenization_K_phi(ce) - K_ref, vectorField_real(1:3,i,j,k))
 end do; end do; end do
@@ -302,7 +302,7 @@ subroutine formResidual(in,x_scal,r,dummy,err_PETSc)
 call utilities_fourierVectorDivergence !< calculate damage divergence in fourier field
 call utilities_FFTscalarBackward
 ce = 0
-do k = 1, grid3; do j = 1, grid(2); do i = 1,grid(1)
+do k = 1, cells3; do j = 1, cells(2); do i = 1,cells(1)
 ce = ce + 1
 scalarField_real(i,j,k) = params%Delta_t*(scalarField_real(i,j,k) + homogenization_f_phi(phi_current(i,j,k),ce)) &
 + homogenization_mu_phi(ce)*(phi_lastInc(i,j,k) - phi_current(i,j,k)) &
@@ -315,14 +315,14 @@ subroutine formResidual(in,x_scal,r,dummy,err_PETSc)
 call utilities_fourierGreenConvolution(K_ref, mu_ref, params%Delta_t)
 call utilities_FFTscalarBackward

-where(scalarField_real(1:grid(1),1:grid(2),1:grid3) > phi_lastInc) &
-scalarField_real(1:grid(1),1:grid(2),1:grid3) = phi_lastInc
-where(scalarField_real(1:grid(1),1:grid(2),1:grid3) < num%residualStiffness) &
-scalarField_real(1:grid(1),1:grid(2),1:grid3) = num%residualStiffness
+where(scalarField_real(1:cells(1),1:cells(2),1:cells3) > phi_lastInc) &
+scalarField_real(1:cells(1),1:cells(2),1:cells3) = phi_lastInc
+where(scalarField_real(1:cells(1),1:cells(2),1:cells3) < num%residualStiffness) &
+scalarField_real(1:cells(1),1:cells(2),1:cells3) = num%residualStiffness

 !--------------------------------------------------------------------------------------------------
 ! constructing residual
-r = scalarField_real(1:grid(1),1:grid(2),1:grid3) - phi_current
+r = scalarField_real(1:cells(1),1:cells(2),1:cells3) - phi_current
 err_PETSc = 0

 end subroutine formResidual
@@ -339,7 +339,7 @@ subroutine updateReference()

 K_ref = 0.0_pReal
 mu_ref = 0.0_pReal
-do ce = 1, product(grid(1:2))*grid3
+do ce = 1, product(cells(1:2))*cells3
 K_ref = K_ref + homogenization_K_phi(ce)
 mu_ref = mu_ref + homogenization_mu_phi(ce)
 end do
@@ -153,9 +153,9 @@ subroutine grid_mechanical_FEM_init

 !--------------------------------------------------------------------------------------------------
 ! allocate global fields
-allocate(F (3,3,grid(1),grid(2),grid3),source = 0.0_pReal)
-allocate(P_current (3,3,grid(1),grid(2),grid3),source = 0.0_pReal)
-allocate(F_lastInc (3,3,grid(1),grid(2),grid3),source = 0.0_pReal)
+allocate(F (3,3,cells(1),cells(2),cells3),source = 0.0_pReal)
+allocate(P_current (3,3,cells(1),cells(2),cells3),source = 0.0_pReal)
+allocate(F_lastInc (3,3,cells(1),cells(2),cells3),source = 0.0_pReal)

 !--------------------------------------------------------------------------------------------------
 ! initialize solver specific parts of PETSc
@@ -164,16 +164,16 @@ subroutine grid_mechanical_FEM_init
 call SNESSetOptionsPrefix(SNES_mechanical,'mechanical_',err_PETSc)
 CHKERRQ(err_PETSc)
 localK = 0_pPetscInt
-localK(worldrank) = int(grid3,pPetscInt)
+localK(worldrank) = int(cells3,pPetscInt)
 call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,err_MPI)
 if(err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
 call DMDACreate3d(PETSC_COMM_WORLD, &
 DM_BOUNDARY_PERIODIC, DM_BOUNDARY_PERIODIC, DM_BOUNDARY_PERIODIC, &
 DMDA_STENCIL_BOX, &
-int(grid(1),pPetscInt),int(grid(2),pPetscInt),int(grid(3),pPetscInt), & ! global grid
+int(cells(1),pPetscInt),int(cells(2),pPetscInt),int(cells(3),pPetscInt), & ! global cells
 1_pPetscInt, 1_pPetscInt, int(worldsize,pPetscInt), &
 3_pPetscInt, 1_pPetscInt, & ! #dof (u, vector), ghost boundary width (domain overlap)
-[int(grid(1),pPetscInt)],[int(grid(2),pPetscInt)],localK, & ! local grid
+[int(cells(1),pPetscInt)],[int(cells(2),pPetscInt)],localK, & ! local cells
 mechanical_grid,err_PETSc)
 CHKERRQ(err_PETSc)
 call DMsetFromOptions(mechanical_grid,err_PETSc)
@@ -214,7 +214,7 @@ subroutine grid_mechanical_FEM_init
 call DMDAVecGetArrayF90(mechanical_grid,solution_lastInc,u_lastInc,err_PETSc)
 CHKERRQ(err_PETSc)

-delta = geomSize/real(grid,pReal) ! grid spacing
+delta = geomSize/real(cells,pReal) ! grid spacing
 detJ = product(delta) ! cell volume

 BMat = reshape(real([-1.0_pReal/delta(1),-1.0_pReal/delta(2),-1.0_pReal/delta(3), &
@@ -255,11 +255,11 @@ subroutine grid_mechanical_FEM_init
 call HDF5_read(u_lastInc,groupHandle,'u_lastInc')

 elseif (interface_restartInc == 0) then restartRead
-F_lastInc = spread(spread(spread(math_I3,3,grid(1)),4,grid(2)),5,grid3) ! initialize to identity
-F = spread(spread(spread(math_I3,3,grid(1)),4,grid(2)),5,grid3)
+F_lastInc = spread(spread(spread(math_I3,3,cells(1)),4,cells(2)),5,cells3) ! initialize to identity
+F = spread(spread(spread(math_I3,3,cells(1)),4,cells(2)),5,cells3)
 endif restartRead

-homogenization_F0 = reshape(F_lastInc, [3,3,product(grid(1:2))*grid3]) ! set starting condition for homogenization_mechanical_response
+homogenization_F0 = reshape(F_lastInc, [3,3,product(cells(1:2))*cells3]) ! set starting condition for homogenization_mechanical_response
 call utilities_updateCoords(F)
 call utilities_constitutiveResponse(P_current,P_av,C_volAvg,devNull, & ! stress field, stress avg, global average of stiffness and (min+max)/2
 F, & ! target F
@@ -339,7 +339,7 @@ subroutine grid_mechanical_FEM_forward(cutBack,guess,Delta_t,Delta_t_old,t_remai
 type(tBoundaryCondition), intent(in) :: &
 stress_BC, &
 deformation_BC
-type(rotation), intent(in) :: &
+type(tRotation), intent(in) :: &
 rotation_BC
 PetscErrorCode :: err_PETSc
 PetscScalar, pointer, dimension(:,:,:,:) :: &
@@ -386,7 +386,7 @@ subroutine grid_mechanical_FEM_forward(cutBack,guess,Delta_t,Delta_t_old,t_remai

 F_lastInc = F

-homogenization_F0 = reshape(F, [3,3,product(grid(1:2))*grid3])
+homogenization_F0 = reshape(F, [3,3,product(cells(1:2))*cells3])
 endif

 !--------------------------------------------------------------------------------------------------
@@ -556,13 +556,13 @@ subroutine formResidual(da_local,x_local, &
 ! get deformation gradient
 call DMDAVecGetArrayF90(da_local,x_local,x_scal,err_PETSc)
 CHKERRQ(err_PETSc)
-do k = grid3offset+1, grid3offset+grid3; do j = 1, grid(2); do i = 1, grid(1)
+do k = cells3Offset+1, cells3Offset+cells3; do j = 1, cells(2); do i = 1, cells(1)
 ctr = 0
 do kk = -1, 0; do jj = -1, 0; do ii = -1, 0
 ctr = ctr + 1
 x_elem(ctr,1:3) = x_scal(0:2,i+ii,j+jj,k+kk)
 enddo; enddo; enddo
-F(1:3,1:3,i,j,k-grid3offset) = params%rotation_BC%rotate(F_aim,active=.true.) + transpose(matmul(BMat,x_elem))
+F(1:3,1:3,i,j,k-cells3Offset) = params%rotation_BC%rotate(F_aim,active=.true.) + transpose(matmul(BMat,x_elem))
 enddo; enddo; enddo
 call DMDAVecRestoreArrayF90(da_local,x_local,x_scal,err_PETSc)
 CHKERRQ(err_PETSc)
@@ -589,14 +589,14 @@ subroutine formResidual(da_local,x_local, &
 call DMDAVecGetArrayF90(da_local,x_local,x_scal,err_PETSc)
 CHKERRQ(err_PETSc)
 ele = 0
-do k = grid3offset+1, grid3offset+grid3; do j = 1, grid(2); do i = 1, grid(1)
+do k = cells3Offset+1, cells3Offset+cells3; do j = 1, cells(2); do i = 1, cells(1)
 ctr = 0
 do kk = -1, 0; do jj = -1, 0; do ii = -1, 0
 ctr = ctr + 1
 x_elem(ctr,1:3) = x_scal(0:2,i+ii,j+jj,k+kk)
 enddo; enddo; enddo
 ele = ele + 1
-f_elem = matmul(transpose(BMat),transpose(P_current(1:3,1:3,i,j,k-grid3offset)))*detJ + &
+f_elem = matmul(transpose(BMat),transpose(P_current(1:3,1:3,i,j,k-cells3Offset)))*detJ + &
 matmul(HGMat,x_elem)*(homogenization_dPdF(1,1,1,1,ele) + &
 homogenization_dPdF(2,2,2,2,ele) + &
 homogenization_dPdF(3,3,3,3,ele))/3.0_pReal
@@ -615,17 +615,17 @@ subroutine formResidual(da_local,x_local, &
 ! applying boundary conditions
 call DMDAVecGetArrayF90(da_local,f_local,r,err_PETSc)
 CHKERRQ(err_PETSc)
-if (grid3offset == 0) then
-r(0:2,0, 0, 0) = 0.0_pReal
-r(0:2,grid(1),0, 0) = 0.0_pReal
-r(0:2,0, grid(2),0) = 0.0_pReal
-r(0:2,grid(1),grid(2),0) = 0.0_pReal
+if (cells3Offset == 0) then
+r(0:2,0, 0, 0) = 0.0_pReal
+r(0:2,cells(1),0, 0) = 0.0_pReal
+r(0:2,0, cells(2),0) = 0.0_pReal
+r(0:2,cells(1),cells(2),0) = 0.0_pReal
 end if
-if (grid3+grid3offset == grid(3)) then
-r(0:2,0, 0, grid(3)) = 0.0_pReal
-r(0:2,grid(1),0, grid(3)) = 0.0_pReal
-r(0:2,0, grid(2),grid(3)) = 0.0_pReal
-r(0:2,grid(1),grid(2),grid(3)) = 0.0_pReal
+if (cells3+cells3Offset == cells(3)) then
+r(0:2,0, 0, cells(3)) = 0.0_pReal
+r(0:2,cells(1),0, cells(3)) = 0.0_pReal
+r(0:2,0, cells(2),cells(3)) = 0.0_pReal
+r(0:2,cells(1),cells(2),cells(3)) = 0.0_pReal
 end if
 call DMDAVecRestoreArrayF90(da_local,f_local,r,err_PETSc)
 CHKERRQ(err_PETSc)
@@ -663,7 +663,7 @@ subroutine formJacobian(da_local,x_local,Jac_pre,Jac,dummy,err_PETSc)
 call MatZeroEntries(Jac,err_PETSc)
 CHKERRQ(err_PETSc)
 ce = 0
-do k = grid3offset+1, grid3offset+grid3; do j = 1, grid(2); do i = 1, grid(1)
+do k = cells3Offset+1, cells3Offset+cells3; do j = 1, cells(2); do i = 1, cells(1)
 ctr = 0
 do kk = -1, 0; do jj = -1, 0; do ii = -1, 0
 ctr = ctr + 1
@@ -719,7 +719,7 @@ subroutine formJacobian(da_local,x_local,Jac_pre,Jac,dummy,err_PETSc)
 call DMDAVecGetArrayF90(da_local,coordinates,x_scal,err_PETSc)
 CHKERRQ(err_PETSc)
 ce = 0
-do k = grid3offset+1, grid3offset+grid3; do j = 1, grid(2); do i = 1, grid(1)
+do k = cells3Offset+1, cells3Offset+cells3; do j = 1, cells(2); do i = 1, cells(1)
 ce = ce + 1
 x_scal(0:2,i-1,j-1,k-1) = discretization_IPcoords(1:3,ce)
 enddo; enddo; enddo
@@ -79,6 +79,12 @@ module grid_mechanical_spectral_basic
 err_BC, & !< deviation from stress BC
 err_div !< RMS of div of P

+#if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY)
+type(MPI_Status) :: status
+#else
+integer, dimension(MPI_STATUS_SIZE) :: status
+#endif
+
 integer :: &
 totalIter = 0 !< total iteration in current increment
@@ -96,7 +102,7 @@ contains
 !--------------------------------------------------------------------------------------------------
 subroutine grid_mechanical_spectral_basic_init

-real(pReal), dimension(3,3,grid(1),grid(2),grid3) :: P
+real(pReal), dimension(3,3,cells(1),cells(2),cells3) :: P
 PetscErrorCode :: err_PETSc
 integer(MPI_INTEGER_KIND) :: err_MPI
 PetscScalar, pointer, dimension(:,:,:,:) :: &
@@ -153,8 +159,8 @@ subroutine grid_mechanical_spectral_basic_init

 !--------------------------------------------------------------------------------------------------
 ! allocate global fields
-allocate(F_lastInc(3,3,grid(1),grid(2),grid3),source = 0.0_pReal)
-allocate(Fdot (3,3,grid(1),grid(2),grid3),source = 0.0_pReal)
+allocate(F_lastInc(3,3,cells(1),cells(2),cells3),source = 0.0_pReal)
+allocate(Fdot (3,3,cells(1),cells(2),cells3),source = 0.0_pReal)

 !--------------------------------------------------------------------------------------------------
 ! initialize solver specific parts of PETSc
@@ -163,23 +169,23 @@ subroutine grid_mechanical_spectral_basic_init
 call SNESSetOptionsPrefix(SNES_mechanical,'mechanical_',err_PETSc)
 CHKERRQ(err_PETSc)
 localK = 0_pPetscInt
-localK(worldrank) = int(grid3,pPetscInt)
+localK(worldrank) = int(cells3,pPetscInt)
 call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,err_MPI)
 if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
 call DMDACreate3d(PETSC_COMM_WORLD, &
 DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, & ! cut off stencil at boundary
 DMDA_STENCIL_BOX, & ! Moore (26) neighborhood around central point
-int(grid(1),pPetscInt),int(grid(2),pPetscInt),int(grid(3),pPetscInt), & ! global grid
+int(cells(1),pPetscInt),int(cells(2),pPetscInt),int(cells(3),pPetscInt), & ! global cells
 1_pPetscInt, 1_pPetscInt, int(worldsize,pPetscInt), &
 9_pPetscInt, 0_pPetscInt, & ! #dof (F, tensor), ghost boundary width (domain overlap)
-[int(grid(1),pPetscInt)],[int(grid(2),pPetscInt)],localK, & ! local grid
+[int(cells(1),pPetscInt)],[int(cells(2),pPetscInt)],localK, & ! local cells
 da,err_PETSc) ! handle, error
 CHKERRQ(err_PETSc)
 call DMsetFromOptions(da,err_PETSc)
 CHKERRQ(err_PETSc)
 call DMsetUp(da,err_PETSc)
 CHKERRQ(err_PETSc)
-call DMcreateGlobalVector(da,solution_vec,err_PETSc) ! global solution vector (grid x 9, i.e. every def grad tensor)
+call DMcreateGlobalVector(da,solution_vec,err_PETSc) ! global solution vector (cells x 9, i.e. every def grad tensor)
 CHKERRQ(err_PETSc)
 call DMDASNESsetFunctionLocal(da,INSERT_VALUES,formResidual,PETSC_NULL_SNES,err_PETSc) ! residual vector of same shape as solution vector
 CHKERRQ(err_PETSc)
@@ -217,11 +223,11 @@ subroutine grid_mechanical_spectral_basic_init
 call HDF5_read(F_lastInc,groupHandle,'F_lastInc')

 elseif (interface_restartInc == 0) then restartRead
-F_lastInc = spread(spread(spread(math_I3,3,grid(1)),4,grid(2)),5,grid3) ! initialize to identity
-F = reshape(F_lastInc,[9,grid(1),grid(2),grid3])
+F_lastInc = spread(spread(spread(math_I3,3,cells(1)),4,cells(2)),5,cells3) ! initialize to identity
+F = reshape(F_lastInc,[9,cells(1),cells(2),cells3])
 end if restartRead

-homogenization_F0 = reshape(F_lastInc, [3,3,product(grid(1:2))*grid3]) ! set starting condition for homogenization_mechanical_response
+homogenization_F0 = reshape(F_lastInc, [3,3,product(cells(1:2))*cells3]) ! set starting condition for homogenization_mechanical_response
 call utilities_updateCoords(reshape(F,shape(F_lastInc)))
 call utilities_constitutiveResponse(P,P_av,C_volAvg,C_minMaxAvg, & ! stress field, stress avg, global average of stiffness and (min+max)/2
 reshape(F,shape(F_lastInc)), & ! target F
@@ -244,7 +250,7 @@ subroutine grid_mechanical_spectral_basic_init
 call MPI_File_open(MPI_COMM_WORLD, trim(getSolverJobName())//'.C_ref', &
 MPI_MODE_RDONLY,MPI_INFO_NULL,fileUnit,err_MPI)
 if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
-call MPI_File_read(fileUnit,C_minMaxAvg,81_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_STATUS_IGNORE,err_MPI)
+call MPI_File_read(fileUnit,C_minMaxAvg,81_MPI_INTEGER_KIND,MPI_DOUBLE,status,err_MPI)
 if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
 call MPI_File_close(fileUnit,err_MPI)
 if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
@@ -310,7 +316,7 @@ subroutine grid_mechanical_spectral_basic_forward(cutBack,guess,Delta_t,Delta_t_
 type(tBoundaryCondition), intent(in) :: &
 stress_BC, &
 deformation_BC
-type(rotation), intent(in) :: &
+type(tRotation), intent(in) :: &
 rotation_BC
 PetscErrorCode :: err_PETSc
 PetscScalar, pointer, dimension(:,:,:,:) :: F
@@ -343,11 +349,11 @@ subroutine grid_mechanical_spectral_basic_forward(cutBack,guess,Delta_t,Delta_t_
 end if

 Fdot = utilities_calculateRate(guess, &
-F_lastInc,reshape(F,[3,3,grid(1),grid(2),grid3]),Delta_t_old, &
+F_lastInc,reshape(F,[3,3,cells(1),cells(2),cells3]),Delta_t_old, &
 rotation_BC%rotate(F_aimDot,active=.true.))
-F_lastInc = reshape(F,[3,3,grid(1),grid(2),grid3])
+F_lastInc = reshape(F,[3,3,cells(1),cells(2),cells3])

-homogenization_F0 = reshape(F,[3,3,product(grid(1:2))*grid3])
+homogenization_F0 = reshape(F,[3,3,product(cells(1:2))*cells3])
 end if

 !--------------------------------------------------------------------------------------------------
@@ -359,7 +365,7 @@ subroutine grid_mechanical_spectral_basic_forward(cutBack,guess,Delta_t,Delta_t_
 + merge(.0_pReal,stress_BC%values,stress_BC%mask)*Delta_t

 F = reshape(utilities_forwardField(Delta_t,F_lastInc,Fdot, & ! estimate of F at end of time+Delta_t that matches rotated F_aim on average
-rotation_BC%rotate(F_aim,active=.true.)),[9,grid(1),grid(2),grid3])
+rotation_BC%rotate(F_aim,active=.true.)),[9,cells(1),cells(2),cells3])
 call DMDAVecRestoreArrayF90(da,solution_vec,F,err_PETSc)
 CHKERRQ(err_PETSc)
@@ -530,7 +536,7 @@ subroutine formResidual(in, F, &
 !--------------------------------------------------------------------------------------------------
 ! updated deformation gradient using fix point algorithm of basic scheme
 tensorField_real = 0.0_pReal
-tensorField_real(1:3,1:3,1:grid(1),1:grid(2),1:grid3) = r ! store fPK field for subsequent FFT forward transform
+tensorField_real(1:3,1:3,1:cells(1),1:cells(2),1:cells3) = r ! store fPK field for subsequent FFT forward transform
 call utilities_FFTtensorForward ! FFT forward of global "tensorField_real"
 err_div = utilities_divergenceRMS() ! divRMS of tensorField_fourier for later use
 call utilities_fourierGammaConvolution(params%rotation_BC%rotate(deltaF_aim,active=.true.)) ! convolution of Gamma and tensorField_fourier
@@ -538,7 +544,7 @@ subroutine formResidual(in, F, &

 !--------------------------------------------------------------------------------------------------
 ! constructing residual
-r = tensorField_real(1:3,1:3,1:grid(1),1:grid(2),1:grid3) ! Gamma*P gives correction towards div(P) = 0, so needs to be zero, too
+r = tensorField_real(1:3,1:3,1:cells(1),1:cells(2),1:cells3) ! Gamma*P gives correction towards div(P) = 0, so needs to be zero, too

 end subroutine formResidual
@@ -90,6 +90,12 @@ module grid_mechanical_spectral_polarisation
 err_curl, & !< RMS of curl of F
 err_div !< RMS of div of P

+#if (PETSC_VERSION_MAJOR==3 && PETSC_VERSION_MINOR>14) && !defined(PETSC_HAVE_MPI_F90MODULE_VISIBILITY)
+type(MPI_Status) :: status
+#else
+integer, dimension(MPI_STATUS_SIZE) :: status
+#endif
+
 integer :: &
 totalIter = 0 !< total iteration in current increment
@@ -107,7 +113,7 @@ contains
 !--------------------------------------------------------------------------------------------------
 subroutine grid_mechanical_spectral_polarisation_init

-real(pReal), dimension(3,3,grid(1),grid(2),grid3) :: P
+real(pReal), dimension(3,3,cells(1),cells(2),cells3) :: P
 PetscErrorCode :: err_PETSc
 integer(MPI_INTEGER_KIND) :: err_MPI
 PetscScalar, pointer, dimension(:,:,:,:) :: &
@@ -171,10 +177,10 @@ subroutine grid_mechanical_spectral_polarisation_init

 !--------------------------------------------------------------------------------------------------
 ! allocate global fields
-allocate(F_lastInc (3,3,grid(1),grid(2),grid3),source = 0.0_pReal)
-allocate(Fdot (3,3,grid(1),grid(2),grid3),source = 0.0_pReal)
-allocate(F_tau_lastInc(3,3,grid(1),grid(2),grid3),source = 0.0_pReal)
-allocate(F_tauDot (3,3,grid(1),grid(2),grid3),source = 0.0_pReal)
+allocate(F_lastInc (3,3,cells(1),cells(2),cells3),source = 0.0_pReal)
+allocate(Fdot (3,3,cells(1),cells(2),cells3),source = 0.0_pReal)
+allocate(F_tau_lastInc(3,3,cells(1),cells(2),cells3),source = 0.0_pReal)
+allocate(F_tauDot (3,3,cells(1),cells(2),cells3),source = 0.0_pReal)

 !--------------------------------------------------------------------------------------------------
 ! initialize solver specific parts of PETSc
@@ -183,23 +189,23 @@ subroutine grid_mechanical_spectral_polarisation_init
 call SNESSetOptionsPrefix(SNES_mechanical,'mechanical_',err_PETSc)
 CHKERRQ(err_PETSc)
 localK = 0_pPetscInt
-localK(worldrank) = int(grid3,pPetscInt)
+localK(worldrank) = int(cells3,pPetscInt)
 call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,err_MPI)
 if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
 call DMDACreate3d(PETSC_COMM_WORLD, &
 DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, & ! cut off stencil at boundary
 DMDA_STENCIL_BOX, & ! Moore (26) neighborhood around central point
-int(grid(1),pPetscInt),int(grid(2),pPetscInt),int(grid(3),pPetscInt), & ! global grid
+int(cells(1),pPetscInt),int(cells(2),pPetscInt),int(cells(3),pPetscInt), & ! global cells
 1_pPetscInt, 1_pPetscInt, int(worldsize,pPetscInt), &
 18_pPetscInt, 0_pPetscInt, & ! #dof (2xtensor), ghost boundary width (domain overlap)
-[int(grid(1),pPetscInt)],[int(grid(2),pPetscInt)],localK, & ! local grid
+[int(cells(1),pPetscInt)],[int(cells(2),pPetscInt)],localK, & ! local cells
 da,err_PETSc) ! handle, error
 CHKERRQ(err_PETSc)
 call DMsetFromOptions(da,err_PETSc)
 CHKERRQ(err_PETSc)
 call DMsetUp(da,err_PETSc)
 CHKERRQ(err_PETSc)
-call DMcreateGlobalVector(da,solution_vec,err_PETSc) ! global solution vector (grid x 18, i.e. every def grad tensor)
+call DMcreateGlobalVector(da,solution_vec,err_PETSc) ! global solution vector (cells x 18, i.e. every def grad tensor)
 CHKERRQ(err_PETSc)
 call DMDASNESsetFunctionLocal(da,INSERT_VALUES,formResidual,PETSC_NULL_SNES,err_PETSc) ! residual vector of same shape as solution vector
 CHKERRQ(err_PETSc)
@@ -241,13 +247,13 @@ subroutine grid_mechanical_spectral_polarisation_init
 call HDF5_read(F_tau_lastInc,groupHandle,'F_tau_lastInc')

 elseif (interface_restartInc == 0) then restartRead
-F_lastInc = spread(spread(spread(math_I3,3,grid(1)),4,grid(2)),5,grid3) ! initialize to identity
-F = reshape(F_lastInc,[9,grid(1),grid(2),grid3])
+F_lastInc = spread(spread(spread(math_I3,3,cells(1)),4,cells(2)),5,cells3) ! initialize to identity
+F = reshape(F_lastInc,[9,cells(1),cells(2),cells3])
 F_tau = 2.0_pReal*F
 F_tau_lastInc = 2.0_pReal*F_lastInc
 end if restartRead

-homogenization_F0 = reshape(F_lastInc, [3,3,product(grid(1:2))*grid3]) ! set starting condition for homogenization_mechanical_response
+homogenization_F0 = reshape(F_lastInc, [3,3,product(cells(1:2))*cells3]) ! set starting condition for homogenization_mechanical_response
 call utilities_updateCoords(reshape(F,shape(F_lastInc)))
 call utilities_constitutiveResponse(P,P_av,C_volAvg,C_minMaxAvg, & ! stress field, stress avg, global average of stiffness and (min+max)/2
 reshape(F,shape(F_lastInc)), & ! target F
@@ -270,7 +276,7 @@ subroutine grid_mechanical_spectral_polarisation_init
 call MPI_File_open(MPI_COMM_WORLD, trim(getSolverJobName())//'.C_ref', &
 MPI_MODE_RDONLY,MPI_INFO_NULL,fileUnit,err_MPI)
 if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
-call MPI_File_read(fileUnit,C_minMaxAvg,81_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_STATUS_IGNORE,err_MPI)
+call MPI_File_read(fileUnit,C_minMaxAvg,81_MPI_INTEGER_KIND,MPI_DOUBLE,status,err_MPI)
 if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
 call MPI_File_close(fileUnit,err_MPI)
 if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
@@ -342,7 +348,7 @@ subroutine grid_mechanical_spectral_polarisation_forward(cutBack,guess,Delta_t,D
 type(tBoundaryCondition), intent(in) :: &
 stress_BC, &
 deformation_BC
-type(rotation), intent(in) :: &
+type(tRotation), intent(in) :: &
 rotation_BC
 PetscErrorCode :: err_PETSc
 PetscScalar, pointer, dimension(:,:,:,:) :: FandF_tau, F, F_tau
@@ -379,15 +385,15 @@ subroutine grid_mechanical_spectral_polarisation_forward(cutBack,guess,Delta_t,D
 end if

 Fdot = utilities_calculateRate(guess, &
-F_lastInc,reshape(F,[3,3,grid(1),grid(2),grid3]),Delta_t_old, &
+F_lastInc,reshape(F,[3,3,cells(1),cells(2),cells3]),Delta_t_old, &
 rotation_BC%rotate(F_aimDot,active=.true.))
 F_tauDot = utilities_calculateRate(guess, &
-F_tau_lastInc,reshape(F_tau,[3,3,grid(1),grid(2),grid3]), Delta_t_old, &
+F_tau_lastInc,reshape(F_tau,[3,3,cells(1),cells(2),cells3]), Delta_t_old, &
 rotation_BC%rotate(F_aimDot,active=.true.))
-F_lastInc = reshape(F, [3,3,grid(1),grid(2),grid3])
-F_tau_lastInc = reshape(F_tau,[3,3,grid(1),grid(2),grid3])
+F_lastInc = reshape(F, [3,3,cells(1),cells(2),cells3])
+F_tau_lastInc = reshape(F_tau,[3,3,cells(1),cells(2),cells3])

-homogenization_F0 = reshape(F,[3,3,product(grid(1:2))*grid3])
+homogenization_F0 = reshape(F,[3,3,product(cells(1:2))*cells3])
 end if

 !--------------------------------------------------------------------------------------------------
@ -400,12 +406,12 @@ subroutine grid_mechanical_spectral_polarisation_forward(cutBack,guess,Delta_t,D
|
|||
|
||||
F = reshape(utilities_forwardField(Delta_t,F_lastInc,Fdot, & ! estimate of F at end of time+Delta_t that matches rotated F_aim on average
|
||||
rotation_BC%rotate(F_aim,active=.true.)),&
|
||||
[9,grid(1),grid(2),grid3])
|
||||
[9,cells(1),cells(2),cells3])
|
||||
if (guess) then
|
||||
F_tau = reshape(Utilities_forwardField(Delta_t,F_tau_lastInc,F_taudot), &
|
||||
[9,grid(1),grid(2),grid3]) ! does not have any average value as boundary condition
|
||||
[9,cells(1),cells(2),cells3]) ! does not have any average value as boundary condition
|
||||
else
|
||||
do k = 1, grid3; do j = 1, grid(2); do i = 1, grid(1)
|
||||
do k = 1, cells3; do j = 1, cells(2); do i = 1, cells(1)
|
||||
F_lambda33 = reshape(F_tau(1:9,i,j,k)-F(1:9,i,j,k),[3,3])
|
||||
F_lambda33 = math_I3 &
|
||||
+ math_mul3333xx33(S_scale,0.5_pReal*matmul(F_lambda33, &
|
||||
|
@ -597,7 +603,7 @@ subroutine formResidual(in, FandF_tau, &
|
|||
!--------------------------------------------------------------------------------------------------
|
||||
!
|
||||
tensorField_real = 0.0_pReal
|
||||
do k = 1, grid3; do j = 1, grid(2); do i = 1, grid(1)
|
||||
do k = 1, cells3; do j = 1, cells(2); do i = 1, cells(1)
|
||||
tensorField_real(1:3,1:3,i,j,k) = &
|
||||
num%beta*math_mul3333xx33(C_scale,F(1:3,1:3,i,j,k) - math_I3) -&
|
||||
num%alpha*matmul(F(1:3,1:3,i,j,k), &
|
||||
|
@ -612,7 +618,7 @@ subroutine formResidual(in, FandF_tau, &
|
|||
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
! constructing residual
|
||||
r_F_tau = num%beta*F - tensorField_real(1:3,1:3,1:grid(1),1:grid(2),1:grid3)
|
||||
r_F_tau = num%beta*F - tensorField_real(1:3,1:3,1:cells(1),1:cells(2),1:cells3)
|
||||
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
! evaluate constitutive response
|
||||
|
@ -629,14 +635,14 @@ subroutine formResidual(in, FandF_tau, &
|
|||
params%stress_mask)))
|
||||
! calculate divergence
|
||||
tensorField_real = 0.0_pReal
|
||||
tensorField_real(1:3,1:3,1:grid(1),1:grid(2),1:grid3) = r_F !< stress field in disguise
|
||||
tensorField_real(1:3,1:3,1:cells(1),1:cells(2),1:cells3) = r_F !< stress field in disguise
|
||||
call utilities_FFTtensorForward
|
||||
err_div = utilities_divergenceRMS() !< root mean squared error in divergence of stress
|
||||
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
! constructing residual
|
||||
e = 0
|
||||
do k = 1, grid3; do j = 1, grid(2); do i = 1, grid(1)
|
||||
do k = 1, cells3; do j = 1, cells(2); do i = 1, cells(1)
|
||||
e = e + 1
|
||||
r_F(1:3,1:3,i,j,k) = &
|
||||
math_mul3333xx33(math_invSym3333(homogenization_dPdF(1:3,1:3,1:3,1:3,e) + C_scale), &
|
||||
|
@ -648,7 +654,7 @@ subroutine formResidual(in, FandF_tau, &
|
|||
!--------------------------------------------------------------------------------------------------
|
||||
! calculating curl
|
||||
tensorField_real = 0.0_pReal
|
||||
tensorField_real(1:3,1:3,1:grid(1),1:grid(2),1:grid3) = F
|
||||
tensorField_real(1:3,1:3,1:cells(1),1:cells(2),1:cells3) = F
|
||||
call utilities_FFTtensorForward
|
||||
err_curl = utilities_curlRMS()
|
||||
|
||||
|
|
|
@ -105,9 +105,9 @@ subroutine grid_thermal_spectral_init(T_0)
!--------------------------------------------------------------------------------------------------
! init fields
allocate(T_current(grid(1),grid(2),grid3), source=T_0)
allocate(T_lastInc(grid(1),grid(2),grid3), source=T_0)
allocate(T_stagInc(grid(1),grid(2),grid3), source=T_0)
allocate(T_current(cells(1),cells(2),cells3), source=T_0)
allocate(T_lastInc(cells(1),cells(2),cells3), source=T_0)
allocate(T_stagInc(cells(1),cells(2),cells3), source=T_0)
!--------------------------------------------------------------------------------------------------
! initialize solver specific parts of PETSc

@ -116,23 +116,23 @@ subroutine grid_thermal_spectral_init(T_0)
call SNESSetOptionsPrefix(SNES_thermal,'thermal_',err_PETSc)
CHKERRQ(err_PETSc)
localK = 0_pPetscInt
localK(worldrank) = int(grid3,pPetscInt)
localK(worldrank) = int(cells3,pPetscInt)
call MPI_Allreduce(MPI_IN_PLACE,localK,worldsize,MPI_INTEGER,MPI_SUM,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
call DMDACreate3D(PETSC_COMM_WORLD, &
DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, DM_BOUNDARY_NONE, &      ! cut off stencil at boundary
DMDA_STENCIL_BOX, &                                          ! Moore (26) neighborhood around central point
int(grid(1),pPetscInt),int(grid(2),pPetscInt),int(grid(3),pPetscInt), &    ! global grid
int(cells(1),pPetscInt),int(cells(2),pPetscInt),int(cells(3),pPetscInt), & ! global cells
1_pPetscInt, 1_pPetscInt, int(worldsize,pPetscInt), &
1_pPetscInt, 0_pPetscInt, &                                  ! #dof (T, scalar), ghost boundary width (domain overlap)
[int(grid(1),pPetscInt)],[int(grid(2),pPetscInt)],localK, &  ! local grid
[int(cells(1),pPetscInt)],[int(cells(2),pPetscInt)],localK, & ! local cells
thermal_grid,err_PETSc)                                      ! handle, error
CHKERRQ(err_PETSc)
call DMsetFromOptions(thermal_grid,err_PETSc)
CHKERRQ(err_PETSc)
call DMsetUp(thermal_grid,err_PETSc)
CHKERRQ(err_PETSc)
call DMCreateGlobalVector(thermal_grid,solution_vec,err_PETSc) ! global solution vector (grid x 1, i.e. every def grad tensor)
call DMCreateGlobalVector(thermal_grid,solution_vec,err_PETSc) ! global solution vector (cells x 1, i.e. every def grad tensor)
CHKERRQ(err_PETSc)
call DMDASNESSetFunctionLocal(thermal_grid,INSERT_VALUES,formResidual,PETSC_NULL_SNES,err_PETSc) ! residual vector of same shape as solution vector
CHKERRQ(err_PETSc)

@ -150,10 +150,10 @@ subroutine grid_thermal_spectral_init(T_0)
call HDF5_read(T_current,groupHandle,'T',.false.)
call HDF5_read(T_lastInc,groupHandle,'T_lastInc',.false.)
end if restartRead
end if restartRead
ce = 0
do k = 1, grid3; do j = 1, grid(2); do i = 1, grid(1)
do k = 1, cells3; do j = 1, cells(2); do i = 1, cells(1)
ce = ce + 1
call homogenization_thermal_setField(T_current(i,j,k),0.0_pReal,ce)
end do; end do; end do

@ -164,7 +164,7 @@ subroutine grid_thermal_spectral_init(T_0)
call DMDAVecRestoreArrayF90(thermal_grid,solution_vec,T_PETSc,err_PETSc)
CHKERRQ(err_PETSc)
call updateReference
call updateReference()
end subroutine grid_thermal_spectral_init

@ -214,7 +214,7 @@ function grid_thermal_spectral_solution(Delta_t) result(solution)
!--------------------------------------------------------------------------------------------------
! updating thermal state
ce = 0
do k = 1, grid3; do j = 1, grid(2); do i = 1,grid(1)
do k = 1, cells3; do j = 1, cells(2); do i = 1,cells(1)
ce = ce + 1
call homogenization_thermal_setField(T_current(i,j,k),(T_current(i,j,k)-T_lastInc(i,j,k))/params%Delta_t,ce)
end do; end do; end do

@ -257,7 +257,7 @@ subroutine grid_thermal_spectral_forward(cutBack)
call DMDAVecRestoreArrayF90(dm_local,solution_vec,T_PETSc,err_PETSc)
CHKERRQ(err_PETSc)
ce = 0
do k = 1, grid3; do j = 1, grid(2); do i = 1,grid(1)
do k = 1, cells3; do j = 1, cells(2); do i = 1,cells(1)
ce = ce + 1
call homogenization_thermal_setField(T_current(i,j,k),(T_current(i,j,k)-T_lastInc(i,j,k))/params%Delta_t,ce)
end do; end do; end do

@ -279,9 +279,9 @@ subroutine grid_thermal_spectral_restartWrite
integer(HID_T) :: fileHandle, groupHandle
PetscScalar, dimension(:,:,:), pointer :: T
call SNESGetDM(SNES_thermal,dm_local,err_PETSc);
call SNESGetDM(SNES_thermal,dm_local,err_PETSc);
CHKERRQ(err_PETSc)
call DMDAVecGetArrayF90(dm_local,solution_vec,T,err_PETSc);
call DMDAVecGetArrayF90(dm_local,solution_vec,T,err_PETSc);
CHKERRQ(err_PETSc)
print'(1x,a)', 'writing thermal solver data required for restart to file'; flush(IO_STDOUT)

@ -293,7 +293,7 @@ subroutine grid_thermal_spectral_restartWrite
call HDF5_closeGroup(groupHandle)
call HDF5_closeFile(fileHandle)
call DMDAVecRestoreArrayF90(dm_local,solution_vec,T,err_PETSc);
call DMDAVecRestoreArrayF90(dm_local,solution_vec,T,err_PETSc);
CHKERRQ(err_PETSc)
end subroutine grid_thermal_spectral_restartWrite

@ -321,12 +321,12 @@ subroutine formResidual(in,x_scal,r,dummy,err_PETSc)
!--------------------------------------------------------------------------------------------------
! evaluate polarization field
scalarField_real = 0.0_pReal
scalarField_real(1:grid(1),1:grid(2),1:grid3) = T_current
scalarField_real(1:cells(1),1:cells(2),1:cells3) = T_current
call utilities_FFTscalarForward
call utilities_fourierScalarGradient                         !< calculate gradient of temperature field
call utilities_FFTvectorBackward
ce = 0
do k = 1, grid3; do j = 1, grid(2); do i = 1,grid(1)
do k = 1, cells3; do j = 1, cells(2); do i = 1,cells(1)
ce = ce + 1
vectorField_real(1:3,i,j,k) = matmul(homogenization_K_T(ce) - K_ref, vectorField_real(1:3,i,j,k))
end do; end do; end do

@ -334,7 +334,7 @@ subroutine formResidual(in,x_scal,r,dummy,err_PETSc)
call utilities_fourierVectorDivergence                       !< calculate temperature divergence in fourier field
call utilities_FFTscalarBackward
ce = 0
do k = 1, grid3; do j = 1, grid(2); do i = 1,grid(1)
do k = 1, cells3; do j = 1, cells(2); do i = 1,cells(1)
ce = ce + 1
scalarField_real(i,j,k) = params%Delta_t*(scalarField_real(i,j,k) + homogenization_f_T(ce)) &
+ homogenization_mu_T(ce) * (T_lastInc(i,j,k) - T_current(i,j,k)) &

@ -349,7 +349,7 @@ subroutine formResidual(in,x_scal,r,dummy,err_PETSc)
!--------------------------------------------------------------------------------------------------
! constructing residual
r = T_current - scalarField_real(1:grid(1),1:grid(2),1:grid3)
r = T_current - scalarField_real(1:cells(1),1:cells(2),1:cells3)
err_PETSc = 0
end subroutine formResidual

@ -366,7 +366,7 @@ subroutine updateReference()
K_ref = 0.0_pReal
mu_ref = 0.0_pReal
do ce = 1, product(grid(1:2))*grid3
do ce = 1, product(cells(1:2))*cells3
K_ref = K_ref + homogenization_K_T(ce)
mu_ref = mu_ref + homogenization_mu_T(ce)
end do

@ -29,9 +29,9 @@ module spectral_utilities
include 'fftw3-mpi.f03'
!--------------------------------------------------------------------------------------------------
! grid related information information
! grid related information
real(pReal), protected, public :: wgt                        !< weighting factor 1/Nelems
integer, protected, public :: grid1Red                       !< grid(1)/2
integer, protected, public :: grid1Red                       !< cells(1)/2
real(pReal), protected, public, dimension(3) :: scaledGeomSize !< scaled geometry size for calculation of divergence
!--------------------------------------------------------------------------------------------------

@ -86,7 +86,7 @@ module spectral_utilities
type, public :: tSolutionParams
real(pReal), dimension(3,3) :: stress_BC
logical, dimension(3,3) :: stress_mask
type(rotation) :: rotation_BC
type(tRotation) :: rotation_BC
real(pReal) :: Delta_t
end type tSolutionParams

@ -201,8 +201,8 @@ subroutine spectral_utilities_init
num_grid%get_asString('PETSc_options',defaultVal=''),err_PETSc)
CHKERRQ(err_PETSc)
grid1Red = grid(1)/2 + 1
wgt = 1.0/real(product(grid),pReal)
grid1Red = cells(1)/2 + 1
wgt = 1.0/real(product(cells),pReal)
num%memory_efficient = num_grid%get_asInt('memory_efficient', defaultVal=1) > 0 ! ToDo: should be logical in YAML file
num%divergence_correction = num_grid%get_asInt('divergence_correction', defaultVal=2)

@ -231,9 +231,9 @@ subroutine spectral_utilities_init
enddo
elseif (num%divergence_correction == 2) then
do j = 1, 3
if ( j /= int(minloc(geomSize/real(grid,pReal),1)) &
.and. j /= int(maxloc(geomSize/real(grid,pReal),1))) &
scaledGeomSize = geomSize/geomSize(j)*real(grid(j),pReal)
if ( j /= int(minloc(geomSize/real(cells,pReal),1)) &
.and. j /= int(maxloc(geomSize/real(cells,pReal),1))) &
scaledGeomSize = geomSize/geomSize(j)*real(cells(j),pReal)
enddo
else
scaledGeomSize = geomSize

@ -262,11 +262,11 @@ subroutine spectral_utilities_init
!--------------------------------------------------------------------------------------------------
! MPI allocation
gridFFTW = int(grid,C_INTPTR_T)
gridFFTW = int(cells,C_INTPTR_T)
alloc_local = fftw_mpi_local_size_3d(gridFFTW(3), gridFFTW(2), gridFFTW(1)/2 +1, &
PETSC_COMM_WORLD, local_K, local_K_offset)
allocate (xi1st (3,grid1Red,grid(2),grid3),source = cmplx(0.0_pReal,0.0_pReal,pReal))   ! frequencies for first derivatives, only half the size for first dimension
allocate (xi2nd (3,grid1Red,grid(2),grid3),source = cmplx(0.0_pReal,0.0_pReal,pReal))   ! frequencies for second derivatives, only half the size for first dimension
allocate (xi1st (3,grid1Red,cells(2),cells3),source = cmplx(0.0_pReal,0.0_pReal,pReal)) ! frequencies for first derivatives, only half the size for first dimension
allocate (xi2nd (3,grid1Red,cells(2),cells3),source = cmplx(0.0_pReal,0.0_pReal,pReal)) ! frequencies for second derivatives, only half the size for first dimension
tensorField = fftw_alloc_complex(tensorSize*alloc_local)
call c_f_pointer(tensorField, tensorField_real, [3_C_INTPTR_T,3_C_INTPTR_T, &

@ -327,27 +327,27 @@ subroutine spectral_utilities_init
!--------------------------------------------------------------------------------------------------
! calculation of discrete angular frequencies, ordered as in FFTW (wrap around)
do k = grid3Offset+1, grid3Offset+grid3
do k = cells3Offset+1, cells3Offset+cells3
k_s(3) = k - 1
if (k > grid(3)/2 + 1) k_s(3) = k_s(3) - grid(3)             ! running from 0,1,...,N/2,N/2+1,-N/2,-N/2+1,...,-1
do j = 1, grid(2)
if (k > cells(3)/2 + 1) k_s(3) = k_s(3) - cells(3)           ! running from 0,1,...,N/2,N/2+1,-N/2,-N/2+1,...,-1
do j = 1, cells(2)
k_s(2) = j - 1
if (j > grid(2)/2 + 1) k_s(2) = k_s(2) - grid(2)             ! running from 0,1,...,N/2,N/2+1,-N/2,-N/2+1,...,-1
if (j > cells(2)/2 + 1) k_s(2) = k_s(2) - cells(2)           ! running from 0,1,...,N/2,N/2+1,-N/2,-N/2+1,...,-1
do i = 1, grid1Red
k_s(1) = i - 1                                               ! symmetry, just running from 0,1,...,N/2,N/2+1
xi2nd(1:3,i,j,k-grid3Offset) = utilities_getFreqDerivative(k_s)
where(mod(grid,2)==0 .and. [i,j,k] == grid/2+1 .and. &
xi2nd(1:3,i,j,k-cells3Offset) = utilities_getFreqDerivative(k_s)
where(mod(cells,2)==0 .and. [i,j,k] == cells/2+1 .and. &
spectral_derivative_ID == DERIVATIVE_CONTINUOUS_ID)          ! for even grids, set the Nyquist Freq component to 0.0
xi1st(1:3,i,j,k-grid3Offset) = cmplx(0.0_pReal,0.0_pReal,pReal)
xi1st(1:3,i,j,k-cells3Offset) = cmplx(0.0_pReal,0.0_pReal,pReal)
elsewhere
xi1st(1:3,i,j,k-grid3Offset) = xi2nd(1:3,i,j,k-grid3Offset)
xi1st(1:3,i,j,k-cells3Offset) = xi2nd(1:3,i,j,k-cells3Offset)
endwhere
enddo; enddo; enddo
if (num%memory_efficient) then                               ! allocate just single fourth order tensor
if (num%memory_efficient) then                               ! allocate just single fourth order tensor
allocate (gamma_hat(3,3,3,3,1,1,1), source = cmplx(0.0_pReal,0.0_pReal,pReal))
else                                                         ! precalculation of gamma_hat field
allocate (gamma_hat(3,3,3,3,grid1Red,grid(2),grid3), source = cmplx(0.0_pReal,0.0_pReal,pReal))
allocate (gamma_hat(3,3,3,3,grid1Red,cells(2),cells3), source = cmplx(0.0_pReal,0.0_pReal,pReal))
endif
end subroutine spectral_utilities_init

@ -373,10 +373,10 @@ subroutine utilities_updateGamma(C)
if (.not. num%memory_efficient) then
gamma_hat = cmplx(0.0_pReal,0.0_pReal,pReal)                 ! for the singular point and any non invertible A
do k = grid3Offset+1, grid3Offset+grid3; do j = 1, grid(2); do i = 1, grid1Red
do k = cells3Offset+1, cells3Offset+cells3; do j = 1, cells(2); do i = 1, grid1Red
if (any([i,j,k] /= 1)) then                                  ! singular point at xi=(0.0,0.0,0.0) i.e. i=j=k=1
do concurrent (l = 1:3, m = 1:3)
xiDyad_cmplx(l,m) = conjg(-xi1st(l,i,j,k-grid3Offset))*xi1st(m,i,j,k-grid3Offset)
xiDyad_cmplx(l,m) = conjg(-xi1st(l,i,j,k-cells3Offset))*xi1st(m,i,j,k-cells3Offset)
end do
do concurrent(l = 1:3, m = 1:3)
temp33_complex(l,m) = sum(cmplx(C_ref(l,1:3,m,1:3),0.0_pReal)*xiDyad_cmplx)

@ -387,8 +387,8 @@ subroutine utilities_updateGamma(C)
call math_invert(A_inv, err, A)
temp33_complex = cmplx(A_inv(1:3,1:3),A_inv(1:3,4:6),pReal)
do concurrent(l=1:3, m=1:3, n=1:3, o=1:3)
gamma_hat(l,m,n,o,i,j,k-grid3Offset) = temp33_complex(l,n)* &
conjg(-xi1st(o,i,j,k-grid3Offset))*xi1st(m,i,j,k-grid3Offset)
gamma_hat(l,m,n,o,i,j,k-cells3Offset) = temp33_complex(l,n)* &
conjg(-xi1st(o,i,j,k-cells3Offset))*xi1st(m,i,j,k-cells3Offset)
end do
end if
end if

@ -405,7 +405,7 @@ end subroutine utilities_updateGamma
!--------------------------------------------------------------------------------------------------
subroutine utilities_FFTtensorForward
tensorField_real(1:3,1:3,grid(1)+1:grid1Red*2,:,:) = 0.0_pReal
tensorField_real(1:3,1:3,cells(1)+1:grid1Red*2,:,:) = 0.0_pReal
call fftw_mpi_execute_dft_r2c(planTensorForth,tensorField_real,tensorField_fourier)
end subroutine utilities_FFTtensorForward

@ -429,7 +429,7 @@ end subroutine utilities_FFTtensorBackward
!--------------------------------------------------------------------------------------------------
subroutine utilities_FFTscalarForward
scalarField_real(grid(1)+1:grid1Red*2,:,:) = 0.0_pReal
scalarField_real(cells(1)+1:grid1Red*2,:,:) = 0.0_pReal
call fftw_mpi_execute_dft_r2c(planScalarForth,scalarField_real,scalarField_fourier)
end subroutine utilities_FFTscalarForward

@ -454,7 +454,7 @@ end subroutine utilities_FFTscalarBackward
!--------------------------------------------------------------------------------------------------
subroutine utilities_FFTvectorForward
vectorField_real(1:3,grid(1)+1:grid1Red*2,:,:) = 0.0_pReal
vectorField_real(1:3,cells(1)+1:grid1Red*2,:,:) = 0.0_pReal
call fftw_mpi_execute_dft_r2c(planVectorForth,vectorField_real,vectorField_fourier)
end subroutine utilities_FFTvectorForward

@ -493,8 +493,8 @@ subroutine utilities_fourierGammaConvolution(fieldAim)
!--------------------------------------------------------------------------------------------------
! do the actual spectral method calculation (mechanical equilibrium)
memoryEfficient: if (num%memory_efficient) then
do k = 1, grid3; do j = 1, grid(2); do i = 1, grid1Red
if (any([i,j,k+grid3Offset] /= 1)) then                      ! singular point at xi=(0.0,0.0,0.0) i.e. i=j=k=1
do k = 1, cells3; do j = 1, cells(2); do i = 1, grid1Red
if (any([i,j,k+cells3Offset] /= 1)) then                     ! singular point at xi=(0.0,0.0,0.0) i.e. i=j=k=1
do concurrent(l = 1:3, m = 1:3)
xiDyad_cmplx(l,m) = conjg(-xi1st(l,i,j,k))*xi1st(m,i,j,k)
end do

@ -519,7 +519,7 @@ subroutine utilities_fourierGammaConvolution(fieldAim)
end if
end do; end do; end do
else memoryEfficient
do k = 1, grid3; do j = 1, grid(2); do i = 1,grid1Red
do k = 1, cells3; do j = 1, cells(2); do i = 1,grid1Red
do concurrent(l = 1:3, m = 1:3)
temp33_Complex(l,m) = sum(gamma_hat(l,m,1:3,1:3,i,j,k) * tensorField_fourier(1:3,1:3,i,j,k))
end do

@ -527,7 +527,7 @@ subroutine utilities_fourierGammaConvolution(fieldAim)
end do; end do; end do
end if memoryEfficient
if (grid3Offset == 0) tensorField_fourier(1:3,1:3,1,1,1) = cmplx(fieldAim/wgt,0.0_pReal,pReal)
if (cells3Offset == 0) tensorField_fourier(1:3,1:3,1,1,1) = cmplx(fieldAim/wgt,0.0_pReal,pReal)
end subroutine utilities_fourierGammaConvolution

@ -544,7 +544,7 @@ subroutine utilities_fourierGreenConvolution(D_ref, mu_ref, Delta_t)
!--------------------------------------------------------------------------------------------------
! do the actual spectral method calculation
do k = 1, grid3; do j = 1, grid(2) ;do i = 1, grid1Red
do k = 1, cells3; do j = 1, cells(2) ;do i = 1, grid1Red
GreenOp_hat = cmplx(1.0_pReal,0.0_pReal,pReal) &
/ (cmplx(mu_ref,0.0_pReal,pReal) + cmplx(Delta_t,0.0_pReal) &
* sum(conjg(xi1st(1:3,i,j,k))* matmul(cmplx(D_ref,0.0_pReal),xi1st(1:3,i,j,k))))

@ -571,7 +571,7 @@ real(pReal) function utilities_divergenceRMS()
!--------------------------------------------------------------------------------------------------
! calculating RMS divergence criterion in Fourier space
utilities_divergenceRMS = 0.0_pReal
do k = 1, grid3; do j = 1, grid(2)
do k = 1, cells3; do j = 1, cells(2)
do i = 2, grid1Red -1                                        ! Has somewhere a conj. complex counterpart. Therefore count it twice.
utilities_divergenceRMS = utilities_divergenceRMS &
+ 2.0_pReal*(sum (real(matmul(tensorField_fourier(1:3,1:3,i,j,k), & ! (sqrt(real(a)**2 + aimag(a)**2))**2 = real(a)**2 + aimag(a)**2, i.e. do not take square root and square again

@ -579,7 +579,7 @@ real(pReal) function utilities_divergenceRMS()
+sum(aimag(matmul(tensorField_fourier(1:3,1:3,i,j,k),&
conjg(-xi1st(1:3,i,j,k))*rescaledGeom))**2))
enddo
utilities_divergenceRMS = utilities_divergenceRMS &          ! these two layers (DC and Nyquist) do not have a conjugate complex counterpart (if grid(1) /= 1)
utilities_divergenceRMS = utilities_divergenceRMS &          ! these two layers (DC and Nyquist) do not have a conjugate complex counterpart (if cells(1) /= 1)
+ sum( real(matmul(tensorField_fourier(1:3,1:3,1 ,j,k), &
conjg(-xi1st(1:3,1,j,k))*rescaledGeom))**2) &
+ sum(aimag(matmul(tensorField_fourier(1:3,1:3,1 ,j,k), &

@ -589,7 +589,7 @@ real(pReal) function utilities_divergenceRMS()
+ sum(aimag(matmul(tensorField_fourier(1:3,1:3,grid1Red,j,k), &
conjg(-xi1st(1:3,grid1Red,j,k))*rescaledGeom))**2)
enddo; enddo
if (grid(1) == 1) utilities_divergenceRMS = utilities_divergenceRMS * 0.5_pReal  ! counted twice in case of grid(1) == 1
if (cells(1) == 1) utilities_divergenceRMS = utilities_divergenceRMS * 0.5_pReal ! counted twice in case of cells(1) == 1
call MPI_Allreduce(MPI_IN_PLACE,utilities_divergenceRMS,1_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
utilities_divergenceRMS = sqrt(utilities_divergenceRMS) * wgt ! RMS in real space calculated with Parsevals theorem from Fourier space

@ -616,7 +616,7 @@ real(pReal) function utilities_curlRMS()
! calculating max curl criterion in Fourier space
utilities_curlRMS = 0.0_pReal
do k = 1, grid3; do j = 1, grid(2);
do k = 1, cells3; do j = 1, cells(2);
do i = 2, grid1Red - 1
do l = 1, 3
curl_fourier(l,1) = (+tensorField_fourier(l,3,i,j,k)*xi1st(2,i,j,k)*rescaledGeom(2) &

@ -638,7 +638,7 @@ real(pReal) function utilities_curlRMS()
-tensorField_fourier(l,1,1,j,k)*xi1st(2,1,j,k)*rescaledGeom(2))
enddo
utilities_curlRMS = utilities_curlRMS &
+ sum(curl_fourier%re**2 + curl_fourier%im**2)               ! this layer (DC) does not have a conjugate complex counterpart (if grid(1) /= 1)
+ sum(curl_fourier%re**2 + curl_fourier%im**2)               ! this layer (DC) does not have a conjugate complex counterpart (if cells(1) /= 1)
do l = 1, 3
curl_fourier = (+tensorField_fourier(l,3,grid1Red,j,k)*xi1st(2,grid1Red,j,k)*rescaledGeom(2) &
-tensorField_fourier(l,2,grid1Red,j,k)*xi1st(3,grid1Red,j,k)*rescaledGeom(3))

@ -648,13 +648,13 @@ real(pReal) function utilities_curlRMS()
-tensorField_fourier(l,1,grid1Red,j,k)*xi1st(2,grid1Red,j,k)*rescaledGeom(2))
enddo
utilities_curlRMS = utilities_curlRMS &
+ sum(curl_fourier%re**2 + curl_fourier%im**2)               ! this layer (Nyquist) does not have a conjugate complex counterpart (if grid(1) /= 1)
+ sum(curl_fourier%re**2 + curl_fourier%im**2)               ! this layer (Nyquist) does not have a conjugate complex counterpart (if cells(1) /= 1)
enddo; enddo
call MPI_Allreduce(MPI_IN_PLACE,utilities_curlRMS,1_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
utilities_curlRMS = sqrt(utilities_curlRMS) * wgt
if (grid(1) == 1) utilities_curlRMS = utilities_curlRMS * 0.5_pReal  ! counted twice in case of grid(1) == 1
if (cells(1) == 1) utilities_curlRMS = utilities_curlRMS * 0.5_pReal ! counted twice in case of cells(1) == 1
end function utilities_curlRMS

@ -666,7 +666,7 @@ function utilities_maskedCompliance(rot_BC,mask_stress,C)
real(pReal), dimension(3,3,3,3) :: utilities_maskedCompliance !< masked compliance
real(pReal), intent(in), dimension(3,3,3,3) :: C             !< current average stiffness
type(rotation), intent(in) :: rot_BC                         !< rotation of load frame
type(tRotation), intent(in) :: rot_BC                        !< rotation of load frame
logical, intent(in), dimension(3,3) :: mask_stress           !< mask of stress BC
integer :: i, j

@ -736,7 +736,7 @@ subroutine utilities_fourierScalarGradient()
integer :: i, j, k
do k = 1, grid3; do j = 1, grid(2); do i = 1,grid1Red
do k = 1, cells3; do j = 1, cells(2); do i = 1,grid1Red
vectorField_fourier(1:3,i,j,k) = scalarField_fourier(i,j,k)*xi1st(1:3,i,j,k) ! ToDo: no -conjg?
enddo; enddo; enddo

@ -750,7 +750,7 @@ subroutine utilities_fourierVectorDivergence()
integer :: i, j, k
do k = 1, grid3; do j = 1, grid(2); do i = 1,grid1Red
do k = 1, cells3; do j = 1, cells(2); do i = 1,grid1Red
scalarField_fourier(i,j,k) = sum(vectorField_fourier(1:3,i,j,k)*conjg(-xi1st(1:3,i,j,k)))
enddo; enddo; enddo

@ -764,7 +764,7 @@ subroutine utilities_fourierVectorGradient()
integer :: i, j, k, m, n
do k = 1, grid3; do j = 1, grid(2); do i = 1,grid1Red
do k = 1, cells3; do j = 1, cells(2); do i = 1,grid1Red
do m = 1, 3; do n = 1, 3
tensorField_fourier(m,n,i,j,k) = vectorField_fourier(m,i,j,k)*xi1st(n,i,j,k)
enddo; enddo

@ -780,7 +780,7 @@ subroutine utilities_fourierTensorDivergence()
integer :: i, j, k
do k = 1, grid3; do j = 1, grid(2); do i = 1,grid1Red
do k = 1, cells3; do j = 1, cells(2); do i = 1,grid1Red
vectorField_fourier(:,i,j,k) = matmul(tensorField_fourier(:,:,i,j,k),conjg(-xi1st(:,i,j,k)))
enddo; enddo; enddo

@ -795,10 +795,10 @@ subroutine utilities_constitutiveResponse(P,P_av,C_volAvg,C_minmaxAvg,&
real(pReal), intent(out), dimension(3,3,3,3) :: C_volAvg, C_minmaxAvg !< average stiffness
real(pReal), intent(out), dimension(3,3) :: P_av             !< average PK stress
real(pReal), intent(out), dimension(3,3,grid(1),grid(2),grid3) :: P   !< PK stress
real(pReal), intent(in), dimension(3,3,grid(1),grid(2),grid3) :: F    !< deformation gradient target
real(pReal), intent(out), dimension(3,3,cells(1),cells(2),cells3) :: P !< PK stress
real(pReal), intent(in), dimension(3,3,cells(1),cells(2),cells3) :: F  !< deformation gradient target
real(pReal), intent(in) :: Delta_t                           !< loading time
type(rotation), intent(in), optional :: rotation_BC          !< rotation of load frame
type(tRotation), intent(in), optional :: rotation_BC         !< rotation of load frame
integer :: i

@ -810,15 +810,15 @@ subroutine utilities_constitutiveResponse(P,P_av,C_volAvg,C_minmaxAvg,&
print'(/,1x,a)', '... evaluating constitutive response ......................................'
flush(IO_STDOUT)
homogenization_F = reshape(F,[3,3,product(grid(1:2))*grid3])   ! set materialpoint target F to estimated field
homogenization_F = reshape(F,[3,3,product(cells(1:2))*cells3]) ! set materialpoint target F to estimated field
call homogenization_mechanical_response(Delta_t,[1,1],[1,product(grid(1:2))*grid3])   ! calculate P field
call homogenization_mechanical_response(Delta_t,[1,1],[1,product(cells(1:2))*cells3]) ! calculate P field
if (.not. terminallyIll) &
call homogenization_thermal_response(Delta_t,[1,1],[1,product(grid(1:2))*grid3])
call homogenization_thermal_response(Delta_t,[1,1],[1,product(cells(1:2))*cells3])
if (.not. terminallyIll) &
call homogenization_mechanical_response2(Delta_t,[1,1],[1,product(grid(1:2))*grid3])
call homogenization_mechanical_response2(Delta_t,[1,1],[1,product(cells(1:2))*cells3])
P = reshape(homogenization_P, [3,3,grid(1),grid(2),grid3])
P = reshape(homogenization_P, [3,3,cells(1),cells(2),cells3])
P_av = sum(sum(sum(P,dim=5),dim=4),dim=3) * wgt
call MPI_Allreduce(MPI_IN_PLACE,P_av,9_MPI_INTEGER_KIND,MPI_DOUBLE,MPI_SUM,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'

@ -833,7 +833,7 @@ subroutine utilities_constitutiveResponse(P,P_av,C_volAvg,C_minmaxAvg,&
dPdF_norm_max = 0.0_pReal
dPdF_min = huge(1.0_pReal)
dPdF_norm_min = huge(1.0_pReal)
do i = 1, product(grid(1:2))*grid3
do i = 1, product(cells(1:2))*cells3
if (dPdF_norm_max < sum(homogenization_dPdF(1:3,1:3,1:3,1:3,i)**2)) then
dPdF_max = homogenization_dPdF(1:3,1:3,1:3,1:3,i)
dPdF_norm_max = sum(homogenization_dPdF(1:3,1:3,1:3,1:3,i)**2)

@ -878,16 +878,16 @@ pure function utilities_calculateRate(heterogeneous,field0,field,dt,avRate)
dt                                                           !< Delta_t between field0 and field
logical, intent(in) :: &
heterogeneous                                                !< calculate field of rates
real(pReal), intent(in), dimension(3,3,grid(1),grid(2),grid3) :: &
real(pReal), intent(in), dimension(3,3,cells(1),cells(2),cells3) :: &
field0, &                                                    !< data of previous step
field                                                        !< data of current step
real(pReal), dimension(3,3,grid(1),grid(2),grid3) :: &
real(pReal), dimension(3,3,cells(1),cells(2),cells3) :: &
utilities_calculateRate
if (heterogeneous) then
utilities_calculateRate = (field-field0) / dt
else
utilities_calculateRate = spread(spread(spread(avRate,3,grid(1)),4,grid(2)),5,grid3)
utilities_calculateRate = spread(spread(spread(avRate,3,cells(1)),4,cells(2)),5,cells3)
endif
end function utilities_calculateRate

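Editor's note: a condensed usage sketch of utilities_calculateRate together with utilities_forwardField, mirroring the forward step of the polarisation solver shown earlier in this diff (variable names taken from those hunks, not newly introduced here):

    ! rate estimated from the last two increments, then used to extrapolate F over Delta_t
    Fdot = utilities_calculateRate(guess, &
                                   F_lastInc,reshape(F,[3,3,cells(1),cells(2),cells3]),Delta_t_old, &
                                   rotation_BC%rotate(F_aimDot,active=.true.))
    F    = reshape(utilities_forwardField(Delta_t,F_lastInc,Fdot, &
                                          rotation_BC%rotate(F_aim,active=.true.)), &   ! enforce rotated F_aim on average
                   [9,cells(1),cells(2),cells3])
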
@ -901,12 +901,12 @@ function utilities_forwardField(Delta_t,field_lastInc,rate,aim)
real(pReal), intent(in) :: &
Delta_t                                                      !< Delta_t of current step
real(pReal), intent(in), dimension(3,3,grid(1),grid(2),grid3) :: &
real(pReal), intent(in), dimension(3,3,cells(1),cells(2),cells3) :: &
field_lastInc, &                                             !< initial field
rate                                                         !< rate by which to forward
real(pReal), intent(in), optional, dimension(3,3) :: &
aim                                                          !< average field value aim
real(pReal), dimension(3,3,grid(1),grid(2),grid3) :: &
real(pReal), dimension(3,3,cells(1),cells(2),cells3) :: &
utilities_forwardField
real(pReal), dimension(3,3) :: fieldDiff                     !< <a + adot*t> - aim
integer(MPI_INTEGER_KIND) :: err_MPI

@ -918,7 +918,7 @@ function utilities_forwardField(Delta_t,field_lastInc,rate,aim)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
fieldDiff = fieldDiff - aim
utilities_forwardField = utilities_forwardField - &
spread(spread(spread(fieldDiff,3,grid(1)),4,grid(2)),5,grid3)
spread(spread(spread(fieldDiff,3,cells(1)),4,cells(2)),5,cells3)
endif
end function utilities_forwardField

@ -936,37 +936,37 @@ pure function utilities_getFreqDerivative(k_s)
select case (spectral_derivative_ID)
case (DERIVATIVE_CONTINUOUS_ID)
utilities_getFreqDerivative = cmplx(0.0_pReal, 2.0_pReal*PI*real(k_s,pReal)/geomSize,pReal)
utilities_getFreqDerivative = cmplx(0.0_pReal, TAU*real(k_s,pReal)/geomSize,pReal)
case (DERIVATIVE_CENTRAL_DIFF_ID)
utilities_getFreqDerivative = cmplx(0.0_pReal, sin(2.0_pReal*PI*real(k_s,pReal)/real(grid,pReal)), pReal)/ &
cmplx(2.0_pReal*geomSize/real(grid,pReal), 0.0_pReal, pReal)
utilities_getFreqDerivative = cmplx(0.0_pReal, sin(TAU*real(k_s,pReal)/real(cells,pReal)), pReal)/ &
cmplx(2.0_pReal*geomSize/real(cells,pReal), 0.0_pReal, pReal)
case (DERIVATIVE_FWBW_DIFF_ID)
utilities_getFreqDerivative(1) = &
cmplx(cos(2.0_pReal*PI*real(k_s(1),pReal)/real(grid(1),pReal)) - 1.0_pReal, &
sin(2.0_pReal*PI*real(k_s(1),pReal)/real(grid(1),pReal)), pReal)* &
cmplx(cos(2.0_pReal*PI*real(k_s(2),pReal)/real(grid(2),pReal)) + 1.0_pReal, &
sin(2.0_pReal*PI*real(k_s(2),pReal)/real(grid(2),pReal)), pReal)* &
cmplx(cos(2.0_pReal*PI*real(k_s(3),pReal)/real(grid(3),pReal)) + 1.0_pReal, &
sin(2.0_pReal*PI*real(k_s(3),pReal)/real(grid(3),pReal)), pReal)/ &
cmplx(4.0_pReal*geomSize(1)/real(grid(1),pReal), 0.0_pReal, pReal)
cmplx(cos(TAU*real(k_s(1),pReal)/real(cells(1),pReal)) - 1.0_pReal, &
sin(TAU*real(k_s(1),pReal)/real(cells(1),pReal)), pReal)* &
cmplx(cos(TAU*real(k_s(2),pReal)/real(cells(2),pReal)) + 1.0_pReal, &
sin(TAU*real(k_s(2),pReal)/real(cells(2),pReal)), pReal)* &
cmplx(cos(TAU*real(k_s(3),pReal)/real(cells(3),pReal)) + 1.0_pReal, &
sin(TAU*real(k_s(3),pReal)/real(cells(3),pReal)), pReal)/ &
cmplx(4.0_pReal*geomSize(1)/real(cells(1),pReal), 0.0_pReal, pReal)
utilities_getFreqDerivative(2) = &
cmplx(cos(2.0_pReal*PI*real(k_s(1),pReal)/real(grid(1),pReal)) + 1.0_pReal, &
sin(2.0_pReal*PI*real(k_s(1),pReal)/real(grid(1),pReal)), pReal)* &
cmplx(cos(2.0_pReal*PI*real(k_s(2),pReal)/real(grid(2),pReal)) - 1.0_pReal, &
sin(2.0_pReal*PI*real(k_s(2),pReal)/real(grid(2),pReal)), pReal)* &
cmplx(cos(2.0_pReal*PI*real(k_s(3),pReal)/real(grid(3),pReal)) + 1.0_pReal, &
sin(2.0_pReal*PI*real(k_s(3),pReal)/real(grid(3),pReal)), pReal)/ &
cmplx(4.0_pReal*geomSize(2)/real(grid(2),pReal), 0.0_pReal, pReal)
cmplx(cos(TAU*real(k_s(1),pReal)/real(cells(1),pReal)) + 1.0_pReal, &
sin(TAU*real(k_s(1),pReal)/real(cells(1),pReal)), pReal)* &
cmplx(cos(TAU*real(k_s(2),pReal)/real(cells(2),pReal)) - 1.0_pReal, &
sin(TAU*real(k_s(2),pReal)/real(cells(2),pReal)), pReal)* &
cmplx(cos(TAU*real(k_s(3),pReal)/real(cells(3),pReal)) + 1.0_pReal, &
sin(TAU*real(k_s(3),pReal)/real(cells(3),pReal)), pReal)/ &
cmplx(4.0_pReal*geomSize(2)/real(cells(2),pReal), 0.0_pReal, pReal)
utilities_getFreqDerivative(3) = &
cmplx(cos(2.0_pReal*PI*real(k_s(1),pReal)/real(grid(1),pReal)) + 1.0_pReal, &
sin(2.0_pReal*PI*real(k_s(1),pReal)/real(grid(1),pReal)), pReal)* &
cmplx(cos(2.0_pReal*PI*real(k_s(2),pReal)/real(grid(2),pReal)) + 1.0_pReal, &
sin(2.0_pReal*PI*real(k_s(2),pReal)/real(grid(2),pReal)), pReal)* &
cmplx(cos(2.0_pReal*PI*real(k_s(3),pReal)/real(grid(3),pReal)) - 1.0_pReal, &
sin(2.0_pReal*PI*real(k_s(3),pReal)/real(grid(3),pReal)), pReal)/ &
cmplx(4.0_pReal*geomSize(3)/real(grid(3),pReal), 0.0_pReal, pReal)
cmplx(cos(TAU*real(k_s(1),pReal)/real(cells(1),pReal)) + 1.0_pReal, &
sin(TAU*real(k_s(1),pReal)/real(cells(1),pReal)), pReal)* &
cmplx(cos(TAU*real(k_s(2),pReal)/real(cells(2),pReal)) + 1.0_pReal, &
sin(TAU*real(k_s(2),pReal)/real(cells(2),pReal)), pReal)* &
cmplx(cos(TAU*real(k_s(3),pReal)/real(cells(3),pReal)) - 1.0_pReal, &
sin(TAU*real(k_s(3),pReal)/real(cells(3),pReal)), pReal)/ &
cmplx(4.0_pReal*geomSize(3)/real(cells(3),pReal), 0.0_pReal, pReal)
end select
end function utilities_getFreqDerivative

@ -979,10 +979,10 @@ end function utilities_getFreqDerivative
!--------------------------------------------------------------------------------------------------
subroutine utilities_updateCoords(F)
real(pReal), dimension(3,3,grid(1),grid(2),grid3), intent(in) :: F
real(pReal), dimension(3, grid(1),grid(2),grid3) :: IPcoords
real(pReal), dimension(3, grid(1),grid(2),grid3+2) :: IPfluct_padded   ! Fluctuations of cell center displacement (padded along z for MPI)
real(pReal), dimension(3, grid(1)+1,grid(2)+1,grid3+1) :: nodeCoords
real(pReal), dimension(3,3,cells(1),cells(2),cells3), intent(in) :: F
real(pReal), dimension(3, cells(1),cells(2),cells3) :: IPcoords
real(pReal), dimension(3, cells(1),cells(2),cells3+2) :: IPfluct_padded ! Fluctuations of cell center displacement (padded along z for MPI)
real(pReal), dimension(3, cells(1)+1,cells(2)+1,cells3+1) :: nodeCoords
integer :: &
i,j,k,n, &
c

@ -1010,14 +1010,14 @@ subroutine utilities_updateCoords(F)
1, 1, 1, &
0, 1, 1 ], [3,8])
step = geomSize/real(grid, pReal)
step = geomSize/real(cells, pReal)
!--------------------------------------------------------------------------------------------------
! integration in Fourier space to get fluctuations of cell center displacements
tensorField_real(1:3,1:3,1:grid(1),1:grid(2),1:grid3) = F
tensorField_real(1:3,1:3,1:cells(1),1:cells(2),1:cells3) = F
call utilities_FFTtensorForward()
do k = 1, grid3; do j = 1, grid(2); do i = 1, grid1Red
if (any([i,j,k+grid3Offset] /= 1)) then
do k = 1, cells3; do j = 1, cells(2); do i = 1, grid1Red
if (any([i,j,k+cells3Offset] /= 1)) then
vectorField_fourier(1:3,i,j,k) = matmul(tensorField_fourier(1:3,1:3,i,j,k),xi2nd(1:3,i,j,k)) &
/ sum(conjg(-xi2nd(1:3,i,j,k))*xi2nd(1:3,i,j,k)) * cmplx(wgt,0.0,pReal)
else

@ -1029,25 +1029,25 @@ subroutine utilities_updateCoords(F)
!--------------------------------------------------------------------------------------------------
! average F
if (grid3Offset == 0) Favg = real(tensorField_fourier(1:3,1:3,1,1,1),pReal)*wgt
if (cells3Offset == 0) Favg = real(tensorField_fourier(1:3,1:3,1,1,1),pReal)*wgt
call MPI_Bcast(Favg,9_MPI_INTEGER_KIND,MPI_DOUBLE,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
!--------------------------------------------------------------------------------------------------
! pad cell center fluctuations along z-direction (needed when running MPI simulation)
IPfluct_padded(1:3,1:grid(1),1:grid(2),2:grid3+1) = vectorField_real(1:3,1:grid(1),1:grid(2),1:grid3)
c = product(shape(IPfluct_padded(:,:,:,1)))                  !< amount of data to transfer
IPfluct_padded(1:3,1:cells(1),1:cells(2),2:cells3+1) = vectorField_real(1:3,1:cells(1),1:cells(2),1:cells3)
c = product(shape(IPfluct_padded(:,:,:,1)))                  !< amount of data to transfer
rank_t = modulo(worldrank+1_MPI_INTEGER_KIND,worldsize)
rank_b = modulo(worldrank-1_MPI_INTEGER_KIND,worldsize)
! send bottom layer to process below
call MPI_Isend(IPfluct_padded(:,:,:,2),      c,MPI_DOUBLE,rank_b,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,request(1),err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
call MPI_Irecv(IPfluct_padded(:,:,:,grid3+2),c,MPI_DOUBLE,rank_t,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,request(2),err_MPI)
call MPI_Irecv(IPfluct_padded(:,:,:,cells3+2),c,MPI_DOUBLE,rank_t,0_MPI_INTEGER_KIND,MPI_COMM_WORLD,request(2),err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
! send top layer to process above
call MPI_Isend(IPfluct_padded(:,:,:,grid3+1),c,MPI_DOUBLE,rank_t,1_MPI_INTEGER_KIND,MPI_COMM_WORLD,request(3),err_MPI)
call MPI_Isend(IPfluct_padded(:,:,:,cells3+1),c,MPI_DOUBLE,rank_t,1_MPI_INTEGER_KIND,MPI_COMM_WORLD,request(3),err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'
call MPI_Irecv(IPfluct_padded(:,:,:,1),      c,MPI_DOUBLE,rank_b,1_MPI_INTEGER_KIND,MPI_COMM_WORLD,request(4),err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) error stop 'MPI error'

@ -1063,24 +1063,24 @@ subroutine utilities_updateCoords(F)
!--------------------------------------------------------------------------------------------------
! calculate nodal displacements
nodeCoords = 0.0_pReal
do k = 0,grid3; do j = 0,grid(2); do i = 0,grid(1)
nodeCoords(1:3,i+1,j+1,k+1) = matmul(Favg,step*(real([i,j,k+grid3Offset],pReal)))
do k = 0,cells3; do j = 0,cells(2); do i = 0,cells(1)
nodeCoords(1:3,i+1,j+1,k+1) = matmul(Favg,step*(real([i,j,k+cells3Offset],pReal)))
averageFluct: do n = 1,8
me = [i+neighbor(1,n),j+neighbor(2,n),k+neighbor(3,n)]
nodeCoords(1:3,i+1,j+1,k+1) = nodeCoords(1:3,i+1,j+1,k+1) &
+ IPfluct_padded(1:3,modulo(me(1)-1,grid(1))+1,modulo(me(2)-1,grid(2))+1,me(3)+1)*0.125_pReal
+ IPfluct_padded(1:3,modulo(me(1)-1,cells(1))+1,modulo(me(2)-1,cells(2))+1,me(3)+1)*0.125_pReal
enddo averageFluct
enddo; enddo; enddo
!--------------------------------------------------------------------------------------------------
! calculate cell center displacements
do k = 1,grid3; do j = 1,grid(2); do i = 1,grid(1)
do k = 1,cells3; do j = 1,cells(2); do i = 1,cells(1)
IPcoords(1:3,i,j,k) = vectorField_real(1:3,i,j,k) &
+ matmul(Favg,step*(real([i,j,k+grid3Offset],pReal)-0.5_pReal))
+ matmul(Favg,step*(real([i,j,k+cells3Offset],pReal)-0.5_pReal))
enddo; enddo; enddo
call discretization_setNodeCoords(reshape(NodeCoords,[3,(grid(1)+1)*(grid(2)+1)*(grid3+1)]))
call discretization_setIPcoords  (reshape(IPcoords, [3,grid(1)*grid(2)*grid3]))
call discretization_setNodeCoords(reshape(NodeCoords,[3,(cells(1)+1)*(cells(2)+1)*(cells3+1)]))
call discretization_setIPcoords  (reshape(IPcoords, [3,cells(1)*cells(2)*cells3]))
end subroutine utilities_updateCoords

@ -498,7 +498,7 @@ function lattice_C66_twin(Ntwin,C66,lattice,CoverA)
real(pReal), dimension(6,6,sum(Ntwin)) :: lattice_C66_twin
real(pReal), dimension(3,3,sum(Ntwin)):: coordinateSystem
type(rotation) :: R
type(tRotation) :: R
integer :: i

@ -537,7 +537,7 @@ function lattice_C66_trans(Ntrans,C_parent66,lattice_target, &
real(pReal), dimension(6,6) :: C_bar66, C_target_unrotated66
real(pReal), dimension(3,3,sum(Ntrans)) :: Q,S
type(rotation) :: R
type(tRotation) :: R
real(pReal) :: a_bcc, a_fcc, cOverA_trans
integer :: i

@ -599,7 +599,7 @@ function lattice_nonSchmidMatrix(Nslip,nonSchmidCoefficients,sense) result(nonSc
real(pReal), dimension(1:3,1:3,sum(Nslip)) :: coordinateSystem !< coordinate system of slip system
real(pReal), dimension(3) :: direction, normal, np
type(rotation) :: R
type(tRotation) :: R
integer :: i

@ -1976,7 +1976,7 @@ subroutine buildTransformationSystem(Q,S,Ntrans,cOverA,a_fcc,a_bcc)
a_bcc, &                                                     !< lattice parameter a for bcc target lattice
a_fcc                                                        !< lattice parameter a for fcc parent lattice
type(rotation) :: &
type(tRotation) :: &
R, &                                                         !< Pitsch rotation
B                                                            !< Rotation of fcc to Bain coordinate system
real(pReal), dimension(3,3) :: &

@ -18,7 +18,7 @@ module material
private
type :: tRotationContainer
type(Rotation), dimension(:), allocatable :: data
type(tRotation), dimension(:), allocatable :: data
end type
type :: tTensorContainer
real(pReal), dimension(:,:,:), allocatable :: data

55  src/math.f90

@ -21,10 +21,11 @@ module math
config
#endif
real(pReal), parameter :: PI = acos(-1.0_pReal)              !< ratio of a circle's circumference to its diameter
real(pReal), parameter :: INDEG = 180.0_pReal/PI             !< conversion from radian to degree
real(pReal), parameter :: INRAD = PI/180.0_pReal             !< conversion from degree to radian
complex(pReal), parameter :: TWOPIIMG = cmplx(0.0_pReal,2.0_pReal*PI) !< Re(0.0), Im(2xPi)
real(pReal), parameter :: &
PI = acos(-1.0_pReal), &                                     !< ratio of a circle's circumference to its diameter
TAU = 2.0_pReal*PI, &                                        !< ratio of a circle's circumference to its radius
INDEG = 360.0_pReal/TAU, &                                   !< conversion from radian to degree
INRAD = TAU/360.0_pReal                                      !< conversion from degree to radian
real(pReal), dimension(3,3), parameter :: &
math_I3 = reshape([&
@ -882,7 +883,7 @@ end function math_Voigt6to33_strain
!--------------------------------------------------------------------------------------------------
!> @brief Convert 3x3 tensor into 6 Voigt stress vector.
!> @brief Convert 3x3 stress tensor into 6 Voigt vector.
!--------------------------------------------------------------------------------------------------
pure function math_33toVoigt6_stress(sigma) result(sigma_tilde)

@ -897,7 +898,7 @@ end function math_33toVoigt6_stress
!--------------------------------------------------------------------------------------------------
!> @brief Convert 3x3 tensor into 6 Voigt strain vector.
!> @brief Convert 3x3 strain tensor into 6 Voigt vector.
!--------------------------------------------------------------------------------------------------
pure function math_33toVoigt6_strain(epsilon) result(epsilon_tilde)

@ -913,48 +914,48 @@ end function math_33toVoigt6_strain
!--------------------------------------------------------------------------------------------------
!> @brief Convert 6x6 Voigt matrix into symmetric 3x3x3x3 matrix.
!> @brief Convert 6x6 Voigt stiffness matrix into symmetric 3x3x3x3 tensor.
!--------------------------------------------------------------------------------------------------
pure function math_Voigt66to3333(m66)
pure function math_Voigt66to3333_stiffness(C_tilde) result(C)
real(pReal), dimension(3,3,3,3) :: math_Voigt66to3333
real(pReal), dimension(6,6), intent(in) :: m66               !< 6x6 matrix
real(pReal), dimension(3,3,3,3) :: C
real(pReal), dimension(6,6), intent(in) :: C_tilde
integer :: i,j
do i=1,6; do j=1,6
math_Voigt66to3333(MAPVOIGT(1,i),MAPVOIGT(2,i),MAPVOIGT(1,j),MAPVOIGT(2,j)) = m66(i,j)
math_Voigt66to3333(MAPVOIGT(2,i),MAPVOIGT(1,i),MAPVOIGT(1,j),MAPVOIGT(2,j)) = m66(i,j)
math_Voigt66to3333(MAPVOIGT(1,i),MAPVOIGT(2,i),MAPVOIGT(2,j),MAPVOIGT(1,j)) = m66(i,j)
math_Voigt66to3333(MAPVOIGT(2,i),MAPVOIGT(1,i),MAPVOIGT(2,j),MAPVOIGT(1,j)) = m66(i,j)
C(MAPVOIGT(1,i),MAPVOIGT(2,i),MAPVOIGT(1,j),MAPVOIGT(2,j)) = C_tilde(i,j)
C(MAPVOIGT(2,i),MAPVOIGT(1,i),MAPVOIGT(1,j),MAPVOIGT(2,j)) = C_tilde(i,j)
C(MAPVOIGT(1,i),MAPVOIGT(2,i),MAPVOIGT(2,j),MAPVOIGT(1,j)) = C_tilde(i,j)
C(MAPVOIGT(2,i),MAPVOIGT(1,i),MAPVOIGT(2,j),MAPVOIGT(1,j)) = C_tilde(i,j)
end do; end do
end function math_Voigt66to3333
end function math_Voigt66to3333_stiffness

!--------------------------------------------------------------------------------------------------
!> @brief Convert symmetric 3x3x3x3 matrix into 6x6 Voigt matrix.
!> @brief Convert 3x3x3x3 stiffness tensor into 6x6 Voigt matrix.
!--------------------------------------------------------------------------------------------------
pure function math_3333toVoigt66(m3333)
pure function math_3333toVoigt66_stiffness(C) result(C_tilde)
real(pReal), dimension(6,6) :: math_3333toVoigt66
real(pReal), dimension(3,3,3,3), intent(in) :: m3333         !< symmetric 3x3x3x3 matrix (no internal check)
real(pReal), dimension(6,6) :: C_tilde
real(pReal), dimension(3,3,3,3), intent(in) :: C
integer :: i,j
#ifndef __INTEL_COMPILER
do concurrent(i=1:6, j=1:6)
math_3333toVoigt66(i,j) = m3333(MAPVOIGT(1,i),MAPVOIGT(2,i),MAPVOIGT(1,j),MAPVOIGT(2,j))
C_tilde(i,j) = C(MAPVOIGT(1,i),MAPVOIGT(2,i),MAPVOIGT(1,j),MAPVOIGT(2,j))
end do
#else
do i=1,6; do j=1,6
math_3333toVoigt66(i,j) = m3333(MAPVOIGT(1,i),MAPVOIGT(2,i),MAPVOIGT(1,j),MAPVOIGT(2,j))
C_tilde(i,j) = C(MAPVOIGT(1,i),MAPVOIGT(2,i),MAPVOIGT(1,j),MAPVOIGT(2,j))
end do; end do
#endif
end function math_3333toVoigt66
end function math_3333toVoigt66_stiffness

!--------------------------------------------------------------------------------------------------
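Editor's note: both renamed conversions index through a Voigt pair table; a minimal sketch of the mapping they rely on (MAPVOIGT is defined elsewhere in math.f90 and is not part of this diff; the values below assume the conventional 11,22,33,23,13,12 ordering):

    integer, dimension(2,6), parameter :: MAPVOIGT = reshape([1,1, 2,2, 3,3, 3,2, 3,1, 2,1],[2,6])
    ! round trip checked in selfTest below:
    !   t66 == math_3333toVoigt66_stiffness(math_Voigt66to3333_stiffness(t66))
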
|
@ -984,7 +985,7 @@ impure elemental subroutine math_normal(x,mu,sigma)
|
|||
end if
|
||||
|
||||
call random_number(rnd)
|
||||
x = mu_ + sigma_ * sqrt(-2.0_pReal*log(1.0_pReal-rnd(1)))*cos(2.0_pReal*PI*(1.0_pReal - rnd(2)))
|
||||
x = mu_ + sigma_ * sqrt(-2.0_pReal*log(1.0_pReal-rnd(1)))*cos(TAU*(1.0_pReal - rnd(2)))
|
||||
|
||||
end subroutine math_normal
|
||||
|
||||
|
@ -1088,7 +1089,7 @@ pure function math_rotationalPart(F) result(R)
|
|||
if (dNeq0(x)) then
|
||||
Phi = acos(math_clip((I_C(1)**3 -4.5_pReal*I_C(1)*I_C(2) +13.5_pReal*I_C(3))/x,-1.0_pReal,1.0_pReal))
|
||||
lambda = I_C(1) +(2.0_pReal * sqrt(math_clip(I_C(1)**2-3.0_pReal*I_C(2),0.0_pReal))) &
|
||||
*cos((Phi-2.0_pReal * PI*[1.0_pReal,2.0_pReal,3.0_pReal])/3.0_pReal)
|
||||
*cos((Phi-TAU*[1.0_pReal,2.0_pReal,3.0_pReal])/3.0_pReal)
|
||||
lambda = sqrt(math_clip(lambda,0.0_pReal)/3.0_pReal)
|
||||
else
|
||||
lambda = sqrt(I_C(1)/3.0_pReal)
|
||||
|
@ -1154,8 +1155,8 @@ pure function math_eigvalsh33(m)
|
|||
phi=acos(math_clip(-Q/rho*0.5_pReal,-1.0_pReal,1.0_pReal))
|
||||
math_eigvalsh33 = 2.0_pReal*rho**(1.0_pReal/3.0_pReal)* &
|
||||
[cos( phi /3.0_pReal), &
|
||||
cos((phi+2.0_pReal*PI)/3.0_pReal), &
|
||||
cos((phi+4.0_pReal*PI)/3.0_pReal) &
|
||||
cos((phi+TAU)/3.0_pReal), &
|
||||
cos((phi+2.0_pReal*TAU)/3.0_pReal) &
|
||||
] &
|
||||
+ I(1)/3.0_pReal
|
||||
endif
|
||||
|
@ -1343,7 +1344,7 @@ subroutine selfTest
|
|||
if (any(dNeq(math_sym3333to66(math_66toSym3333(t66)),t66,1.0e-15_pReal))) &
|
||||
error stop 'math_sym3333to66/math_66toSym3333'
|
||||
|
||||
if (any(dNeq(math_3333toVoigt66(math_Voigt66to3333(t66)),t66,1.0e-15_pReal))) &
|
||||
if (any(dNeq(math_3333toVoigt66_stiffness(math_Voigt66to3333_stiffness(t66)),t66,1.0e-15_pReal))) &
|
||||
error stop 'math_3333toVoigt66/math_Voigt66to3333'
|
||||
|
||||
call random_number(v6)
|
||||
|
|
|
@ -52,13 +52,13 @@ contains
!--------------------------------------------------------------------------------------------------
subroutine parallelization_init
integer(MPI_INTEGER_KIND) :: err_MPI, typeSize
integer(MPI_INTEGER_KIND) :: err_MPI, typeSize, version, subversion, devNull
character(len=4) :: rank_str
character(len=MPI_MAX_LIBRARY_VERSION_STRING) :: MPI_library_version
!$ integer :: got_env, threadLevel
!$ integer(pI32) :: OMP_NUM_THREADS
!$ character(len=6) NumThreadsString
PetscErrorCode :: err_PETSc
#ifdef _OPENMP
! If openMP is enabled, check if the MPI library supports it and initialize accordingly.

@ -86,12 +86,22 @@ subroutine parallelization_init
if (err_MPI /= 0_MPI_INTEGER_KIND) &
error stop 'Could not determine worldrank'
if (worldrank == 0) print'(/,1x,a)', '<<<+- parallelization init -+>>>'
if (worldrank == 0) then
print'(/,1x,a)', '<<<+- parallelization init -+>>>'
call MPI_Get_library_version(MPI_library_version,devNull,err_MPI)
print'(/,1x,a)', trim(MPI_library_version)
call MPI_Get_version(version,subversion,err_MPI)
print'(1x,a,i0,a,i0)', 'MPI standard: ',version,'.',subversion
#ifdef _OPENMP
print'(1x,a,i0)', 'OpenMP version: ',openmp_version
#endif
end if
call MPI_Comm_size(MPI_COMM_WORLD,worldsize,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) &
error stop 'Could not determine worldsize'
if (worldrank == 0) print'(/,1x,a,i3)', 'MPI processes: ',worldsize
if (worldrank == 0) print'(/,1x,a,i0)', 'MPI processes: ',worldsize
call MPI_Type_size(MPI_INTEGER,typeSize,err_MPI)
if (err_MPI /= 0_MPI_INTEGER_KIND) &

@ -128,7 +138,7 @@ subroutine parallelization_init
!$ OMP_NUM_THREADS = 4_pI32
!$ endif
!$ endif
!$ print'(1x,a,1x,i2)', 'OMP_NUM_THREADS:',OMP_NUM_THREADS
!$ print'(1x,a,i0)', 'OMP_NUM_THREADS: ',OMP_NUM_THREADS
!$ call omp_set_num_threads(OMP_NUM_THREADS)
end subroutine parallelization_init

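Editor's note: a standalone sketch of the version reporting added to parallelization_init above; this is a hypothetical test program using only standard MPI calls, not part of the merge request:

    program mpi_version_report
      use mpi_f08
      implicit none
      integer :: version, subversion, resultlen, err
      character(len=MPI_MAX_LIBRARY_VERSION_STRING) :: lib
      call MPI_Init(err)
      call MPI_Get_library_version(lib,resultlen,err)        ! vendor/library string
      call MPI_Get_version(version,subversion,err)           ! supported MPI standard
      print'(1x,a)', trim(lib)
      print'(1x,a,i0,a,i0)', 'MPI standard: ',version,'.',subversion
      call MPI_Finalize(err)
    end program mpi_version_report
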
@ -323,7 +323,6 @@ module phase
phase_restore, &
plastic_nonlocal_updateCompatibility, &
converged, &
crystallite_init, &
phase_mechanical_constitutive, &
phase_thermal_constitutive, &
phase_damage_constitutive, &

@ -401,6 +400,8 @@ subroutine phase_init
call damage_init
call thermal_init(phases)

call crystallite_init()

end subroutine phase_init

@ -408,7 +409,7 @@ end subroutine phase_init
!> @brief Allocate the components of the state structure for a given phase
!--------------------------------------------------------------------------------------------------
subroutine phase_allocateState(state, &
NEntries,sizeState,sizeDotState,sizeDeltaState)
NEntries,sizeState,sizeDotState,sizeDeltaState,offsetDeltaState)
class(tState), intent(inout) :: &
state

@ -417,12 +418,17 @@ subroutine phase_allocateState(state, &
sizeState, &
sizeDotState, &
sizeDeltaState
integer, intent(in), optional :: &
offsetDeltaState
state%sizeState = sizeState
state%sizeDotState = sizeDotState
state%sizeDeltaState = sizeDeltaState
state%offsetDeltaState = sizeState-sizeDeltaState            ! deltaState occupies latter part of state by definition
if (present(offsetDeltaState)) then
state%offsetDeltaState = offsetDeltaState                    ! ToDo: this is a fix for broken nonlocal
else
state%offsetDeltaState = sizeState-sizeDeltaState            ! deltaState occupies latter part of state by definition
end if
allocate(state%atol   (sizeState),          source=0.0_pReal)
allocate(state%state0 (sizeState,NEntries), source=0.0_pReal)
@ -432,7 +438,6 @@ subroutine phase_allocateState(state, &
|
|||
|
||||
allocate(state%deltaState (sizeDeltaState,NEntries), source=0.0_pReal)
|
||||
|
||||
|
||||
end subroutine phase_allocateState
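Aside (illustrative sketch, not part of this changeset): the new optional offsetDeltaState argument follows the usual present()-based defaulting pattern. A self-contained reduction of just that logic; the names here are hypothetical stand-ins.

program optional_default_demo
  implicit none
  print *, offset(10, 3)      ! default: deltaState occupies the tail of state -> 7
  print *, offset(10, 3, 0)   ! explicit override, as the nonlocal model now requests -> 0
contains
  integer function offset(sizeState, sizeDeltaState, offsetDeltaState)
    integer, intent(in) :: sizeState, sizeDeltaState
    integer, intent(in), optional :: offsetDeltaState
    if (present(offsetDeltaState)) then
      offset = offsetDeltaState
    else
      offset = sizeState - sizeDeltaState                   ! deltaState at the end by convention
    end if
  end function offset
end program optional_default_demo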
|
||||
|
||||
|
||||
|
@ -496,22 +501,13 @@ subroutine crystallite_init()
|
|||
ce, &
|
||||
co, & !< counter in integration point component loop
|
||||
ip, & !< counter in integration point loop
|
||||
el, & !< counter in element loop
|
||||
cMax, & !< maximum number of integration point components
|
||||
iMax, & !< maximum number of integration points
|
||||
eMax !< maximum number of elements
|
||||
el !< counter in element loop
|
||||
|
||||
class(tNode), pointer :: &
|
||||
num_crystallite, &
|
||||
phases
|
||||
|
||||
|
||||
print'(/,1x,a)', '<<<+- crystallite init -+>>>'
|
||||
|
||||
cMax = homogenization_maxNconstituents
|
||||
iMax = discretization_nIPs
|
||||
eMax = discretization_Nelems
|
||||
|
||||
num_crystallite => config_numerics%get('crystallite',defaultVal=emptyDict)
|
||||
|
||||
num%subStepMinCryst = num_crystallite%get_asFloat ('subStepMin', defaultVal=1.0e-3_pReal)
|
||||
|
@ -545,15 +541,9 @@ subroutine crystallite_init()
|
|||
|
||||
phases => config_material%get('phase')
|
||||
|
||||
print'(/,a42,1x,i10)', ' # of elements: ', eMax
|
||||
print'( a42,1x,i10)', ' # of integration points/element: ', iMax
|
||||
print'( a42,1x,i10)', 'max # of constituents/integration point: ', cMax
|
||||
flush(IO_STDOUT)
|
||||
|
||||
|
||||
!$OMP PARALLEL DO PRIVATE(ce)
|
||||
do el = 1, eMax
|
||||
do ip = 1, iMax
|
||||
do el = 1, discretization_Nelems
|
||||
do ip = 1, discretization_nIPs
|
||||
ce = (el-1)*discretization_nIPs + ip
|
||||
do co = 1,homogenization_Nconstituents(material_homogenizationID(ce))
|
||||
call crystallite_orientations(co,ip,el)
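Aside (illustrative sketch, not part of this changeset): the loop above flattens the (el,ip) pair into a single cell index ce. The same arithmetic as a standalone check, with a stand-in value for discretization_nIPs.

program cell_index_demo
  implicit none
  integer, parameter :: nIPs = 4           ! stand-in for discretization_nIPs
  integer :: el, ip, ce

  do el = 1, 2
    do ip = 1, nIPs
      ce = (el-1)*nIPs + ip                ! same flattening as in crystallite_init
      print '(3(a,i0))', 'el=', el, ' ip=', ip, ' ce=', ce
    end do
  end do
end program cell_index_demo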
|
||||
|
|
|
@ -40,8 +40,6 @@ submodule(phase) mechanical
|
|||
integer(kind(PLASTIC_undefined_ID)), dimension(:), allocatable :: &
|
||||
phase_plasticity !< plasticity of each phase
|
||||
|
||||
integer :: phase_plasticity_maxSizeDotState
|
||||
|
||||
interface
|
||||
|
||||
module subroutine eigen_init(phases)
|
||||
|
@ -81,7 +79,7 @@ submodule(phase) mechanical
|
|||
en
|
||||
end subroutine plastic_isotropic_LiAndItsTangent
|
||||
|
||||
module function plastic_dotState(subdt,co,ip,el,ph,en) result(broken)
|
||||
module function plastic_dotState(subdt,co,ip,el,ph,en) result(dotState)
|
||||
integer, intent(in) :: &
|
||||
co, & !< component-ID of integration point
|
||||
ip, & !< integration point
|
||||
|
@ -90,7 +88,8 @@ submodule(phase) mechanical
|
|||
en
|
||||
real(pReal), intent(in) :: &
|
||||
subdt !< timestep
|
||||
logical :: broken
|
||||
real(pReal), dimension(plasticState(ph)%sizeDotState) :: &
|
||||
dotState
|
||||
end function plastic_dotState
|
||||
|
||||
module function plastic_deltaState(ph, en) result(broken)
|
||||
|
@ -296,8 +295,6 @@ module subroutine mechanical_init(phases)
|
|||
do ph = 1,phases%length
|
||||
plasticState(ph)%state0 = plasticState(ph)%state
|
||||
enddo
|
||||
phase_plasticity_maxSizeDotState = maxval(plasticState%sizeDotState)
|
||||
|
||||
|
||||
num_crystallite => config_numerics%get('crystallite',defaultVal=emptyDict)
|
||||
|
||||
|
@ -588,9 +585,9 @@ function integrateStateFPI(F_0,F,subFp0,subFi0,subState0,Delta_t,co,ip,el) resul
|
|||
real(pReal), intent(in),dimension(:) :: subState0
|
||||
real(pReal), intent(in) :: Delta_t
|
||||
integer, intent(in) :: &
|
||||
el, & !< element index in element loop
|
||||
ip, & !< integration point index in ip loop
|
||||
co !< grain index in grain loop
|
||||
el, & !< element index in element loop
|
||||
ip, & !< integration point index in ip loop
|
||||
co !< grain index in grain loop
|
||||
logical :: &
|
||||
broken
|
||||
|
||||
|
@ -601,43 +598,43 @@ function integrateStateFPI(F_0,F,subFp0,subFi0,subState0,Delta_t,co,ip,el) resul
|
|||
sizeDotState
|
||||
real(pReal) :: &
|
||||
zeta
|
||||
real(pReal), dimension(phase_plasticity_maxSizeDotState) :: &
|
||||
r ! state residuum
|
||||
real(pReal), dimension(phase_plasticity_maxSizeDotState,2) :: &
|
||||
real(pReal), dimension(plasticState(material_phaseID(co,(el-1)*discretization_nIPs+ip))%sizeDotState) :: &
|
||||
r, & ! state residuum
|
||||
dotState
|
||||
real(pReal), dimension(plasticState(material_phaseID(co,(el-1)*discretization_nIPs+ip))%sizeDotState,2) :: &
|
||||
dotState_last
|
||||
|
||||
|
||||
ph = material_phaseID(co,(el-1)*discretization_nIPs + ip)
|
||||
en = material_phaseEntry(co,(el-1)*discretization_nIPs + ip)
|
||||
broken = .true.
|
||||
|
||||
broken = plastic_dotState(Delta_t, co,ip,el,ph,en)
|
||||
if(broken) return
|
||||
dotState = plastic_dotState(Delta_t, co,ip,el,ph,en)
|
||||
if (any(IEEE_is_NaN(dotState))) return
|
||||
|
||||
sizeDotState = plasticState(ph)%sizeDotState
|
||||
plasticState(ph)%state(1:sizeDotState,en) = subState0 &
|
||||
+ plasticState(ph)%dotState (1:sizeDotState,en) * Delta_t
|
||||
plasticState(ph)%state(1:sizeDotState,en) = subState0 + dotState * Delta_t
|
||||
|
||||
iteration: do NiterationState = 1, num%nState
|
||||
|
||||
dotState(1:sizeDotState,2) = merge(dotState(1:sizeDotState,1),0.0, nIterationState > 1)
|
||||
dotState(1:sizeDotState,1) = plasticState(ph)%dotState(:,en)
|
||||
dotState_last(1:sizeDotState,2) = merge(dotState_last(1:sizeDotState,1),0.0, nIterationState > 1)
|
||||
dotState_last(1:sizeDotState,1) = dotState
|
||||
|
||||
broken = integrateStress(F,subFp0,subFi0,Delta_t,co,ip,el)
|
||||
if(broken) exit iteration
|
||||
|
||||
broken = plastic_dotState(Delta_t, co,ip,el,ph,en)
|
||||
if(broken) exit iteration
|
||||
dotState = plastic_dotState(Delta_t, co,ip,el,ph,en)
|
||||
if (any(IEEE_is_NaN(dotState))) exit iteration
|
||||
|
||||
zeta = damper(plasticState(ph)%dotState(:,en),dotState(1:sizeDotState,1),&
|
||||
dotState(1:sizeDotState,2))
|
||||
plasticState(ph)%dotState(:,en) = plasticState(ph)%dotState(:,en) * zeta &
|
||||
+ dotState(1:sizeDotState,1) * (1.0_pReal - zeta)
|
||||
r(1:sizeDotState) = plasticState(ph)%state(1:sizeDotState,en) &
|
||||
- subState0 &
|
||||
- plasticState(ph)%dotState(1:sizeDotState,en) * Delta_t
|
||||
plasticState(ph)%state(1:sizeDotState,en) = plasticState(ph)%state(1:sizeDotState,en) &
|
||||
- r(1:sizeDotState)
|
||||
if (converged(r(1:sizeDotState),plasticState(ph)%state(1:sizeDotState,en),plasticState(ph)%atol(1:sizeDotState))) then
|
||||
zeta = damper(dotState,dotState_last(1:sizeDotState,1),dotState_last(1:sizeDotState,2))
|
||||
dotState = dotState * zeta &
|
||||
+ dotState_last(1:sizeDotState,1) * (1.0_pReal - zeta)
|
||||
r = plasticState(ph)%state(1:sizeDotState,en) &
|
||||
- subState0 &
|
||||
- dotState * Delta_t
|
||||
plasticState(ph)%state(1:sizeDotState,en) = plasticState(ph)%state(1:sizeDotState,en) - r
|
||||
|
||||
if (converged(r,plasticState(ph)%state(1:sizeDotState,en),plasticState(ph)%atol(1:sizeDotState))) then
|
||||
broken = plastic_deltaState(ph,en)
|
||||
exit iteration
|
||||
endif
|
||||
|
@ -652,19 +649,20 @@ function integrateStateFPI(F_0,F,subFp0,subFi0,subState0,Delta_t,co,ip,el) resul
|
|||
!--------------------------------------------------------------------------------------------------
|
||||
real(pReal) pure function damper(omega_0,omega_1,omega_2)
|
||||
|
||||
real(pReal), dimension(:), intent(in) :: &
|
||||
omega_0, omega_1, omega_2
|
||||
real(pReal), dimension(:), intent(in) :: &
|
||||
omega_0, omega_1, omega_2
|
||||
|
||||
real(pReal) :: dot_prod12, dot_prod22
|
||||
real(pReal) :: dot_prod12, dot_prod22
|
||||
|
||||
dot_prod12 = dot_product(omega_0-omega_1, omega_1-omega_2)
|
||||
dot_prod22 = dot_product(omega_1-omega_2, omega_1-omega_2)
|
||||
|
||||
if (min(dot_product(omega_0,omega_1),dot_prod12) < 0.0_pReal .and. dot_prod22 > 0.0_pReal) then
|
||||
damper = 0.75_pReal + 0.25_pReal * tanh(2.0_pReal + 4.0_pReal * dot_prod12 / dot_prod22)
|
||||
else
|
||||
damper = 1.0_pReal
|
||||
endif
|
||||
dot_prod12 = dot_product(omega_0-omega_1, omega_1-omega_2)
|
||||
dot_prod22 = dot_product(omega_1-omega_2, omega_1-omega_2)
|
||||
|
||||
if (min(dot_product(omega_0,omega_1),dot_prod12) < 0.0_pReal .and. dot_prod22 > 0.0_pReal) then
|
||||
damper = 0.75_pReal + 0.25_pReal * tanh(2.0_pReal + 4.0_pReal * dot_prod12 / dot_prod22)
|
||||
else
|
||||
damper = 1.0_pReal
|
||||
endif
|
||||
|
||||
end function damper
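Aside (illustrative sketch, not part of this changeset): the damper keeps the fixed-point iteration from oscillating by blending the new rate with the previous one whenever successive rate changes point in opposite directions. A standalone demonstration of that behavior, reusing the same formula with hypothetical input vectors.

program damper_demo
  implicit none
  integer, parameter :: pReal = selected_real_kind(15)
  real(pReal), dimension(2) :: om0, om1, om2
  real(pReal) :: zeta

  om0 = [ 1.0_pReal, 0.0_pReal]            ! current rate
  om1 = [-1.0_pReal, 0.0_pReal]            ! previous rate
  om2 = [ 1.0_pReal, 0.0_pReal]            ! rate before that: the iteration is oscillating
  zeta = damper(om0, om1, om2)
  print '(a,f5.2)', 'damping factor: ', zeta                ! < 1, i.e. blend with the old rate
contains
  real(pReal) pure function damper(omega_0, omega_1, omega_2)
    real(pReal), dimension(:), intent(in) :: omega_0, omega_1, omega_2
    real(pReal) :: dot_prod12, dot_prod22
    dot_prod12 = dot_product(omega_0-omega_1, omega_1-omega_2)
    dot_prod22 = dot_product(omega_1-omega_2, omega_1-omega_2)
    if (min(dot_product(omega_0,omega_1),dot_prod12) < 0.0_pReal .and. dot_prod22 > 0.0_pReal) then
      damper = 0.75_pReal + 0.25_pReal*tanh(2.0_pReal + 4.0_pReal*dot_prod12/dot_prod22)
    else
      damper = 1.0_pReal
    end if
  end function damper
end program damper_demo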
|
||||
|
||||
|
@ -686,6 +684,8 @@ function integrateStateEuler(F_0,F,subFp0,subFi0,subState0,Delta_t,co,ip,el) res
|
|||
logical :: &
|
||||
broken
|
||||
|
||||
real(pReal), dimension(plasticState(material_phaseID(co,(el-1)*discretization_nIPs+ip))%sizeDotState) :: &
|
||||
dotState
|
||||
integer :: &
|
||||
ph, &
|
||||
en, &
|
||||
|
@ -694,13 +694,14 @@ function integrateStateEuler(F_0,F,subFp0,subFi0,subState0,Delta_t,co,ip,el) res
|
|||
|
||||
ph = material_phaseID(co,(el-1)*discretization_nIPs + ip)
|
||||
en = material_phaseEntry(co,(el-1)*discretization_nIPs + ip)
|
||||
broken = .true.
|
||||
|
||||
broken = plastic_dotState(Delta_t, co,ip,el,ph,en)
|
||||
if(broken) return
|
||||
dotState = plastic_dotState(Delta_t, co,ip,el,ph,en)
|
||||
if (any(IEEE_is_NaN(dotState))) return
|
||||
|
||||
sizeDotState = plasticState(ph)%sizeDotState
|
||||
plasticState(ph)%state(1:sizeDotState,en) = subState0 &
|
||||
+ plasticState(ph)%dotState(1:sizeDotState,en) * Delta_t
|
||||
+ dotState * Delta_t
|
||||
|
||||
broken = plastic_deltaState(ph,en)
|
||||
if(broken) return
|
||||
|
@ -729,20 +730,23 @@ function integrateStateAdaptiveEuler(F_0,F,subFp0,subFi0,subState0,Delta_t,co,ip
|
|||
ph, &
|
||||
en, &
|
||||
sizeDotState
|
||||
real(pReal), dimension(phase_plasticity_maxSizeDotState) :: residuum_plastic
|
||||
real(pReal), dimension(plasticState(material_phaseID(co,(el-1)*discretization_nIPs+ip))%sizeDotState) :: &
|
||||
r, &
|
||||
dotState
|
||||
|
||||
|
||||
ph = material_phaseID(co,(el-1)*discretization_nIPs + ip)
|
||||
en = material_phaseEntry(co,(el-1)*discretization_nIPs + ip)
|
||||
broken = .true.
|
||||
|
||||
broken = plastic_dotState(Delta_t, co,ip,el,ph,en)
|
||||
if(broken) return
|
||||
dotState = plastic_dotState(Delta_t, co,ip,el,ph,en)
|
||||
if (any(IEEE_is_NaN(dotState))) return
|
||||
|
||||
sizeDotState = plasticState(ph)%sizeDotState
|
||||
|
||||
residuum_plastic(1:sizeDotState) = - plasticState(ph)%dotstate(1:sizeDotState,en) * 0.5_pReal * Delta_t
|
||||
r = - dotState * 0.5_pReal * Delta_t
|
||||
plasticState(ph)%state(1:sizeDotState,en) = subState0 &
|
||||
+ plasticState(ph)%dotstate(1:sizeDotState,en) * Delta_t
|
||||
+ dotState * Delta_t
|
||||
|
||||
broken = plastic_deltaState(ph,en)
|
||||
if(broken) return
|
||||
|
@ -750,10 +754,10 @@ function integrateStateAdaptiveEuler(F_0,F,subFp0,subFi0,subState0,Delta_t,co,ip
|
|||
broken = integrateStress(F,subFp0,subFi0,Delta_t,co,ip,el)
|
||||
if(broken) return
|
||||
|
||||
broken = plastic_dotState(Delta_t, co,ip,el,ph,en)
|
||||
if(broken) return
|
||||
dotState = plastic_dotState(Delta_t, co,ip,el,ph,en)
|
||||
if (any(IEEE_is_NaN(dotState))) return
|
||||
|
||||
broken = .not. converged(residuum_plastic(1:sizeDotState) + 0.5_pReal * plasticState(ph)%dotState(:,en) * Delta_t, &
|
||||
broken = .not. converged(r + 0.5_pReal * dotState * Delta_t, &
|
||||
plasticState(ph)%state(1:sizeDotState,en), &
|
||||
plasticState(ph)%atol(1:sizeDotState))
|
||||
|
||||
|
@ -847,44 +851,48 @@ function integrateStateRK(F_0,F,subFp0,subFi0,subState0,Delta_t,co,ip,el,A,B,C,D
|
|||
ph, &
|
||||
en, &
|
||||
sizeDotState
|
||||
real(pReal), dimension(phase_plasticity_maxSizeDotState,size(B)) :: plastic_RKdotState
|
||||
real(pReal), dimension(plasticState(material_phaseID(co,(el-1)*discretization_nIPs+ip))%sizeDotState) :: &
|
||||
dotState
|
||||
real(pReal), dimension(plasticState(material_phaseID(co,(el-1)*discretization_nIPs+ip))%sizeDotState,size(B)) :: &
|
||||
plastic_RKdotState
|
||||
|
||||
|
||||
ph = material_phaseID(co,(el-1)*discretization_nIPs + ip)
|
||||
en = material_phaseEntry(co,(el-1)*discretization_nIPs + ip)
|
||||
broken = .true.
|
||||
|
||||
broken = plastic_dotState(Delta_t,co,ip,el,ph,en)
|
||||
if(broken) return
|
||||
dotState = plastic_dotState(Delta_t, co,ip,el,ph,en)
|
||||
if (any(IEEE_is_NaN(dotState))) return
|
||||
|
||||
sizeDotState = plasticState(ph)%sizeDotState
|
||||
|
||||
do stage = 1, size(A,1)
|
||||
|
||||
plastic_RKdotState(1:sizeDotState,stage) = plasticState(ph)%dotState(:,en)
|
||||
plasticState(ph)%dotState(:,en) = A(1,stage) * plastic_RKdotState(1:sizeDotState,1)
|
||||
plastic_RKdotState(1:sizeDotState,stage) = dotState
|
||||
dotState = A(1,stage) * plastic_RKdotState(1:sizeDotState,1)
|
||||
|
||||
do n = 2, stage
|
||||
plasticState(ph)%dotState(:,en) = plasticState(ph)%dotState(:,en) &
|
||||
+ A(n,stage) * plastic_RKdotState(1:sizeDotState,n)
|
||||
dotState = dotState &
|
||||
+ A(n,stage) * plastic_RKdotState(1:sizeDotState,n)
|
||||
enddo
|
||||
|
||||
plasticState(ph)%state(1:sizeDotState,en) = subState0 &
|
||||
+ plasticState(ph)%dotState (1:sizeDotState,en) * Delta_t
|
||||
+ dotState * Delta_t
|
||||
|
||||
broken = integrateStress(F_0 + (F - F_0) * Delta_t * C(stage),subFp0,subFi0,Delta_t * C(stage),co,ip,el)
|
||||
broken = integrateStress(F_0 + (F-F_0) * Delta_t*C(stage),subFp0,subFi0,Delta_t*C(stage),co,ip,el)
|
||||
if(broken) exit
|
||||
|
||||
broken = plastic_dotState(Delta_t*C(stage),co,ip,el,ph,en)
|
||||
if(broken) exit
|
||||
dotState = plastic_dotState(Delta_t*C(stage), co,ip,el,ph,en)
|
||||
if (any(IEEE_is_NaN(dotState))) exit
|
||||
|
||||
enddo
|
||||
if(broken) return
|
||||
|
||||
|
||||
plastic_RKdotState(1:sizeDotState,size(B)) = plasticState (ph)%dotState(:,en)
|
||||
plasticState(ph)%dotState(:,en) = matmul(plastic_RKdotState(1:sizeDotState,1:size(B)),B)
|
||||
plastic_RKdotState(1:sizeDotState,size(B)) = dotState
|
||||
dotState = matmul(plastic_RKdotState,B)
|
||||
plasticState(ph)%state(1:sizeDotState,en) = subState0 &
|
||||
+ plasticState(ph)%dotState (1:sizeDotState,en) * Delta_t
|
||||
+ dotState * Delta_t
|
||||
|
||||
if(present(DB)) &
|
||||
broken = .not. converged(matmul(plastic_RKdotState(1:sizeDotState,1:size(DB)),DB) * Delta_t, &
|
||||
|
@ -958,7 +966,7 @@ subroutine crystallite_results(group,ph)
|
|||
!--------------------------------------------------------------------------------------------------
|
||||
function to_quaternion(dataset)
|
||||
|
||||
type(rotation), dimension(:), intent(in) :: dataset
|
||||
type(tRotation), dimension(:), intent(in) :: dataset
|
||||
real(pReal), dimension(4,size(dataset,1)) :: to_quaternion
|
||||
|
||||
integer :: i
|
||||
|
|
|
@ -27,7 +27,7 @@ module function thermalexpansion_init(kinematics_length) result(myKinematics)
|
|||
integer, intent(in) :: kinematics_length
|
||||
logical, dimension(:,:), allocatable :: myKinematics
|
||||
|
||||
integer :: Ninstances,p,i,k
|
||||
integer :: Ninstances, p, k
|
||||
class(tNode), pointer :: &
|
||||
phases, &
|
||||
phase, &
|
||||
|
|
|
@ -48,7 +48,7 @@ module subroutine elastic_init(phases)
|
|||
prm%C_11 = polynomial(elastic%asDict(),'C_11','T')
|
||||
prm%C_12 = polynomial(elastic%asDict(),'C_12','T')
|
||||
prm%C_44 = polynomial(elastic%asDict(),'C_44','T')
|
||||
|
||||
|
||||
if (any(phase_lattice(ph) == ['hP','tI'])) then
|
||||
prm%C_13 = polynomial(elastic%asDict(),'C_13','T')
|
||||
prm%C_33 = polynomial(elastic%asDict(),'C_33','T')
|
||||
|
@ -162,7 +162,7 @@ module subroutine phase_hooke_SandItsTangents(S, dS_dFe, dS_dFi, &
|
|||
|
||||
|
||||
C66 = phase_damage_C66(phase_homogenizedC66(ph,en),ph,en)
|
||||
C = math_Voigt66to3333(C66)
|
||||
C = math_Voigt66to3333_stiffness(C66)
|
||||
|
||||
E = 0.5_pReal*(matmul(transpose(Fe),Fe)-math_I3) !< Green-Lagrange strain in unloaded configuration
|
||||
S = math_Voigt6to33_stress(matmul(C66,math_33toVoigt6_strain(matmul(matmul(transpose(Fi),E),Fi))))!< 2PK stress in lattice configuration in work conjugate with GL strain pulled back to lattice configuration
|
||||
|
|
|
@ -154,11 +154,10 @@ submodule(phase:mechanical) plastic
|
|||
en
|
||||
end subroutine dislotungsten_dotState
|
||||
|
||||
module subroutine nonlocal_dotState(Mp,Temperature,timestep,ph,en,ip,el)
|
||||
module subroutine nonlocal_dotState(Mp,timestep,ph,en,ip,el)
|
||||
real(pReal), dimension(3,3), intent(in) :: &
|
||||
Mp !< MandelStress
|
||||
real(pReal), intent(in) :: &
|
||||
Temperature, & !< temperature
|
||||
timestep !< substepped crystallite time increment
|
||||
integer, intent(in) :: &
|
||||
ph, &
|
||||
|
@ -285,7 +284,7 @@ module subroutine plastic_LpAndItsTangents(Lp, dLp_dS, dLp_dFi, &
|
|||
do i=1,3; do j=1,3
|
||||
dLp_dFi(i,j,1:3,1:3) = matmul(matmul(Fi,S),transpose(dLp_dMp(i,j,1:3,1:3))) + &
|
||||
matmul(matmul(Fi,dLp_dMp(i,j,1:3,1:3)),S)
|
||||
dLp_dS(i,j,1:3,1:3) = matmul(matmul(transpose(Fi),Fi),dLp_dMp(i,j,1:3,1:3)) ! ToDo: @PS: why not: dLp_dMp:(FiT Fi)
|
||||
dLp_dS(i,j,1:3,1:3) = matmul(matmul(transpose(Fi),Fi),dLp_dMp(i,j,1:3,1:3)) ! ToDo: @PS: why not: dLp_dMp:(FiT Fi)
|
||||
end do; end do
|
||||
|
||||
end if
|
||||
|
@ -296,7 +295,7 @@ end subroutine plastic_LpAndItsTangents
|
|||
!--------------------------------------------------------------------------------------------------
|
||||
!> @brief contains the constitutive equation for calculating the rate of change of microstructure
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
module function plastic_dotState(subdt,co,ip,el,ph,en) result(broken)
|
||||
module function plastic_dotState(subdt,co,ip,el,ph,en) result(dotState)
|
||||
|
||||
integer, intent(in) :: &
|
||||
co, & !< component-ID of integration point
|
||||
|
@ -308,7 +307,8 @@ module function plastic_dotState(subdt,co,ip,el,ph,en) result(broken)
|
|||
subdt !< timestep
|
||||
real(pReal), dimension(3,3) :: &
|
||||
Mp
|
||||
logical :: broken
|
||||
real(pReal), dimension(plasticState(ph)%sizeDotState) :: &
|
||||
dotState
|
||||
|
||||
|
||||
if (phase_plasticity(ph) /= PLASTIC_NONE_ID) then
|
||||
|
@ -333,11 +333,11 @@ module function plastic_dotState(subdt,co,ip,el,ph,en) result(broken)
|
|||
call dislotungsten_dotState(Mp,thermal_T(ph,en),ph,en)
|
||||
|
||||
case (PLASTIC_NONLOCAL_ID) plasticType
|
||||
call nonlocal_dotState(Mp,thermal_T(ph,en),subdt,ph,en,ip,el)
|
||||
call nonlocal_dotState(Mp,subdt,ph,en,ip,el)
|
||||
end select plasticType
|
||||
end if
|
||||
|
||||
broken = any(IEEE_is_NaN(plasticState(ph)%dotState(:,en)))
|
||||
dotState = plasticState(ph)%dotState(:,en)
|
||||
|
||||
end function plastic_dotState
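Aside (illustrative sketch, not part of this changeset): with this change the failure signal moves from a returned logical to a NaN test on the returned rates. A minimal standalone version of the calling pattern used by the state integrators; the data here is fabricated for the demonstration.

program nan_check_demo
  use, intrinsic :: IEEE_arithmetic, only: IEEE_is_NaN, IEEE_value, IEEE_quiet_NaN
  implicit none
  integer, parameter :: pReal = selected_real_kind(15)
  real(pReal), dimension(3) :: dotState

  dotState = [1.0_pReal, 2.0_pReal, IEEE_value(1.0_pReal, IEEE_quiet_NaN)]  ! pretend the model failed
  if (any(IEEE_is_NaN(dotState))) then
    print '(a)', 'integration broken, caller returns/exits early'
  else
    print '(a)', 'rates are usable'
  end if
end program nan_check_demo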
|
||||
|
||||
|
@ -393,6 +393,7 @@ module function plastic_deltaState(ph, en) result(broken)
|
|||
myOffset, &
|
||||
mySize
|
||||
|
||||
|
||||
broken = .false.
|
||||
|
||||
select case (phase_plasticity(ph))
|
||||
|
|
|
@ -341,7 +341,7 @@ module subroutine dislotungsten_dotState(Mp,T,ph,en)
|
|||
dot_rho_dip_formation = merge(2.0_pReal*(d_hat-prm%d_caron)*stt%rho_mob(:,en)*dot%gamma_sl(:,en)/prm%b_sl, &
|
||||
0.0_pReal, &
|
||||
prm%dipoleformation)
|
||||
v_cl = (3.0_pReal*mu*prm%D_0*exp(-prm%Q_cl/(K_B*T))*prm%f_at/(2.0_pReal*PI*K_B*T)) &
|
||||
v_cl = (3.0_pReal*mu*prm%D_0*exp(-prm%Q_cl/(K_B*T))*prm%f_at/(TAU*K_B*T)) &
|
||||
* (1.0_pReal/(d_hat+prm%d_caron))
|
||||
dot_rho_dip_climb = (4.0_pReal*v_cl*stt%rho_dip(:,en))/(d_hat-prm%d_caron) ! ToDo: Discuss with Franz: Stress dependency?
|
||||
end where
|
||||
|
|
|
@ -403,7 +403,7 @@ module function plastic_nonlocal_init() result(myPlasticity)
|
|||
'maxDipoleHeightEdge ','maxDipoleHeightScrew' ]) * prm%sum_N_sl !< other dependent state variables that are not updated by microstructure
|
||||
sizeDeltaState = sizeDotState
|
||||
|
||||
call phase_allocateState(plasticState(ph),Nmembers,sizeState,sizeDotState,sizeDeltaState)
|
||||
call phase_allocateState(plasticState(ph),Nmembers,sizeState,sizeDotState,sizeDeltaState,0) ! ToDo: state structure does not follow convention
|
||||
|
||||
allocate(geom(ph)%V_0(Nmembers))
|
||||
call storeGeometry(ph)
|
||||
|
@ -411,9 +411,6 @@ module function plastic_nonlocal_init() result(myPlasticity)
|
|||
if(plasticState(ph)%nonlocal .and. .not. allocated(IPneighborhood)) &
|
||||
call IO_error(212,ext_msg='IPneighborhood does not exist')
|
||||
|
||||
|
||||
plasticState(ph)%offsetDeltaState = 0 ! ToDo: state structure does not follow convention
|
||||
|
||||
st0%rho => plasticState(ph)%state0 (0*prm%sum_N_sl+1:10*prm%sum_N_sl,:)
|
||||
stt%rho => plasticState(ph)%state (0*prm%sum_N_sl+1:10*prm%sum_N_sl,:)
|
||||
dot%rho => plasticState(ph)%dotState (0*prm%sum_N_sl+1:10*prm%sum_N_sl,:)
|
||||
|
@ -941,13 +938,12 @@ end subroutine plastic_nonlocal_deltaState
|
|||
!---------------------------------------------------------------------------------------------------
|
||||
!> @brief calculates the rate of change of microstructure
|
||||
!---------------------------------------------------------------------------------------------------
|
||||
module subroutine nonlocal_dotState(Mp, Temperature,timestep, &
|
||||
module subroutine nonlocal_dotState(Mp,timestep, &
|
||||
ph,en,ip,el)
|
||||
|
||||
real(pReal), dimension(3,3), intent(in) :: &
|
||||
Mp !< MandelStress
|
||||
real(pReal), intent(in) :: &
|
||||
Temperature, & !< temperature
|
||||
timestep !< substepped crystallite time increment
|
||||
integer, intent(in) :: &
|
||||
ph, &
|
||||
|
@ -984,7 +980,7 @@ module subroutine nonlocal_dotState(Mp, Temperature,timestep, &
|
|||
real(pReal) :: &
|
||||
D_SD, &
|
||||
mu, &
|
||||
nu
|
||||
nu, Temperature
|
||||
|
||||
if (timestep <= 0.0_pReal) then
|
||||
plasticState(ph)%dotState = 0.0_pReal
|
||||
|
@ -995,6 +991,7 @@ module subroutine nonlocal_dotState(Mp, Temperature,timestep, &
|
|||
|
||||
mu = elastic_mu(ph,en)
|
||||
nu = elastic_nu(ph,en)
|
||||
Temperature = thermal_T(ph,en)
|
||||
|
||||
tau = 0.0_pReal
|
||||
dot_gamma = 0.0_pReal
|
||||
|
@ -1195,7 +1192,6 @@ function rhoDotFlux(timestep,ph,en,ip,el)
|
|||
|
||||
associate(prm => param(ph), &
|
||||
dst => dependentState(ph), &
|
||||
dot => dotState(ph), &
|
||||
stt => state(ph))
|
||||
ns = prm%sum_N_sl
|
||||
|
||||
|
@ -1206,7 +1202,7 @@ function rhoDotFlux(timestep,ph,en,ip,el)
|
|||
rho0 = getRho0(ph,en)
|
||||
my_rhoSgl0 = rho0(:,sgl)
|
||||
|
||||
forall (s = 1:ns, t = 1:4) v(s,t) = plasticState(ph)%state(iV(s,t,ph),en) !ToDo: MD: I think we should use state0 here
|
||||
forall (s = 1:ns, t = 1:4) v(s,t) = plasticState(ph)%state(iV(s,t,ph),en) !ToDo: MD: I think we should use state0 here
|
||||
dot_gamma = rhoSgl(:,1:4) * v * spread(prm%b_sl,2,4)
|
||||
|
||||
forall (s = 1:ns, t = 1:4) v0(s,t) = plasticState(ph)%state0(iV(s,t,ph),en)
|
||||
|
@ -1217,7 +1213,7 @@ function rhoDotFlux(timestep,ph,en,ip,el)
|
|||
if (plasticState(ph)%nonlocal) then
|
||||
|
||||
!*** check CFL (Courant-Friedrichs-Lewy) condition for flux
|
||||
if (any( abs(dot_gamma) > 0.0_pReal & ! any active slip system ...
|
||||
if (any( abs(dot_gamma) > 0.0_pReal & ! any active slip system ...
|
||||
.and. prm%C_CFL * abs(v0) * timestep &
|
||||
> IPvolume(ip,el) / maxval(IParea(:,ip,el)))) then ! ...with velocity above critical value (we use the reference volume and area for simplicity here)
|
||||
#ifdef DEBUG
|
||||
|
@ -1395,7 +1391,7 @@ module subroutine plastic_nonlocal_updateCompatibility(orientation,ph,i,e)
|
|||
nThresholdValues
|
||||
logical, dimension(param(ph)%sum_N_sl) :: &
|
||||
belowThreshold
|
||||
type(rotation) :: mis
|
||||
type(tRotation) :: mis
|
||||
|
||||
|
||||
associate(prm => param(ph))
|
||||
|
|
|
@ -55,7 +55,7 @@ module rotations
|
|||
|
||||
real(pReal), parameter :: P = -1.0_pReal !< parameter for orientation conversion.
|
||||
|
||||
type, public :: rotation
|
||||
type, public :: tRotation
|
||||
real(pReal), dimension(4) :: q
|
||||
contains
|
||||
procedure, public :: asQuaternion
|
||||
|
@ -78,10 +78,9 @@ module rotations
|
|||
procedure, public :: rotStiffness
|
||||
procedure, public :: misorientation
|
||||
procedure, public :: standardize
|
||||
end type rotation
|
||||
end type tRotation
|
||||
|
||||
real(pReal), parameter :: &
|
||||
SPI = sqrt(PI), &
|
||||
PREF = sqrt(6.0_pReal/PI), &
|
||||
A = PI**(5.0_pReal/6.0_pReal)/6.0_pReal**(1.0_pReal/6.0_pReal), &
|
||||
AP = PI**(2.0_pReal/3.0_pReal), &
|
||||
|
@ -118,8 +117,8 @@ end subroutine rotations_init
|
|||
!---------------------------------------------------------------------------------------------------
|
||||
pure function asQuaternion(self)
|
||||
|
||||
class(rotation), intent(in) :: self
|
||||
real(pReal), dimension(4) :: asQuaternion
|
||||
class(tRotation), intent(in) :: self
|
||||
real(pReal), dimension(4) :: asQuaternion
|
||||
|
||||
asQuaternion = self%q
|
||||
|
||||
|
@ -127,8 +126,8 @@ end function asQuaternion
|
|||
!---------------------------------------------------------------------------------------------------
|
||||
pure function asEulers(self)
|
||||
|
||||
class(rotation), intent(in) :: self
|
||||
real(pReal), dimension(3) :: asEulers
|
||||
class(tRotation), intent(in) :: self
|
||||
real(pReal), dimension(3) :: asEulers
|
||||
|
||||
asEulers = qu2eu(self%q)
|
||||
|
||||
|
@ -136,8 +135,8 @@ end function asEulers
|
|||
!---------------------------------------------------------------------------------------------------
|
||||
pure function asAxisAngle(self)
|
||||
|
||||
class(rotation), intent(in) :: self
|
||||
real(pReal), dimension(4) :: asAxisAngle
|
||||
class(tRotation), intent(in) :: self
|
||||
real(pReal), dimension(4) :: asAxisAngle
|
||||
|
||||
asAxisAngle = qu2ax(self%q)
|
||||
|
||||
|
@ -145,8 +144,8 @@ end function asAxisAngle
|
|||
!---------------------------------------------------------------------------------------------------
|
||||
pure function asMatrix(self)
|
||||
|
||||
class(rotation), intent(in) :: self
|
||||
real(pReal), dimension(3,3) :: asMatrix
|
||||
class(tRotation), intent(in) :: self
|
||||
real(pReal), dimension(3,3) :: asMatrix
|
||||
|
||||
asMatrix = qu2om(self%q)
|
||||
|
||||
|
@ -154,8 +153,8 @@ end function asMatrix
|
|||
!---------------------------------------------------------------------------------------------------
|
||||
pure function asRodrigues(self)
|
||||
|
||||
class(rotation), intent(in) :: self
|
||||
real(pReal), dimension(4) :: asRodrigues
|
||||
class(tRotation), intent(in) :: self
|
||||
real(pReal), dimension(4) :: asRodrigues
|
||||
|
||||
asRodrigues = qu2ro(self%q)
|
||||
|
||||
|
@ -163,8 +162,8 @@ end function asRodrigues
|
|||
!---------------------------------------------------------------------------------------------------
|
||||
pure function asHomochoric(self)
|
||||
|
||||
class(rotation), intent(in) :: self
|
||||
real(pReal), dimension(3) :: asHomochoric
|
||||
class(tRotation), intent(in) :: self
|
||||
real(pReal), dimension(3) :: asHomochoric
|
||||
|
||||
asHomochoric = qu2ho(self%q)
|
||||
|
||||
|
@ -175,7 +174,7 @@ end function asHomochoric
|
|||
!---------------------------------------------------------------------------------------------------
|
||||
subroutine fromQuaternion(self,qu)
|
||||
|
||||
class(rotation), intent(out) :: self
|
||||
class(tRotation), intent(out) :: self
|
||||
real(pReal), dimension(4), intent(in) :: qu
|
||||
|
||||
if (dNeq(norm2(qu),1.0_pReal,1.0e-8_pReal)) call IO_error(402,ext_msg='fromQuaternion')
|
||||
|
@ -186,7 +185,7 @@ end subroutine fromQuaternion
|
|||
!---------------------------------------------------------------------------------------------------
|
||||
subroutine fromEulers(self,eu,degrees)
|
||||
|
||||
class(rotation), intent(out) :: self
|
||||
class(tRotation), intent(out) :: self
|
||||
real(pReal), dimension(3), intent(in) :: eu
|
||||
logical, intent(in), optional :: degrees
|
||||
|
||||
|
@ -198,7 +197,7 @@ subroutine fromEulers(self,eu,degrees)
|
|||
Eulers = merge(eu*INRAD,eu,degrees)
|
||||
endif
|
||||
|
||||
if (any(Eulers<0.0_pReal) .or. any(Eulers>2.0_pReal*PI) .or. Eulers(2) > PI) &
|
||||
if (any(Eulers<0.0_pReal) .or. any(Eulers>TAU) .or. Eulers(2) > PI) &
|
||||
call IO_error(402,ext_msg='fromEulers')
|
||||
|
||||
self%q = eu2qu(Eulers)
|
||||
|
@ -207,7 +206,7 @@ end subroutine fromEulers
|
|||
!---------------------------------------------------------------------------------------------------
|
||||
subroutine fromAxisAngle(self,ax,degrees,P)
|
||||
|
||||
class(rotation), intent(out) :: self
|
||||
class(tRotation), intent(out) :: self
|
||||
real(pReal), dimension(4), intent(in) :: ax
|
||||
logical, intent(in), optional :: degrees
|
||||
integer, intent(in), optional :: P
|
||||
|
@ -237,7 +236,7 @@ end subroutine fromAxisAngle
|
|||
!---------------------------------------------------------------------------------------------------
|
||||
subroutine fromMatrix(self,om)
|
||||
|
||||
class(rotation), intent(out) :: self
|
||||
class(tRotation), intent(out) :: self
|
||||
real(pReal), dimension(3,3), intent(in) :: om
|
||||
|
||||
if (dNeq(math_det33(om),1.0_pReal,tol=1.0e-5_pReal)) &
|
||||
|
@ -254,10 +253,10 @@ end subroutine fromMatrix
|
|||
!---------------------------------------------------------------------------------------------------
|
||||
pure elemental function rotRot__(self,R) result(rRot)
|
||||
|
||||
type(rotation) :: rRot
|
||||
class(rotation), intent(in) :: self,R
|
||||
type(tRotation) :: rRot
|
||||
class(tRotation), intent(in) :: self,R
|
||||
|
||||
rRot = rotation(multiply_quaternion(self%q,R%q))
|
||||
rRot = tRotation(multiply_quaternion(self%q,R%q))
|
||||
call rRot%standardize()
|
||||
|
||||
end function rotRot__
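Aside (illustrative sketch, not part of this changeset): a usage example of the renamed tRotation type, assuming the DAMASK rotations module (and a prec module providing pReal) are available; the program name and inputs are hypothetical.

program rotation_demo
  use prec                                  ! assumption: provides pReal
  use rotations
  implicit none
  type(tRotation) :: Ra, Rb, mis

  call Ra%fromEulers([0.0_pReal, 0.0_pReal, 0.0_pReal])
  call Rb%fromAxisAngle([0.0_pReal, 0.0_pReal, 1.0_pReal, 0.5_pReal])  ! 0.5 rad about z
  mis = Ra%misorientation(Rb)               ! misorientation between the two rotations
  print '(4(f8.4,1x))', mis%asQuaternion()
end program rotation_demo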
|
||||
|
@ -268,7 +267,7 @@ end function rotRot__
|
|||
!---------------------------------------------------------------------------------------------------
|
||||
pure elemental subroutine standardize(self)
|
||||
|
||||
class(rotation), intent(inout) :: self
|
||||
class(tRotation), intent(inout) :: self
|
||||
|
||||
if (sign(1.0_pReal,self%q(1)) < 0.0_pReal) self%q = - self%q
|
||||
|
||||
|
@ -282,7 +281,7 @@ end subroutine standardize
|
|||
pure function rotVector(self,v,active) result(vRot)
|
||||
|
||||
real(pReal), dimension(3) :: vRot
|
||||
class(rotation), intent(in) :: self
|
||||
class(tRotation), intent(in) :: self
|
||||
real(pReal), intent(in), dimension(3) :: v
|
||||
logical, intent(in), optional :: active
|
||||
|
||||
|
@ -318,7 +317,7 @@ end function rotVector
|
|||
pure function rotTensor2(self,T,active) result(tRot)
|
||||
|
||||
real(pReal), dimension(3,3) :: tRot
|
||||
class(rotation), intent(in) :: self
|
||||
class(tRotation), intent(in) :: self
|
||||
real(pReal), intent(in), dimension(3,3) :: T
|
||||
logical, intent(in), optional :: active
|
||||
|
||||
|
@ -347,7 +346,7 @@ end function rotTensor2
|
|||
pure function rotTensor4(self,T,active) result(tRot)
|
||||
|
||||
real(pReal), dimension(3,3,3,3) :: tRot
|
||||
class(rotation), intent(in) :: self
|
||||
class(tRotation), intent(in) :: self
|
||||
real(pReal), intent(in), dimension(3,3,3,3) :: T
|
||||
logical, intent(in), optional :: active
|
||||
|
||||
|
@ -379,7 +378,7 @@ end function rotTensor4
|
|||
pure function rotStiffness(self,C,active) result(cRot)
|
||||
|
||||
real(pReal), dimension(6,6) :: cRot
|
||||
class(rotation), intent(in) :: self
|
||||
class(tRotation), intent(in) :: self
|
||||
real(pReal), intent(in), dimension(6,6) :: C
|
||||
logical, intent(in), optional :: active
|
||||
|
||||
|
@ -416,8 +415,8 @@ end function rotStiffness
|
|||
!---------------------------------------------------------------------------------------------------
|
||||
pure elemental function misorientation(self,other)
|
||||
|
||||
type(rotation) :: misorientation
|
||||
class(rotation), intent(in) :: self, other
|
||||
type(tRotation) :: misorientation
|
||||
class(tRotation), intent(in) :: self, other
|
||||
|
||||
|
||||
misorientation%q = multiply_quaternion(other%q, conjugate_quaternion(self%q))
|
||||
|
@ -480,7 +479,7 @@ pure function qu2eu(qu) result(eu)
|
|||
atan2( 2.0_pReal*chi, q03-q12 ), &
|
||||
atan2(( P*qu(1)*qu(3)+qu(2)*qu(4))*chi, (-P*qu(1)*qu(2)+qu(3)*qu(4))*chi )]
|
||||
endif degenerated
|
||||
where(sign(1.0_pReal,eu)<0.0_pReal) eu = mod(eu+2.0_pReal*PI,[2.0_pReal*PI,PI,2.0_pReal*PI])
|
||||
where(sign(1.0_pReal,eu)<0.0_pReal) eu = mod(eu+TAU,[TAU,PI,TAU])
|
||||
|
||||
end function qu2eu
|
||||
|
||||
|
@ -628,7 +627,7 @@ pure function om2eu(om) result(eu)
|
|||
eu = [atan2(om(1,2),om(1,1)), 0.5_pReal*PI*(1.0_pReal-om(3,3)),0.0_pReal ]
|
||||
end if
|
||||
where(abs(eu) < 1.e-8_pReal) eu = 0.0_pReal
|
||||
where(sign(1.0_pReal,eu)<0.0_pReal) eu = mod(eu+2.0_pReal*PI,[2.0_pReal*PI,PI,2.0_pReal*PI])
|
||||
where(sign(1.0_pReal,eu)<0.0_pReal) eu = mod(eu+TAU,[TAU,PI,TAU])
|
||||
|
||||
end function om2eu
|
||||
|
||||
|
@ -1209,7 +1208,7 @@ pure function ho2cu(ho) result(cu)
|
|||
else special
|
||||
q2 = qxy + maxval(abs(xyz2))**2
|
||||
sq2 = sqrt(q2)
|
||||
q = (beta/R2/R1) * sqrt(q2*qxy/(q2-maxval(abs(xyz2))*sq2))
|
||||
q = (BETA/R2/R1) * sqrt(q2*qxy/(q2-maxval(abs(xyz2))*sq2))
|
||||
tt = (minval(abs(xyz2))**2+maxval(abs(xyz2))*sq2)/R2/qxy
|
||||
Tinv = q * sign(1.0_pReal,xyz2) * merge([ 1.0_pReal, acos(math_clip(tt,-1.0_pReal,1.0_pReal))/PI12], &
|
||||
[ acos(math_clip(tt,-1.0_pReal,1.0_pReal))/PI12, 1.0_pReal], &
|
||||
|
@ -1217,7 +1216,7 @@ pure function ho2cu(ho) result(cu)
|
|||
endif special
|
||||
|
||||
! inverse M_1
|
||||
xyz1 = [ Tinv(1), Tinv(2), sign(1.0_pReal,xyz3(3)) * rs / pref ] /sc
|
||||
xyz1 = [ Tinv(1), Tinv(2), sign(1.0_pReal,xyz3(3)) * rs / PREF ]/SC
|
||||
|
||||
! reverse the coordinates back to order according to the original pyramid number
|
||||
cu = xyz1(p(:,2))
|
||||
|
@ -1323,32 +1322,32 @@ pure function cu2ho(cu) result(ho)
|
|||
else center
|
||||
! get pyramid and scale by grid parameter ratio
|
||||
p = GetPyramidOrder(cu)
|
||||
XYZ = cu(p(:,1)) * sc
|
||||
XYZ = cu(p(:,1)) * SC
|
||||
|
||||
! intercept all the points along the z-axis
|
||||
special: if (all(dEq0(XYZ(1:2)))) then
|
||||
LamXYZ = [ 0.0_pReal, 0.0_pReal, pref * XYZ(3) ]
|
||||
LamXYZ = [ 0.0_pReal, 0.0_pReal, PREF * XYZ(3) ]
|
||||
else special
|
||||
order = merge( [2,1], [1,2], abs(XYZ(2)) <= abs(XYZ(1))) ! order of absolute values of XYZ
|
||||
q = PI12 * XYZ(order(1))/XYZ(order(2)) ! smaller by larger
|
||||
c = cos(q)
|
||||
s = sin(q)
|
||||
q = prek * XYZ(order(2))/ sqrt(R2-c)
|
||||
q = PREK * XYZ(order(2))/ sqrt(R2-c)
|
||||
T = [ (R2*c - 1.0), R2 * s] * q
|
||||
|
||||
! transform to sphere grid (inverse Lambert)
|
||||
! [note that there is no need to worry about dividing by zero, since XYZ(3) can not become zero]
|
||||
c = sum(T**2)
|
||||
s = Pi * c/(24.0*XYZ(3)**2)
|
||||
c = sPi * c / sqrt(24.0_pReal) / XYZ(3)
|
||||
s = PI * c/(24.0*XYZ(3)**2)
|
||||
c = sqrt(PI) * c / sqrt(24.0_pReal) / XYZ(3)
|
||||
q = sqrt( 1.0 - s )
|
||||
LamXYZ = [ T(order(2)) * q, T(order(1)) * q, pref * XYZ(3) - c ]
|
||||
endif special
|
||||
LamXYZ = [ T(order(2)) * q, T(order(1)) * q, PREF * XYZ(3) - c ]
|
||||
end if special
|
||||
|
||||
! reverse the coordinates back to order according to the original pyramid number
|
||||
ho = LamXYZ(p(:,2))
|
||||
|
||||
endif center
|
||||
end if center
|
||||
|
||||
end function cu2ho
|
||||
|
||||
|
@ -1416,7 +1415,7 @@ end function conjugate_quaternion
|
|||
!--------------------------------------------------------------------------------------------------
|
||||
subroutine selfTest()
|
||||
|
||||
type(rotation) :: R
|
||||
type(tRotation) :: R
|
||||
real(pReal), dimension(4) :: qu, ax, ro
|
||||
real(pReal), dimension(3) :: x, eu, ho, v3
|
||||
real(pReal), dimension(3,3) :: om, t33
|
||||
|
@ -1437,7 +1436,7 @@ subroutine selfTest()
|
|||
elseif(i==2) then
|
||||
qu = eu2qu([0.0_pReal,0.0_pReal,0.0_pReal])
|
||||
elseif(i==3) then
|
||||
qu = eu2qu([2.0_pReal*PI,PI,2.0_pReal*PI])
|
||||
qu = eu2qu([TAU,PI,TAU])
|
||||
elseif(i==4) then
|
||||
qu = [0.0_pReal,0.0_pReal,1.0_pReal,0.0_pReal]
|
||||
elseif(i==5) then
|
||||
|
@ -1448,10 +1447,10 @@ subroutine selfTest()
|
|||
call random_number(x)
|
||||
A = sqrt(x(3))
|
||||
B = sqrt(1.0_pReal -x(3))
|
||||
qu = [cos(2.0_pReal*PI*x(1))*A,&
|
||||
sin(2.0_pReal*PI*x(2))*B,&
|
||||
cos(2.0_pReal*PI*x(2))*B,&
|
||||
sin(2.0_pReal*PI*x(1))*A]
|
||||
qu = [cos(TAU*x(1))*A,&
|
||||
sin(TAU*x(2))*B,&
|
||||
cos(TAU*x(2))*B,&
|
||||
sin(TAU*x(1))*A]
|
||||
if(qu(1)<0.0_pReal) qu = qu * (-1.0_pReal)
|
||||
endif
|
||||
|
||||
|
@ -1504,7 +1503,8 @@ subroutine selfTest()
|
|||
|
||||
call random_number(C)
|
||||
C = C+transpose(C)
|
||||
if (any(dNeq(R%rotStiffness(C),math_3333toVoigt66(R%rotate(math_Voigt66to3333(C))),1.0e-12_pReal))) &
|
||||
if (any(dNeq(R%rotStiffness(C), &
|
||||
math_3333toVoigt66_stiffness(R%rotate(math_Voigt66to3333_stiffness(C))),1.0e-12_pReal))) &
|
||||
error stop 'rotStiffness'
|
||||
|
||||
call R%fromQuaternion(qu * (1.0_pReal + merge(+5.e-9_pReal,-5.e-9_pReal, mod(i,2) == 0))) ! allow reasonable tolerance for ASCII/YAML
|
||||
|
|
|
@ -24,7 +24,7 @@ module system_routines
|
|||
|
||||
function setCWD_C(cwd) bind(C)
|
||||
use, intrinsic :: ISO_C_Binding, only: C_INT, C_CHAR
|
||||
|
||||
|
||||
integer(C_INT) :: setCWD_C
|
||||
character(kind=C_CHAR), dimension(*), intent(in) :: cwd
|
||||
end function setCWD_C
|
||||
|
@ -150,14 +150,14 @@ function getUserName()
|
|||
getUserName = c_f_string(getUserName_Cstring)
|
||||
else
|
||||
getUserName = 'n/a (Error!)'
|
||||
endif
|
||||
end if
|
||||
|
||||
end function getUserName
|
||||
|
||||
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
!> @brief convert C string to Fortran string
|
||||
!> @details: C string is NULL terminated and, hence, longer by one than the Fortran string
|
||||
!> @brief Convert C string to Fortran string.
|
||||
!> @details: C string is NULL terminated and, hence, longer by one than the Fortran string.
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
pure function c_f_string(c_string) result(f_string)
|
||||
|
||||
|
@ -174,28 +174,23 @@ pure function c_f_string(c_string) result(f_string)
|
|||
else
|
||||
f_string = f_string(:i-1)
|
||||
exit
|
||||
endif
|
||||
enddo arrayToString
|
||||
end if
|
||||
end do arrayToString
|
||||
|
||||
end function c_f_string
|
||||
|
||||
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
!> @brief convert Fortran string to C string
|
||||
!> @details: C string is NULL terminated and, hence, longer by one than the Fortran string
|
||||
!> @brief Convert Fortran string to C string.
|
||||
!> @details: C string is NULL terminated and, hence, longer by one than the Fortran string.
|
||||
!--------------------------------------------------------------------------------------------------
|
||||
pure function f_c_string(f_string) result(c_string)
|
||||
|
||||
character(len=*), intent(in) :: f_string
|
||||
character(kind=C_CHAR), dimension(len_trim(f_string)+1) :: c_string
|
||||
|
||||
integer :: i
|
||||
|
||||
|
||||
do i=1,len_trim(f_string)
|
||||
c_string(i)=f_string(i:i)
|
||||
enddo
|
||||
c_string(len_trim(f_string)+1) = C_NULL_CHAR
|
||||
c_string = transfer(trim(f_string)//C_NULL_CHAR,c_string,size=size(c_string))
|
||||
|
||||
end function f_c_string
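Aside (illustrative sketch, not part of this changeset): the rewritten f_c_string replaces the element-wise copy loop with a single transfer of the trimmed string plus a terminating NUL. The same idiom as a standalone program.

program c_string_demo
  use, intrinsic :: ISO_C_Binding, only: C_CHAR, C_NULL_CHAR
  implicit none
  character(len=*), parameter :: f = 'hello'
  character(kind=C_CHAR), dimension(len_trim(f)+1) :: c

  ! copy the trimmed Fortran string and a trailing NUL into a C_CHAR array in one statement
  c = transfer(trim(f)//C_NULL_CHAR, c, size=size(c))
  print '(a,i0)', 'C string length incl. NUL: ', size(c)
end program c_string_demo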
|
||||
|
||||
|
|