From 547f2ffa69fcb2b2548968d37189f74de1f113c6 Mon Sep 17 00:00:00 2001
From: Martin Diehl
Date: Sun, 11 Apr 2021 09:46:11 +0200
Subject: [PATCH] cleaning

---
 src/DAMASK_Marc.f90                |  3 +--
 src/grid/discretization_grid.f90   |  2 +-
 src/grid/grid_damage_spectral.f90  | 14 ++++++--------
 src/grid/grid_thermal_spectral.f90 | 10 +++++-----
 4 files changed, 13 insertions(+), 16 deletions(-)

diff --git a/src/DAMASK_Marc.f90 b/src/DAMASK_Marc.f90
index 43ec9b084..910ca86c0 100644
--- a/src/DAMASK_Marc.f90
+++ b/src/DAMASK_Marc.f90
@@ -218,7 +218,7 @@ subroutine hypela2(d,g,e,de,s,t,dt,ngens,m,nn,kcus,matus,ndi,nshear,disp, &
   logical :: cutBack
   real(pReal), dimension(6) :: stress
   real(pReal), dimension(6,6) :: ddsdde
-  integer :: computationMode, i, cp_en, node, CPnodeID
+  integer :: computationMode, i, node, CPnodeID
   integer(pI32) :: defaultNumThreadsInt                                         !< default value set by Marc
 
   integer(pInt), save :: &
@@ -266,7 +266,6 @@ subroutine hypela2(d,g,e,de,s,t,dt,ngens,m,nn,kcus,matus,ndi,nshear,disp, &
       computationMode = CPFEM_RESTOREJACOBIAN
     elseif (lovl == 6) then                                                     ! stress requested by marc
       computationMode = CPFEM_CALCRESULTS
-      cp_en = discretization_Marc_FEM2DAMASK_elem(m(1))
       if (cptim > theTime .or. inc /= theInc) then                              ! reached "convergence"
         terminallyIll = .false.
         cycleCounter = -1                                                       ! first calc step increments this to cycle = 0
diff --git a/src/grid/discretization_grid.f90 b/src/grid/discretization_grid.f90
index 88216f0aa..14391fd23 100644
--- a/src/grid/discretization_grid.f90
+++ b/src/grid/discretization_grid.f90
@@ -74,7 +74,7 @@ subroutine discretization_grid_init(restart)
     allocate(materialAt_global(0))                                              ! needed for IntelMPI
   endif
 
-  if (grid(1) < 2) call IO_error(844, ext_msg='cells(1) must be larger than 1') 
+  if (grid(1) < 2) call IO_error(844, ext_msg='cells(1) must be larger than 1')
 
   call MPI_Bcast(grid,3,MPI_INTEGER,0,PETSC_COMM_WORLD, ierr)
   if (ierr /= 0) error stop 'MPI error'
diff --git a/src/grid/grid_damage_spectral.f90 b/src/grid/grid_damage_spectral.f90
index 7e0d4112a..7fe9a4a25 100644
--- a/src/grid/grid_damage_spectral.f90
+++ b/src/grid/grid_damage_spectral.f90
@@ -259,7 +259,6 @@ subroutine formResidual(in,x_scal,f_scal,dummy,ierr)
   PetscObject :: dummy
   PetscErrorCode :: ierr
   integer :: i, j, k, ce
-  real(pReal) :: mobility
 
   phi_current = x_scal
 !--------------------------------------------------------------------------------------------------
@@ -281,9 +280,8 @@
   ce = 0
   do k = 1, grid3;  do j = 1, grid(2);  do i = 1,grid(1)
     ce = ce + 1
-    mobility = homogenization_mu_phi(ce)
     scalarField_real(i,j,k) = params%timeinc*(scalarField_real(i,j,k) + homogenization_f_phi(phi_current(i,j,k),ce)) &
-                            + mobility*(phi_lastInc(i,j,k) - phi_current(i,j,k)) &
+                            + homogenization_mu_phi(ce)*(phi_lastInc(i,j,k) - phi_current(i,j,k)) &
                             + mu_ref*phi_current(i,j,k)
   enddo; enddo; enddo
 
@@ -309,16 +307,16 @@ end subroutine formResidual
 !--------------------------------------------------------------------------------------------------
 subroutine updateReference
 
-  integer :: i,j,k,ce,ierr
+  integer :: ce,ierr
+
 
-  ce = 0
   K_ref = 0.0_pReal
   mu_ref = 0.0_pReal
-  do k = 1, grid3;  do j = 1, grid(2);  do i = 1,grid(1)
-    ce = ce + 1
+  do ce = 1, product(grid(1:2))*grid3
     K_ref = K_ref + homogenization_K_phi(ce)
     mu_ref = mu_ref + homogenization_mu_phi(ce)
-  enddo; enddo; enddo
+  enddo
+
   K_ref = K_ref*wgt
   call MPI_Allreduce(MPI_IN_PLACE,K_ref,9,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
   mu_ref = mu_ref*wgt
diff --git a/src/grid/grid_thermal_spectral.f90 b/src/grid/grid_thermal_spectral.f90
index ea6caec0e..60d8d99db 100644
--- a/src/grid/grid_thermal_spectral.f90
+++ b/src/grid/grid_thermal_spectral.f90
@@ -302,16 +302,16 @@ end subroutine formResidual
 !--------------------------------------------------------------------------------------------------
 subroutine updateReference
 
-  integer :: i,j,k,ce,ierr
+  integer :: ce,ierr
+
 
-  ce = 0
   K_ref = 0.0_pReal
   mu_ref = 0.0_pReal
-  do k = 1, grid3;  do j = 1, grid(2);  do i = 1,grid(1)
-    ce = ce + 1
+  do ce = 1, product(grid(1:2))*grid3
     K_ref = K_ref + homogenization_K_T(ce)
     mu_ref = mu_ref + homogenization_mu_T(ce)
-  enddo; enddo; enddo
+  enddo
+
   K_ref = K_ref*wgt
   call MPI_Allreduce(MPI_IN_PLACE,K_ref,9,MPI_DOUBLE,MPI_SUM,PETSC_COMM_WORLD,ierr)
   mu_ref = mu_ref*wgt
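
Note on the updateReference hunks: the indices i, j, and k were never used inside the
loop body; only the running cell counter ce matters, so the triple loop over the grid
collapses into a single loop over all product(grid(1:2))*grid3 local cells. A minimal
standalone sketch of why the two forms are equivalent (nx, ny, nz, field, and the
totals are illustrative names, not DAMASK identifiers):

    ! Sketch: collapsing a triple loop with a manual counter into one linear loop.
    program loop_collapse
      implicit none
      integer, parameter :: nx = 4, ny = 3, nz = 2      ! stand-ins for grid(1), grid(2), grid3
      real :: field(nx*ny*nz)                           ! one value per cell, linearly indexed
      real :: total_nested, total_linear
      integer :: i, j, k, ce

      call random_number(field)

      ! before: nested loops, cell counter incremented by hand
      total_nested = 0.0
      ce = 0
      do k = 1, nz;  do j = 1, ny;  do i = 1, nx
        ce = ce + 1
        total_nested = total_nested + field(ce)
      enddo; enddo; enddo

      ! after: ce enumerates 1..nx*ny*nz in the same order, so one loop suffices
      total_linear = 0.0
      do ce = 1, nx*ny*nz
        total_linear = total_linear + field(ce)
      enddo

      print *, total_nested, total_linear               ! prints two identical sums
    end program loop_collapse

The accumulated sums are then averaged exactly as before: scaled by the averaging
weight wgt and summed across MPI ranks with MPI_Allreduce.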