Update CICE to consortium master (NOAA-EMC#23)
updates include:

* deprecate upwind advection (CICE-Consortium#508)
* add implicit VP solver (CICE-Consortium#491)

update icepack

update gitmodules, update icepack

switch icepack branches

* update to icepack master but set abort flag in ITD routine to false

update icepack
DeniseWorthen committed May 10, 2024
1 parent 6d238c7 commit 297aa57
Showing 33 changed files with 8,744 additions and 1,689 deletions.
1,953 changes: 987 additions & 966 deletions cicecore/cicedyn/dynamics/ice_dyn_eap.F90

Large diffs are not rendered by default.

14 changes: 11 additions & 3 deletions cicecore/cicedyn/general/ice_forcing.F90
@@ -5059,23 +5059,31 @@ end subroutine ocn_data_ispol_init
subroutine box2001_data

! wind and current fields as in Hunke, JCP 2001
! these are defined at the u point
! authors: Elizabeth Hunke, LANL

use ice_domain, only: nblocks
use ice_domain_size, only: max_blocks
use ice_blocks, only: nx_block, ny_block, nghost
use ice_flux, only: uocn, vocn, uatm, vatm, wind, rhoa, strax, stray
- use ice_grid, only: uvm
+ use ice_grid, only: uvm, to_ugrid
use ice_state, only: aice

! local parameters

integer (kind=int_kind) :: &
iblk, i,j ! loop indices

real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: &
aiu ! ice fraction on u-grid

real (kind=dbl_kind) :: &
secday, pi , puny, period, pi2, tau
call icepack_query_parameters(pi_out=pi, pi2_out=pi2, puny_out=puny)
call icepack_query_parameters(secday_out=secday)

call to_ugrid(aice, aiu)

period = c4*secday

do iblk = 1, nblocks
@@ -5106,8 +5114,8 @@ subroutine box2001_data
! wind stress
wind(i,j,iblk) = sqrt(uatm(i,j,iblk)**2 + vatm(i,j,iblk)**2)
tau = rhoa(i,j,iblk) * 0.0012_dbl_kind * wind(i,j,iblk)
- strax(i,j,iblk) = tau * uatm(i,j,iblk)
- stray(i,j,iblk) = tau * vatm(i,j,iblk)
+ strax(i,j,iblk) = aiu(i,j,iblk) * tau * uatm(i,j,iblk)
+ stray(i,j,iblk) = aiu(i,j,iblk) * tau * vatm(i,j,iblk)

! initialization test
! Diagonal wind vectors 1
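Note: the box2001 change above multiplies the wind stress by the ice fraction at the velocity points, obtained by interpolating aice from the T grid to the U grid via to_ugrid. As a rough sketch of that interpolation (assuming the area-weighted 4-point average CICE uses on its B-grid; the subroutine name, real(8) kinds, and the omission of halo updates and the uvm mask are simplifications for illustration):

! Sketch: average a T-cell field to U (cell-corner) points, assuming
! the area-weighted 4-point form of CICE's to_ugrid; halo exchange
! and land masking are omitted for brevity.
subroutine t2u_sketch(nx, ny, tarea, uarea, work_t, work_u)
   integer, intent(in)  :: nx, ny
   real(8), intent(in)  :: tarea(nx,ny), uarea(nx,ny) ! T- and U-cell areas
   real(8), intent(in)  :: work_t(nx,ny)              ! field at T points
   real(8), intent(out) :: work_u(nx,ny)              ! field at U points
   integer :: i, j
   work_u = 0.0d0
   do j = 1, ny-1
   do i = 1, nx-1
      ! U point (i,j) lies at the northeast corner of T cell (i,j)
      work_u(i,j) = 0.25d0*( work_t(i  ,j  )*tarea(i  ,j  ) &
                           + work_t(i+1,j  )*tarea(i+1,j  ) &
                           + work_t(i  ,j+1)*tarea(i  ,j+1) &
                           + work_t(i+1,j+1)*tarea(i+1,j+1) )/uarea(i,j)
   end do
   end do
end subroutine t2u_sketch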
4 changes: 3 additions & 1 deletion cicecore/cicedyn/general/ice_step_mod.F90
@@ -848,6 +848,7 @@ subroutine step_dyn_horiz (dt)

use ice_dyn_evp, only: evp
use ice_dyn_eap, only: eap
use ice_dyn_vp, only: implicit_solver
use ice_dyn_shared, only: kdyn, ktransport
use ice_flux, only: init_history_dyn
!deprecate upwind use ice_transport_driver, only: advection, transport_upwind, transport_remap
@@ -861,11 +862,12 @@
call init_history_dyn ! initialize dynamic history variables

!-----------------------------------------------------------------
- ! Elastic-viscous-plastic ice dynamics
+ ! Ice dynamics (momentum equation)
!-----------------------------------------------------------------

if (kdyn == 1) call evp (dt)
if (kdyn == 2) call eap (dt)
if (kdyn == 3) call implicit_solver (dt)

!-----------------------------------------------------------------
! Horizontal ice transport
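Note: with this dispatch in place, the solver is chosen through the existing kdyn namelist flag in dynamics_nml (1 = EVP, 2 = EAP, and now 3 = implicit VP). A minimal, illustrative ice_in fragment, assuming default values for everything not shown:

&dynamics_nml
   kdyn = 3   ! ice dynamics solver: 1 = EVP, 2 = EAP, 3 = implicit VP
/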
70 changes: 70 additions & 0 deletions cicecore/cicedyn/infrastructure/comm/mpi/ice_global_reductions.F90
@@ -36,6 +36,7 @@ module ice_global_reductions
private

public :: global_sum, &
global_allreduce_sum, &
global_sum_prod, &
global_maxval, &
global_minval
@@ -64,6 +65,12 @@ module ice_global_reductions
global_sum_scalar_int
end interface

interface global_allreduce_sum
module procedure global_allreduce_sum_vector_dbl!, &
! module procedure global_allreduce_sum_vector_real, & ! not yet implemented
! module procedure global_allreduce_sum_vector_int ! not yet implemented
end interface

interface global_sum_prod
module procedure global_sum_prod_dbl, &
global_sum_prod_real, &
@@ -749,6 +756,69 @@ function global_sum_scalar_int(scalar, dist) &

end function global_sum_scalar_int

!***********************************************************************

function global_allreduce_sum_vector_dbl(vector, dist) &
result(globalSums)

! Computes the global sums of sets of scalars (elements of 'vector')
! distributed across a parallel machine.
!
! This is actually the specific interface for the generic global_allreduce_sum
! function corresponding to double precision vectors. The generic
! interface is identical but will handle real and integer vectors.

real (dbl_kind), dimension(:), intent(in) :: &
vector ! vector whose components are to be summed

type (distrb), intent(in) :: &
dist ! block distribution

real (dbl_kind), dimension(size(vector)) :: &
globalSums ! resulting array of global sums

!-----------------------------------------------------------------------
!
! local variables
!
!-----------------------------------------------------------------------

integer (int_kind) :: &
ierr, &! mpi error flag
numProcs, &! number of processor participating
numBlocks, &! number of local blocks
communicator, &! communicator for this distribution
numElem ! number of elements in vector

real (dbl_kind), dimension(:,:), allocatable :: &
work ! temporary local array

character(len=*), parameter :: subname = '(global_allreduce_sum_vector_dbl)'

!-----------------------------------------------------------------------
!
! get communicator for MPI calls
!
!-----------------------------------------------------------------------

call ice_distributionGet(dist, &
numLocalBlocks = numBlocks, &
nprocs = numProcs, &
communicator = communicator)

numElem = size(vector)
allocate(work(1,numElem))
work(1,:) = vector
globalSums = c0

call compute_sums_dbl(work,globalSums,communicator,numProcs)

deallocate(work)

!-----------------------------------------------------------------------

end function global_allreduce_sum_vector_dbl

!***********************************************************************

function global_sum_prod_dbl (array1, array2, dist, field_loc, &
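Note: global_allreduce_sum gives every task the element-wise global sum of a local vector (an allreduce), the reduction pattern the implicit VP solver needs for its global dot products. A minimal usage sketch, assuming it is called from inside CICE where distrb_info (from ice_domain) describes the block distribution; the local_dots/global_dots names are illustrative, not part of the commit:

subroutine allreduce_usage_sketch
   ! Sketch: sum the elements of a short local vector across all tasks.
   ! distrb_info is CICE's standard block distribution; the variable
   ! names local_dots/global_dots are illustrative only.
   use ice_kinds_mod,         only: dbl_kind
   use ice_domain,            only: distrb_info
   use ice_global_reductions, only: global_allreduce_sum

   real (dbl_kind), dimension(2) :: local_dots, global_dots

   local_dots(1) = 1.0_dbl_kind   ! e.g. this task's piece of <u,u>
   local_dots(2) = 2.0_dbl_kind   ! e.g. this task's piece of <u,v>

   ! each task receives the same global sums
   global_dots = global_allreduce_sum(local_dots, distrb_info)
end subroutine allreduce_usage_sketch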
70 changes: 70 additions & 0 deletions cicecore/cicedyn/infrastructure/comm/serial/ice_global_reductions.F90

@@ -37,6 +37,7 @@ module ice_global_reductions
private

public :: global_sum, &
global_allreduce_sum, &
global_sum_prod, &
global_maxval, &
global_minval
@@ -65,6 +66,12 @@ module ice_global_reductions
global_sum_scalar_int
end interface

interface global_allreduce_sum
module procedure global_allreduce_sum_vector_dbl!, &
! module procedure global_allreduce_sum_vector_real, & ! not yet implemented
! module procedure global_allreduce_sum_vector_int ! not yet implemented
end interface

interface global_sum_prod
module procedure global_sum_prod_dbl, &
global_sum_prod_real, &
@@ -750,6 +757,69 @@ function global_sum_scalar_int(scalar, dist) &

end function global_sum_scalar_int

!***********************************************************************

function global_allreduce_sum_vector_dbl(vector, dist) &
result(globalSums)

! Computes the global sums of sets of scalars (elements of 'vector')
! distributed across a parallel machine.
!
! This is actually the specific interface for the generic global_allreduce_sum
! function corresponding to double precision vectors. The generic
! interface is identical but will handle real and integer vectors.

real (dbl_kind), dimension(:), intent(in) :: &
vector ! vector whose components are to be summed

type (distrb), intent(in) :: &
dist ! block distribution

real (dbl_kind), dimension(size(vector)) :: &
globalSums ! resulting array of global sums

!-----------------------------------------------------------------------
!
! local variables
!
!-----------------------------------------------------------------------

integer (int_kind) :: &
ierr, &! mpi error flag
numProcs, &! number of processor participating
numBlocks, &! number of local blocks
communicator, &! communicator for this distribution
numElem ! number of elements in vector

real (dbl_kind), dimension(:,:), allocatable :: &
work ! temporary local array

character(len=*), parameter :: subname = '(global_allreduce_sum_vector_dbl)'

!-----------------------------------------------------------------------
!
! get communicator for MPI calls
!
!-----------------------------------------------------------------------

call ice_distributionGet(dist, &
numLocalBlocks = numBlocks, &
nprocs = numProcs, &
communicator = communicator)

numElem = size(vector)
allocate(work(1,numElem))
work(1,:) = vector
globalSums = c0

call compute_sums_dbl(work,globalSums,communicator,numProcs)

deallocate(work)

!-----------------------------------------------------------------------

end function global_allreduce_sum_vector_dbl

!***********************************************************************

function global_sum_prod_dbl (array1, array2, dist, field_loc, &
(Diffs for the remaining changed files are not rendered.)
