Refactor evp1d grid metrics, remove G_HTE and G_HTN. #20

Closed

Changes from all commits
24 changes: 12 additions & 12 deletions cicecore/cicedyn/dynamics/ice_dyn_core1d.F90
@@ -70,7 +70,7 @@ module ice_dyn_core1d
! arguments ------------------------------------------------------------------
subroutine stress_1d (ee, ne, se, lb, ub, &
uvel, vvel, dxT, dyT, skipme, strength, &
- hte, htn, htem1, htnm1, &
+ cxp, cyp, cxm, cym, dxhy, dyhx, &
stressp_1, stressp_2, stressp_3, stressp_4, &
stressm_1, stressm_2, stressm_3, stressm_4, &
stress12_1, stress12_2, stress12_3, stress12_4, &
@@ -94,10 +94,9 @@ subroutine stress_1d (ee, ne, se, lb, ub, &
vvel , & ! y-component of velocity (m/s)
dxT , & ! width of T-cell through the middle (m)
dyT , & ! height of T-cell through the middle (m)
- hte , &
- htn , &
- htem1 , &
- htnm1
+ cxp, cyp , & ! grid metrics
+ cxm, cym , & !
+ dxhy, dyhx !

real (kind=dbl_kind), dimension(:), intent(inout), contiguous :: &
stressp_1, stressp_2, stressp_3, stressp_4 , & ! sigma11+sigma22
@@ -165,7 +164,8 @@ subroutine stress_1d (ee, ne, se, lb, ub, &
!$omp tmp_cxp, tmp_cyp, tmp_cxm, tmp_cym , &
!$omp tmp_strength, tmp_DminTarea, tmparea , &
!$omp tmp_dxhy, tmp_dyhx) &
- !$omp shared(uvel,vvel,dxT,dyT,htn,hte,htnm1,htem1 , &
+ !$omp shared(uvel,vvel,dxT,dyT , &
+ !$omp cxp, cyp, cxm, cym, dxhy, dyhx , &
!$omp str1,str2,str3,str4,str5,str6,str7,str8 , &
!$omp stressp_1,stressp_2,stressp_3,stressp_4 , &
!$omp stressm_1,stressm_2,stressm_3,stressm_4 , &
@@ -188,15 +188,15 @@ subroutine stress_1d (ee, ne, se, lb, ub, &
tmp_uvel_se = uvel(se(iw))
tmp_dxT = dxT(iw)
tmp_dyT = dyT(iw)
- tmp_cxp = c1p5 * htn(iw) - p5 * htnm1(iw)
- tmp_cyp = c1p5 * hte(iw) - p5 * htem1(iw)
- tmp_cxm = -(c1p5 * htnm1(iw) - p5 * htn(iw))
- tmp_cym = -(c1p5 * htem1(iw) - p5 * hte(iw))
+ tmp_cxp = cxp(iw)
+ tmp_cyp = cyp(iw)
+ tmp_cxm = cxm(iw)
+ tmp_cym = cym(iw)
tmp_strength = strength(iw)
tmparea = dxT(iw) * dyT(iw) ! necessary to split calc of DminTarea. Otherwise not binary identical
tmp_DminTarea = deltaminEVP * tmparea
- tmp_dxhy = p5 * (hte(iw) - htem1(iw))
- tmp_dyhx = p5 * (htn(iw) - htnm1(iw))
+ tmp_dxhy = dxhy(iw)
+ tmp_dyhx = dyhx(iw)

!--------------------------------------------------------------------------
! strain rates - NOTE these are actually strain rates * area (m^2/s)
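Reviewer note: the six metrics now passed into stress_1d are exactly the quantities the deleted lines computed inline on every subcycle. A minimal sketch of the intended equivalence, reusing the hte/htn/htem1/htnm1 names and the c1p5/p5 constants from the deleted code (the actual precomputation is assumed to live on the 2d grid in ice_grid):

    ! Sketch only: relation of the precomputed metrics to the primary grid
    ! lengths; htem1/htnm1 are the (i-1,j) and (i,j-1) neighbour values.
    cxp(iw)  =   c1p5 * htn(iw)   - p5 * htnm1(iw)
    cyp(iw)  =   c1p5 * hte(iw)   - p5 * htem1(iw)
    cxm(iw)  = -(c1p5 * htnm1(iw) - p5 * htn(iw))
    cym(iw)  = -(c1p5 * htem1(iw) - p5 * hte(iw))
    dxhy(iw) =    p5 * (hte(iw) - htem1(iw))
    dyhx(iw) =    p5 * (htn(iw) - htnm1(iw))

Hoisting these out of the subcycling loop removes redundant per-iteration arithmetic; bit-for-bit agreement with the 2d solver then rests on ice_grid using the same expressions.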
6 changes: 4 additions & 2 deletions cicecore/cicedyn/dynamics/ice_dyn_evp.F90
@@ -121,7 +121,8 @@ module ice_dyn_evp
subroutine init_evp
use ice_blocks, only: nx_block, ny_block, nghost
use ice_domain_size, only: max_blocks, nx_global, ny_global
- use ice_grid, only: grid_ice, dyT, dxT, uarear, tmask, G_HTE, G_HTN
+ use ice_grid, only: grid_ice, dyT, dxT, uarear, tmask, &
+ cxp, cyp, cxm, cym, dxhy, dyhx
use ice_calendar, only: dt_dyn
use ice_dyn_shared, only: init_dyn_shared, evp_algorithm
use ice_dyn_evp1d, only: dyn_evp1d_init
@@ -136,7 +137,8 @@ subroutine init_evp
if (evp_algorithm == "shared_mem_1d" ) then
call dyn_evp1d_init(nx_global , ny_global, nx_block, ny_block, &
max_blocks, nghost , dyT , dxT , &
- uarear , tmask , G_HTE , G_HTN)
+ uarear , tmask , cxp , cyp , &
+ cxm , cym , dxhy , dyhx)
endif

allocate( uocnU (nx_block,ny_block,max_blocks), & ! i ocean current (m/s)
110 changes: 57 additions & 53 deletions cicecore/cicedyn/dynamics/ice_dyn_evp1d.F90
@@ -35,15 +35,14 @@ module ice_dyn_evp1d
! indexes
integer(kind=int_kind), allocatable, dimension(:,:) :: iwidx
logical(kind=log_kind), allocatable, dimension(:) :: skipTcell,skipUcell
- real (kind=dbl_kind), allocatable, dimension(:) :: HTE,HTN, HTEm1,HTNm1
integer(kind=int_kind), allocatable, dimension(:) :: ee,ne,se,nw,sw,sse ! arrays for neighbour points
integer(kind=int_kind), allocatable, dimension(:) :: indxti, indxtj, indxTij

! 1D arrays to allocate
real(kind=dbl_kind) , allocatable, dimension(:) :: &
cdn_ocn,aiu,uocn,vocn,waterxU,wateryU,forcexU,forceyU,umassdti,fmU,uarear, &
strintxU,strintyU,uvel_init,vvel_init, &
- strength, uvel, vvel, dxt, dyt, &
+ strength, uvel, vvel, dxt, dyt, cxp, cyp, cxm, cym, dxhy, dyhx, &
stressp_1, stressp_2, stressp_3, stressp_4, stressm_1, stressm_2, &
stressm_3, stressm_4, stress12_1, stress12_2, stress12_3, stress12_4, &
str1, str2, str3, str4, str5, str6, str7, str8, Tbu, Cb
@@ -68,26 +67,30 @@ module ice_dyn_evp1d
! This follows CICE naming
! In addition all water points are assumed to be active and allocated thereafter.
subroutine dyn_evp1d_init(nx_global, ny_global, cnx_block, cny_block, &
- cmax_block, cnghost , &
- L_dyT, L_dxT, L_uarear, L_tmask, G_HTE, G_HTN)
+ cmax_block, cnghost, &
+ L_dyT, L_dxT, L_uarear, L_tmask, &
+ L_cxp, L_cyp, L_cxm, L_cym, L_dxhy, L_dyhx)

+ use ice_communicate, only : master_task
+ use ice_gather_scatter, only : gather_global_ext
+ use ice_domain, only : distrb_info
! Note that TMask is ocean/land
implicit none

integer(kind=int_kind), intent(in) :: &
nx_global, ny_global, cnx_block, cny_block, cmax_block, cnghost
real(kind=dbl_kind) , dimension(:,:,:), intent(in) :: &
- L_dyT, L_dxT, L_uarear
+ L_dyT, L_dxT, L_uarear, L_cxp, L_cyp, L_cxm, L_cym, L_dxhy, L_dyhx
logical(kind=log_kind), dimension(:,:,:), intent(in) :: &
L_tmask

! variables for Global arrays (domain size)
- real(kind=dbl_kind) , dimension(:,:) , intent(in) :: G_HTE,G_HTN
- real(kind=dbl_kind) , dimension(:,:) , allocatable :: G_dyT, G_dxT, G_uarear
- logical(kind=log_kind), dimension(:,:) , allocatable :: G_tmask
+ real(kind=dbl_kind) , dimension(:,:), allocatable :: &
+ G_dyT, G_dxT, G_uarear, G_cxp, G_cyp, G_cxm, G_cym, G_dxhy, G_dyhx
+ logical(kind=log_kind), dimension(:,:), allocatable :: G_tmask

- integer(kind=int_kind) :: ios, ierr
character(len=*), parameter :: subname = '(dyn_evp1d_init)'
+ integer(kind=int_kind) :: ios, ierr

nx_block=cnx_block
ny_block=cny_block
@@ -98,14 +101,26 @@ subroutine dyn_evp1d_init(nx_global, ny_global, cnx_block, cny_block, &
ny=ny_global+2*nghost

allocate(G_dyT(nx,ny),G_dxT(nx,ny),G_uarear(nx,ny),G_tmask(nx,ny),stat=ierr)
if (ierr/=0) then
call abort_ice(subname//' ERROR: allocating', file=__FILE__, line=__LINE__)
endif

+ allocate(G_cxp(nx,ny),G_cyp(nx,ny),G_cxm(nx,ny),G_cym(nx,ny),G_dxhy(nx,ny),G_dyhx(nx,ny),stat=ierr)
+ if (ierr/=0) then
+ call abort_ice(subname//' ERROR: allocating', file=__FILE__, line=__LINE__)
+ endif

- ! gather from blks to global
- call gather_static(L_uarear, L_dxT, L_dyT, L_Tmask, &
- G_uarear, G_dxT, G_dyT, G_Tmask)
+ ! copy from distributed I_* to G_*
+ call gather_global_ext(G_uarear, L_uarear, master_task, distrb_info)
+ call gather_global_ext(G_dxT , L_dxT , master_task, distrb_info)
+ call gather_global_ext(G_dyT , L_dyT , master_task, distrb_info)
+ call gather_global_ext(G_Tmask , L_Tmask , master_task, distrb_info)
+ call gather_global_ext(G_cxp , L_cxp , master_task, distrb_info)
+ call gather_global_ext(G_cyp , L_cyp , master_task, distrb_info)
+ call gather_global_ext(G_cxm , L_cxm , master_task, distrb_info)
+ call gather_global_ext(G_cym , L_cym , master_task, distrb_info)
+ call gather_global_ext(G_dxhy , L_dxhy , master_task, distrb_info)
+ call gather_global_ext(G_dyhx , L_dyhx , master_task, distrb_info)

! calculate number of water points (T and U). Only needed for the static version
! Tmask in ocean/ice
@@ -116,10 +131,14 @@ subroutine dyn_evp1d_init(nx_global, ny_global, cnx_block, cny_block, &
call calc_navel(nActive, navel)
call evp1d_alloc_static_navel(navel)
call numainit(1,nActive,navel)
- call convert_2d_1d_init(nActive,G_HTE, G_HTN, G_uarear, G_dxT, G_dyT)
+ call convert_2d_1d_init(nActive, G_uarear, G_dxT, G_dyT, &
+ G_cxp, G_cyp, G_cxm, G_cym, G_dxhy, G_dyhx)
call evp1d_alloc_static_halo()
endif

deallocate(G_dyT,G_dxT,G_uarear,G_tmask)
+ deallocate(G_cxp, G_cyp, G_cxm, G_cym, G_dxhy, G_dyhx)

end subroutine dyn_evp1d_init
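Reviewer note: each global field gathered above follows the same four-step pattern: error-checked allocation, gather_global_ext, conversion to the packed 1d arrays, deallocation. A minimal sketch of how one further field would be threaded through, with a hypothetical field foo (not a real CICE variable), assuming declarations analogous to those above:

    ! Hypothetical sketch only; 'foo' stands in for any additional 2d metric.
    allocate(G_foo(nx,ny), stat=ierr)
    if (ierr/=0) then
       call abort_ice(subname//' ERROR: allocating', file=__FILE__, line=__LINE__)
    endif
    call gather_global_ext(G_foo, L_foo, master_task, distrb_info)
    ! ... extend convert_2d_1d_init to copy G_foo into a packed foo(1:na0) ...
    deallocate(G_foo)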

!=============================================================================
Expand Down Expand Up @@ -223,7 +242,8 @@ subroutine dyn_evp1d_run(L_stressp_1 , L_stressp_2 , L_stressp_3 , L_stressp_4 ,
call ice_timer_start(timer_evp1dcore)
#ifdef _OPENMP_TARGET
!$omp target data map(to: ee, ne, se, nw, sw, sse, skipUcell, skipTcell,&
- !$omp strength, dxT, dyT, HTE,HTN,HTEm1, HTNm1, &
+ !$omp strength, dxT, dyT, &
+ !$omp cxp, cyp, cxm, cym, dxhy, dyhx, &
!$omp forcexU, forceyU, umassdti, fmU, uarear, &
!$omp uvel_init, vvel_init, Tbu, Cb, &
!$omp str1, str2, str3, str4, str5, str6, str7, str8, &
@@ -247,7 +267,7 @@ subroutine dyn_evp1d_run(L_stressp_1 , L_stressp_2 , L_stressp_3 , L_stressp_4 , &
do ksub = 1,ndte ! subcycling
call stress_1d (ee, ne, se, 1, nActive, &
uvel, vvel, dxT, dyT, skipTcell, strength, &
- HTE, HTN, HTEm1, HTNm1, &
+ cxp, cyp, cxm, cym, dxhy, dyhx, &
stressp_1, stressp_2, stressp_3, stressp_4, &
stressm_1, stressm_2, stressm_3, stressm_4, &
stress12_1, stress12_2, stress12_3, stress12_4, &
@@ -369,12 +389,14 @@ subroutine evp1d_alloc_static_na(na0)
call abort_ice(subname//' ERROR: allocating', file=__FILE__, line=__LINE__)
endif

- allocate( HTE (1:na0), &
- HTN (1:na0), &
- HTEm1 (1:na0), &
- HTNm1 (1:na0), &
- dxt (1:na0), &
+ allocate( dxt (1:na0), &
dyt (1:na0), &
+ cxp (1:na0), &
+ cyp (1:na0), &
+ cxm (1:na0), &
+ cym (1:na0), &
+ dxhy (1:na0), &
+ dyhx (1:na0), &
strength (1:na0), &
stressp_1 (1:na0), &
stressp_2 (1:na0), &
@@ -595,29 +617,6 @@ subroutine union(x, y, xdim, ydim, xy, nxy)
nxy = k - 1
end subroutine union

- !=============================================================================
- subroutine gather_static(L_uarear, L_dxT, L_dyT,L_Tmask, G_uarear,G_dxT,G_dyT, G_Tmask)
- ! In standalone distrb_info is an integer. Not needed anyway
- use ice_communicate, only : master_task
- use ice_gather_scatter, only : gather_global_ext
- use ice_domain, only : distrb_info
- implicit none
-
- real(kind=dbl_kind) , dimension(nx_block, ny_block, max_block), intent(in) :: &
- L_uarear, L_dxT, L_dyT
-
- logical(kind=log_kind), dimension(nx_block, ny_block, max_block), intent(in) :: L_Tmask
- real(kind=dbl_kind) , dimension(:, :), intent(out) :: G_uarear,G_dxT,G_dyT
- logical(kind=log_kind), dimension(:, :), intent(out) :: G_Tmask
- character(len=*), parameter :: subname = '(gather_static)'
-
- ! copy from distributed I_* to G_*
- call gather_global_ext(G_uarear, L_uarear, master_task, distrb_info)
- call gather_global_ext(G_dxT , L_dxT , master_task, distrb_info)
- call gather_global_ext(G_dyT , L_dyT , master_task, distrb_info)
- call gather_global_ext(G_Tmask , L_Tmask , master_task, distrb_info)
- end subroutine gather_static

!=============================================================================
subroutine gather_dyn(L_stressp_1 , L_stressp_2 , L_stressp_3 , L_stressp_4 , &
L_stressm_1 , L_stressm_2 , L_stressm_3 , L_stressm_4 , &
@@ -766,12 +765,13 @@ subroutine scatter_dyn(L_stressp_1 , L_stressp_2 , L_stressp_3 , L_stressp_4 , &
end subroutine scatter_dyn

!=============================================================================
- subroutine convert_2d_1d_init(na0, G_HTE, G_HTN, G_uarear, G_dxT, G_dyT)
+ subroutine convert_2d_1d_init(na0, G_uarear, G_dxT, G_dyT, G_cxp, G_cyp, G_cxm, G_cym, G_dxhy, G_dyhx)

implicit none

integer(kind=int_kind), intent(in) :: na0
- real (kind=dbl_kind), dimension(:, :), intent(in) :: G_HTE, G_HTN, G_uarear, G_dxT, G_dyT
+ real (kind=dbl_kind), dimension(:, :), intent(in) :: &
+ G_uarear, G_dxT, G_dyT, G_cxp, G_cyp, G_cxm, G_cym, G_dxhy, G_dyhx
! local variables

integer(kind=int_kind) :: iw, lo, up, j, i
@@ -835,10 +835,12 @@ subroutine convert_2d_1d_init(na0, G_HTE, G_HTN, G_uarear, G_dxT, G_dyT)
uarear(iw) = G_uarear(i, j)
dxT(iw) = G_dxT(i, j)
dyT(iw) = G_dyT(i, j)
- HTE(iw) = G_HTE(i, j)
- HTN(iw) = G_HTN(i, j)
- HTEm1(iw) = G_HTE(i - 1, j)
- HTNm1(iw) = G_HTN(i, j - 1)
+ cxp(iw) = G_cxp(i, j)
+ cyp(iw) = G_cyp(i, j)
+ cxm(iw) = G_cxm(i, j)
+ cym(iw) = G_cym(i, j)
+ dxhy(iw) = G_dxhy(i, j)
+ dyhx(iw) = G_dyhx(i, j)
end do
end subroutine convert_2d_1d_init
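Reviewer note: the assignments above sit inside the packing loop over active T-cells, whose header is collapsed in this view. A sketch of the assumed loop shape, using the module-level index arrays indxti/indxtj declared near the top of this file:

    ! Assumed shape of the packing loop (collapsed in the diff view):
    do iw = 1, na0
       i = indxti(iw)           ! global i-index of water point iw
       j = indxtj(iw)           ! global j-index of water point iw
       cxp(iw) = G_cxp(i, j)    ! copy each 2d metric into its packed 1d array
    end do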

Expand Down Expand Up @@ -1142,15 +1144,17 @@ subroutine numainit(lo,up,uu)
aiu(iw)=c0
Cb(iw)=c0
cdn_ocn(iw)=c0
+ cxp(iw)=c0
+ cyp(iw)=c0
+ cxm(iw)=c0
+ cym(iw)=c0
+ dxhy(iw)=c0
+ dyhx(iw)=c0
dxt(iw)=c0
dyt(iw)=c0
fmU(iw)=c0
forcexU(iw)=c0
forceyU(iw)=c0
- HTE(iw)=c0
- HTEm1(iw)=c0
- HTN(iw)=c0
- HTNm1(iw)=c0
strength(iw)= c0
stress12_1(iw)=c0
stress12_2(iw)=c0
13 changes: 5 additions & 8 deletions cicecore/cicedyn/general/ice_init.F90
@@ -105,7 +105,7 @@ subroutine input_data
grid_ocn, grid_ocn_thrm, grid_ocn_dynu, grid_ocn_dynv, &
grid_atm, grid_atm_thrm, grid_atm_dynu, grid_atm_dynv, &
dxrect, dyrect, dxscale, dyscale, scale_dxdy, &
- lonrefrect, latrefrect, pgl_global_ext
+ lonrefrect, latrefrect
use ice_dyn_shared, only: ndte, kdyn, revised_evp, yield_curve, &
evp_algorithm, visc_method, &
seabed_stress, seabed_stress_method, &
@@ -375,7 +375,6 @@ subroutine input_data
ndte = 120 ! subcycles per dynamics timestep: ndte=dt_dyn/dte
evp_algorithm = 'standard_2d' ! EVP kernel (standard_2d=standard cice evp; shared_mem_1d=1d shared memory and no mpi)
elasticDamp = 0.36_dbl_kind ! coefficient for calculating the parameter E
- pgl_global_ext = .false. ! if true, init primary grid lengths (global ext.)
brlx = 300.0_dbl_kind ! revised_evp values. Otherwise overwritten in ice_dyn_shared
arlx = 300.0_dbl_kind ! revised_evp values. Otherwise overwritten in ice_dyn_shared
revised_evp = .false. ! if true, use revised procedure for evp dynamics
@@ -962,7 +961,6 @@ subroutine input_data
call broadcast_scalar(ndte, master_task)
call broadcast_scalar(evp_algorithm, master_task)
call broadcast_scalar(elasticDamp, master_task)
- call broadcast_scalar(pgl_global_ext, master_task)
call broadcast_scalar(brlx, master_task)
call broadcast_scalar(arlx, master_task)
call broadcast_scalar(revised_evp, master_task)
@@ -1840,11 +1838,10 @@ subroutine input_data
tmpstr2 = ' : standard 2d EVP solver'
elseif (evp_algorithm == 'shared_mem_1d') then
tmpstr2 = ' : vectorized 1d EVP solver'
- pgl_global_ext = .true.
else
tmpstr2 = ' : unknown value'
endif
- write(nu_diag,1031) ' evp_algorithm = ', trim(evp_algorithm),trim(tmpstr2)
+ write(nu_diag,1030) ' evp_algorithm = ', trim(evp_algorithm),trim(tmpstr2)
write(nu_diag,1020) ' ndtd = ', ndtd, ' : number of dynamics/advection/ridging/steps per thermo timestep'
write(nu_diag,1020) ' ndte = ', ndte, ' : number of EVP or EAP subcycles'
endif
@@ -2277,17 +2274,17 @@ subroutine input_data
write(nu_diag,1010) ' use_smliq_pnd = ', use_smliq_pnd, trim(tmpstr2)
if (snw_aging_table == 'test') then
tmpstr2 = ' : Using 5x5x1 test matrix of internally defined snow aging parameters'
- write(nu_diag,1030) ' snw_aging_table = ', trim(snw_aging_table),trim(tmpstr2)
+ write(nu_diag,1030) ' snw_aging_table = ',trim(snw_aging_table),trim(tmpstr2)
elseif (snw_aging_table == 'snicar') then
tmpstr2 = ' : Reading 3D snow aging parameters from SNICAR file'
- write(nu_diag,1030) ' snw_aging_table = ', trim(snw_aging_table),trim(tmpstr2)
+ write(nu_diag,1030) ' snw_aging_table = ',trim(snw_aging_table),trim(tmpstr2)
write(nu_diag,1031) ' snw_filename = ',trim(snw_filename)
write(nu_diag,1031) ' snw_tau_fname = ',trim(snw_tau_fname)
write(nu_diag,1031) ' snw_kappa_fname = ',trim(snw_kappa_fname)
write(nu_diag,1031) ' snw_drdt0_fname = ',trim(snw_drdt0_fname)
elseif (snw_aging_table == 'file') then
tmpstr2 = ' : Reading 1D and 3D snow aging dimensions and parameters from external file'
- write(nu_diag,1030) ' snw_aging_table = ', trim(snw_aging_table),trim(tmpstr2)
+ write(nu_diag,1030) ' snw_aging_table = ',trim(snw_aging_table),trim(tmpstr2)
write(nu_diag,1031) ' snw_filename = ',trim(snw_filename)
write(nu_diag,1031) ' snw_rhos_fname = ',trim(snw_rhos_fname)
write(nu_diag,1031) ' snw_Tgrd_fname = ',trim(snw_Tgrd_fname)