From 1314e17b4213c6ce9424eab80763edf4b2ae867f Mon Sep 17 00:00:00 2001 From: TRasmussen <33480590+TillRasmussen@users.noreply.github.com> Date: Thu, 11 Jan 2024 18:25:52 +0100 Subject: [PATCH] First round of housekeeping on ice_grid (#921) * removal of unused variables. * moved xav to transport. Could remove commented code. Could remove xav and yav as they are zero * Move derived parameters and only allocate if needed * bugfixes for cxp, cyp... * fix index and remove commented code in ice_grid * new version of transport_remap. xav, yav array where needed. xxav, yyav parameter * Removed comments rom ice_transport_remap and arrays for nonuniform grids --- cicecore/cicedyn/dynamics/ice_dyn_eap.F90 | 4 +- cicecore/cicedyn/dynamics/ice_dyn_evp.F90 | 53 ++++++- cicecore/cicedyn/dynamics/ice_dyn_shared.F90 | 86 ++++++++++- cicecore/cicedyn/dynamics/ice_dyn_vp.F90 | 26 ++-- .../cicedyn/dynamics/ice_transport_remap.F90 | 141 ++++-------------- cicecore/cicedyn/infrastructure/ice_grid.F90 | 99 ------------ 6 files changed, 165 insertions(+), 244 deletions(-) diff --git a/cicecore/cicedyn/dynamics/ice_dyn_eap.F90 b/cicecore/cicedyn/dynamics/ice_dyn_eap.F90 index e240fc8f1..a8ac62797 100644 --- a/cicecore/cicedyn/dynamics/ice_dyn_eap.F90 +++ b/cicecore/cicedyn/dynamics/ice_dyn_eap.F90 @@ -109,7 +109,7 @@ subroutine eap (dt) seabed_stress_factor_LKD, seabed_stress_factor_prob, & seabed_stress_method, seabed_stress, & stack_fields, unstack_fields, iceTmask, iceUmask, & - fld2, fld3, fld4 + fld2, fld3, fld4, dxhy, dyhx, cxp, cyp, cxm, cym use ice_flux, only: rdg_conv, strairxT, strairyT, & strairxU, strairyU, uocn, vocn, ss_tltx, ss_tlty, fmU, & strtltxU, strtltyU, strocnxU, strocnyU, strintxU, strintyU, taubxU, taubyU, & @@ -118,7 +118,7 @@ subroutine eap (dt) stressp_1, stressp_2, stressp_3, stressp_4, & stressm_1, stressm_2, stressm_3, stressm_4, & stress12_1, stress12_2, stress12_3, stress12_4 - use ice_grid, only: tmask, umask, dxT, dyT, dxhy, dyhx, cxp, cyp, cxm, cym, & + use ice_grid, only: tmask, umask, dxT, dyT, & tarear, uarear, grid_average_X2Y, & grid_atm_dynu, grid_atm_dynv, grid_ocn_dynu, grid_ocn_dynv use ice_state, only: aice, aiU, vice, vsno, uvel, vvel, divu, shear, & diff --git a/cicecore/cicedyn/dynamics/ice_dyn_evp.F90 b/cicecore/cicedyn/dynamics/ice_dyn_evp.F90 index ee832e447..6a71d6a14 100644 --- a/cicecore/cicedyn/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedyn/dynamics/ice_dyn_evp.F90 @@ -84,6 +84,12 @@ module ice_dyn_evp emass (:,:,:) , & ! total mass of ice and snow (E grid) emassdti (:,:,:) ! mass of E-cell/dte (kg/m^2 s) + real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & + ratiodxN , & ! - dxN(i+1,j) / dxN(i,j) + ratiodyE , & ! - dyE(i ,j+1) / dyE(i,j) + ratiodxNr , & ! 1 / ratiodxN + ratiodyEr ! 1 / ratiodyE + real (kind=dbl_kind), allocatable :: & strengthU(:,:,:) , & ! strength averaged to U points divergU (:,:,:) , & ! div array on U points, differentiate from divu @@ -119,9 +125,10 @@ module ice_dyn_evp ! Elastic-viscous-plastic dynamics driver ! 
subroutine init_evp - use ice_blocks, only: nx_block, ny_block, nghost - use ice_domain_size, only: max_blocks, nx_global, ny_global - use ice_grid, only: grid_ice, dyT, dxT, uarear, tmask, G_HTE, G_HTN + use ice_blocks, only: get_block, nx_block, ny_block, nghost, block + use ice_domain_size, only: max_blocks + use ice_domain, only: nblocks, blocks_ice + use ice_grid, only: grid_ice, dyT, dxT, uarear, tmask, G_HTE, G_HTN, dxN, dyE use ice_calendar, only: dt_dyn use ice_dyn_shared, only: init_dyn_shared, evp_algorithm use ice_dyn_evp1d, only: dyn_evp1d_init @@ -131,6 +138,14 @@ subroutine init_evp character(len=*), parameter :: subname = '(init_evp)' + type (block) :: & + this_block ! block information for current block + + integer (kind=int_kind) :: & + i, j, iblk , & ! block index + ilo,ihi,jlo,jhi ! beginning and end of physical domain + + call init_dyn_shared(dt_dyn) if (evp_algorithm == "shared_mem_1d" ) then @@ -197,6 +212,32 @@ subroutine init_evp stat=ierr) if (ierr/=0) call abort_ice(subname//' ERROR: Out of memory E evp') + allocate( ratiodxN (nx_block,ny_block,max_blocks), & + ratiodyE (nx_block,ny_block,max_blocks), & + ratiodxNr(nx_block,ny_block,max_blocks), & + ratiodyEr(nx_block,ny_block,max_blocks), & + stat=ierr) + if (ierr/=0) call abort_ice(subname//' ERROR: Out of memory ratio') + + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + do j = jlo, jhi + do i = ilo, ihi + ratiodxN (i,j,iblk) = - dxN(i+1,j ,iblk) / dxN(i,j,iblk) + ratiodyE (i,j,iblk) = - dyE(i ,j+1,iblk) / dyE(i,j,iblk) + ratiodxNr(i,j,iblk) = c1 / ratiodxN(i,j,iblk) + ratiodyEr(i,j,iblk) = c1 / ratiodyE(i,j,iblk) + enddo + enddo + enddo ! 
iblk + !$OMP END PARALLEL DO + endif end subroutine init_evp @@ -219,7 +260,7 @@ subroutine evp (dt) ice_HaloDestroy, ice_HaloUpdate_stress use ice_blocks, only: block, get_block, nx_block, ny_block, nghost use ice_domain, only: nblocks, blocks_ice, halo_info, maskhalo_dyn - use ice_domain_size, only: max_blocks, ncat, nx_global, ny_global + use ice_domain_size, only: max_blocks, ncat use ice_flux, only: rdg_conv, rdg_shear, strairxT, strairyT, & strairxU, strairyU, uocn, vocn, ss_tltx, ss_tlty, fmU, & strtltxU, strtltyU, strocnxU, strocnyU, strintxU, strintyU, taubxU, taubyU, & @@ -238,8 +279,6 @@ subroutine evp (dt) stresspU, stressmU, stress12U use ice_grid, only: tmask, umask, umaskCD, nmask, emask, uvm, epm, npm, & dxE, dxN, dxT, dxU, dyE, dyN, dyT, dyU, & - ratiodxN, ratiodxNr, ratiodyE, ratiodyEr, & - dxhy, dyhx, cxp, cyp, cxm, cym, & tarear, uarear, earear, narear, grid_average_X2Y, uarea, & grid_type, grid_ice, & grid_atm_dynu, grid_atm_dynv, grid_ocn_dynu, grid_ocn_dynv @@ -250,7 +289,7 @@ subroutine evp (dt) ice_timer_start, ice_timer_stop, timer_evp use ice_dyn_shared, only: evp_algorithm, stack_fields, unstack_fields, & DminTarea, visc_method, deformations, deformationsC_T, deformationsCD_T, & - strain_rates_U, & + strain_rates_U, dxhy, dyhx, cxp, cyp, cxm, cym, & iceTmask, iceUmask, iceEmask, iceNmask, & dyn_haloUpdate, fld2, fld3, fld4 use ice_dyn_evp1d, only: dyn_evp1d_run diff --git a/cicecore/cicedyn/dynamics/ice_dyn_shared.F90 b/cicecore/cicedyn/dynamics/ice_dyn_shared.F90 index 9dbeaf1a7..d3f819f20 100644 --- a/cicecore/cicedyn/dynamics/ice_dyn_shared.F90 +++ b/cicecore/cicedyn/dynamics/ice_dyn_shared.F90 @@ -11,7 +11,7 @@ module ice_dyn_shared use ice_kinds_mod use ice_communicate, only: my_task, master_task, get_num_procs - use ice_constants, only: c0, c1, c2, c3, c4, c6 + use ice_constants, only: c0, c1, c2, c3, c4, c6, c1p5 use ice_constants, only: omega, spval_dbl, p01, p001, p5 use ice_blocks, only: nx_block, ny_block use ice_domain_size, only: max_blocks @@ -119,6 +119,14 @@ module ice_dyn_shared real (kind=dbl_kind), allocatable, public :: & DminTarea(:,:,:) ! deltamin * tarea (m^2/s) + real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & + cyp , & ! 1.5*HTE(i,j)-0.5*HTW(i,j) = 1.5*HTE(i,j)-0.5*HTE(i-1,j) + cxp , & ! 1.5*HTN(i,j)-0.5*HTS(i,j) = 1.5*HTN(i,j)-0.5*HTN(i,j-1) + cym , & ! 0.5*HTE(i,j)-1.5*HTW(i,j) = 0.5*HTE(i,j)-1.5*HTE(i-1,j) + cxm , & ! 0.5*HTN(i,j)-1.5*HTS(i,j) = 0.5*HTN(i,j)-1.5*HTN(i,j-1) + dxhy , & ! 0.5*(HTE(i,j) - HTW(i,j)) = 0.5*(HTE(i,j) - HTE(i-1,j)) + dyhx ! 0.5*(HTN(i,j) - HTS(i,j)) = 0.5*(HTN(i,j) - HTN(i,j-1)) + ! ice isotropic tensile strength parameter real (kind=dbl_kind), public :: & Ktens ! T=Ktens*P (tensile strength: see Konig and Holland, 2010) @@ -192,6 +200,22 @@ subroutine alloc_dyn_shared stat=ierr) if (ierr/=0) call abort_ice(subname//': Out of memory') + allocate( & + cyp(nx_block,ny_block,max_blocks), & ! 1.5*HTE - 0.5*HTW + cxp(nx_block,ny_block,max_blocks), & ! 1.5*HTN - 0.5*HTS + cym(nx_block,ny_block,max_blocks), & ! 0.5*HTE - 1.5*HTW + cxm(nx_block,ny_block,max_blocks), & ! 0.5*HTN - 1.5*HTS + stat=ierr) + if (ierr/=0) call abort_ice(subname//': Out of memory') + + if (grid_ice == 'B' .and. evp_algorithm == "standard_2d") then + allocate( & + dxhy(nx_block,ny_block,max_blocks), & ! 0.5*(HTE - HTW) + dyhx(nx_block,ny_block,max_blocks), & ! 0.5*(HTN - HTS) + stat=ierr) + if (ierr/=0) call abort_ice(subname//': Out of memory') + endif + if (grid_ice == 'CD' .or. 
grid_ice == 'C') then allocate( & uvelE_init (nx_block,ny_block,max_blocks), & ! x-component of velocity (m/s), beginning of timestep @@ -214,8 +238,9 @@ end subroutine alloc_dyn_shared subroutine init_dyn_shared (dt) - use ice_blocks, only: nx_block, ny_block - use ice_domain, only: nblocks, halo_dynbundle + use ice_blocks, only: block, get_block + use ice_boundary, only: ice_halo, ice_haloUpdate + use ice_domain, only: nblocks, halo_dynbundle, blocks_ice, halo_info use ice_domain_size, only: max_blocks use ice_flux, only: & stressp_1, stressp_2, stressp_3, stressp_4, & @@ -224,7 +249,8 @@ subroutine init_dyn_shared (dt) stresspT, stressmT, stress12T, & stresspU, stressmU, stress12U use ice_state, only: uvel, vvel, uvelE, vvelE, uvelN, vvelN - use ice_grid, only: ULAT, NLAT, ELAT, tarea + use ice_grid, only: ULAT, NLAT, ELAT, tarea, HTE, HTN + use ice_constants, only: field_loc_center, field_type_vector real (kind=dbl_kind), intent(in) :: & dt ! time step @@ -232,9 +258,13 @@ subroutine init_dyn_shared (dt) ! local variables integer (kind=int_kind) :: & - i, j , & ! indices - nprocs, & ! number of processors - iblk ! block index + i, j , & ! indices + ilo, ihi, jlo, jhi, & !min and max index for interior of blocks + nprocs, & ! number of processors + iblk ! block index + + type (block) :: & + this_block ! block information for current block character(len=*), parameter :: subname = '(init_dyn_shared)' @@ -333,6 +363,48 @@ subroutine init_dyn_shared (dt) enddo ! iblk !$OMP END PARALLEL DO + if (grid_ice == 'B' .and. evp_algorithm == "standard_2d") then + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + + do j = jlo, jhi + do i = ilo, ihi + dxhy(i,j,iblk) = p5*(HTE(i,j,iblk) - HTE(i-1,j,iblk)) + dyhx(i,j,iblk) = p5*(HTN(i,j,iblk) - HTN(i,j-1,iblk)) + enddo + enddo + enddo + + call ice_HaloUpdate (dxhy, halo_info, & + field_loc_center, field_type_vector, & + fillValue=c1) + call ice_HaloUpdate (dyhx, halo_info, & + field_loc_center, field_type_vector, & + fillValue=c1) + + endif + + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi+1 + do i = ilo, ihi+1 + cyp(i,j,iblk) = (c1p5*HTE(i,j,iblk) - p5*HTE(i-1,j,iblk)) + cxp(i,j,iblk) = (c1p5*HTN(i,j,iblk) - p5*HTN(i,j-1,iblk)) + ! 
match order of operations in cyp, cxp for tripole grids + cym(i,j,iblk) = -(c1p5*HTE(i-1,j,iblk) - p5*HTE(i,j,iblk)) + cxm(i,j,iblk) = -(c1p5*HTN(i,j-1,iblk) - p5*HTN(i,j,iblk)) + enddo + enddo + enddo + end subroutine init_dyn_shared !======================================================================= diff --git a/cicecore/cicedyn/dynamics/ice_dyn_vp.F90 b/cicecore/cicedyn/dynamics/ice_dyn_vp.F90 index 58589f8d7..477d19515 100644 --- a/cicecore/cicedyn/dynamics/ice_dyn_vp.F90 +++ b/cicecore/cicedyn/dynamics/ice_dyn_vp.F90 @@ -52,7 +52,7 @@ module ice_dyn_vp use ice_fileunits, only: nu_diag use ice_flux, only: fmU use ice_global_reductions, only: global_sum - use ice_grid, only: dxT, dyT, dxhy, dyhx, cxp, cyp, cxm, cym, uarear + use ice_grid, only: dxT, dyT, uarear use ice_exit, only: abort_ice use icepack_intfc, only: icepack_warnings_flush, icepack_warnings_aborted use icepack_intfc, only: icepack_ice_strength, icepack_query_parameters @@ -120,19 +120,9 @@ subroutine init_vp use ice_boundary, only: ice_HaloUpdate use ice_constants, only: c1, & field_loc_center, field_type_scalar - use ice_domain, only: blocks_ice, halo_info + use ice_domain, only: blocks_ice use ice_calendar, only: dt_dyn use ice_dyn_shared, only: init_dyn_shared -! use ice_grid, only: tarea - - ! local variables - - integer (kind=int_kind) :: & - i, j, iblk, & - ilo,ihi,jlo,jhi ! beginning and end of physical domain - - type (block) :: & - this_block ! block information for current block call init_dyn_shared(dt_dyn) @@ -167,7 +157,8 @@ subroutine implicit_solver (dt) use ice_blocks, only: block, get_block, nx_block, ny_block use ice_domain, only: blocks_ice, halo_info, maskhalo_dyn use ice_domain_size, only: max_blocks, ncat - use ice_dyn_shared, only: deformations, iceTmask, iceUmask + use ice_dyn_shared, only: deformations, iceTmask, iceUmask, & + cxp, cyp, cxm, cym use ice_flux, only: rdg_conv, rdg_shear, strairxT, strairyT, & strairxU, strairyU, uocn, vocn, ss_tltx, ss_tlty, fmU, & strtltxU, strtltyU, strocnxU, strocnyU, strintxU, strintyU, taubxU, taubyU, & @@ -176,7 +167,7 @@ subroutine implicit_solver (dt) stressp_1, stressp_2, stressp_3, stressp_4, & stressm_1, stressm_2, stressm_3, stressm_4, & stress12_1, stress12_2, stress12_3, stress12_4 - use ice_grid, only: tmask, umask, dxT, dyT, cxp, cyp, cxm, cym, & + use ice_grid, only: tmask, umask, dxT, dyT, & tarear, grid_type, grid_average_X2Y, & grid_atm_dynu, grid_atm_dynv, grid_ocn_dynu, grid_ocn_dynv use ice_state, only: aice, aiU, vice, vsno, uvel, vvel, divu, shear, & @@ -686,9 +677,8 @@ subroutine anderson_solver (icellT , icellU , & use ice_domain, only: maskhalo_dyn, halo_info use ice_domain_size, only: max_blocks use ice_flux, only: fmU, TbU - use ice_grid, only: dxT, dyT, dxhy, dyhx, cxp, cyp, cxm, cym, & - uarear - use ice_dyn_shared, only: DminTarea + use ice_grid, only: dxT, dyT, uarear + use ice_dyn_shared, only: DminTarea, dxhy, dyhx, cxp, cyp, cxm, cym use ice_state, only: uvel, vvel, strength use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound @@ -2753,6 +2743,7 @@ subroutine fgmres (zetax2 , etax2 , & use ice_boundary, only: ice_HaloUpdate use ice_domain, only: maskhalo_dyn, halo_info use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound + use ice_dyn_shared, only: dxhy, dyhx, cxp, cyp, cxm, cym real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks,4), intent(in) :: & zetax2 , & ! 
zetax2 = 2*zeta (bulk viscosity) @@ -3154,6 +3145,7 @@ subroutine pgmres (zetax2 , etax2 , & use ice_boundary, only: ice_HaloUpdate use ice_domain, only: maskhalo_dyn, halo_info use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound + use ice_dyn_shared, only: dyhx, dxhy, cxp, cyp, cxm, cym real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks,4), intent(in) :: & zetax2 , & ! zetax2 = 2*zeta (bulk viscosity) diff --git a/cicecore/cicedyn/dynamics/ice_transport_remap.F90 b/cicecore/cicedyn/dynamics/ice_transport_remap.F90 index dd59efc87..11521a0fa 100644 --- a/cicecore/cicedyn/dynamics/ice_transport_remap.F90 +++ b/cicecore/cicedyn/dynamics/ice_transport_remap.F90 @@ -25,6 +25,7 @@ ! can be specified (following an idea of Mats Bentsen) ! 2010: ECH removed unnecessary grid arrays and optional arguments from ! horizontal_remap +! 2023: TAR, DMI Remove commented code and unnecessary arrays module ice_transport_remap @@ -253,52 +254,15 @@ module ice_transport_remap ! ! Grid quantities used by the remapping transport scheme ! -! Note: the arrays xyav, xxxav, etc are not needed for rectangular grids -! but may be needed in the future for other nonuniform grids. They have -! been commented out here to save memory and flops. +! Note: Arrays needed for nonuniform grids has been deleted. +! They can be found in version 6.5 and earlier ! ! author William H. Lipscomb, LANL subroutine init_remap - use ice_domain, only: nblocks - use ice_grid, only: xav, yav, xxav, yyav -! dxT, dyT, xyav, & -! xxxav, xxyav, xyyav, yyyav - - integer (kind=int_kind) :: & - i, j, iblk ! standard indices - character(len=*), parameter :: subname = '(init_remap)' - ! Compute grid cell average geometric quantities on the scaled - ! rectangular grid with dx = 1, dy = 1. - ! - ! Note: On a rectangular grid, the integral of any odd function - ! of x or y = 0. - - !$OMP PARALLEL DO PRIVATE(iblk,i,j) SCHEDULE(runtime) - do iblk = 1, nblocks - do j = 1, ny_block - do i = 1, nx_block - xav(i,j,iblk) = c0 - yav(i,j,iblk) = c0 -!!! These formulas would be used on a rectangular grid -!!! with dimensions (dxT, dyT): -!!! xxav(i,j,iblk) = dxT(i,j,iblk)**2 / c12 -!!! yyav(i,j,iblk) = dyT(i,j,iblk)**2 / c12 - xxav(i,j,iblk) = c1/c12 - yyav(i,j,iblk) = c1/c12 -! xyav(i,j,iblk) = c0 -! xxxav(i,j,iblk) = c0 -! xxyav(i,j,iblk) = c0 -! xyyav(i,j,iblk) = c0 -! yyyav(i,j,iblk) = c0 - enddo - enddo - enddo - !$OMP END PARALLEL DO - !------------------------------------------------------------------- ! Set logical l_fixed_area depending of the grid type. ! @@ -356,9 +320,7 @@ subroutine horizontal_remap (dt, ntrace, & use ice_domain, only: nblocks, blocks_ice, halo_info, maskhalo_remap use ice_blocks, only: block, get_block, nghost use ice_grid, only: HTE, HTN, dxu, dyu, & - earea, narea, tarear, hm, & - xav, yav, xxav, yyav -! xyav, xxxav, xxyav, xyyav, yyyav + earea, narea, tarear, hm use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound real (kind=dbl_kind), intent(in) :: & @@ -519,12 +481,7 @@ subroutine horizontal_remap (dt, ntrace, & tracer_type, depend, & has_dependents, icellsnc(0,iblk), & indxinc(:,0), indxjnc(:,0), & - hm (:,:,iblk), xav (:,:,iblk), & - yav (:,:,iblk), xxav (:,:,iblk), & - yyav (:,:,iblk), & -! xyav (:,:,iblk), & -! xxxav (:,:,iblk), xxyav (:,:,iblk), & -! 
xyyav (:,:,iblk), yyyav (:,:,iblk), & + hm (:,:,iblk), & mm (:,:,0,iblk), mc (:,:,0,iblk), & mx (:,:,0,iblk), my (:,:,0,iblk), & mmask(:,:,0) ) @@ -539,12 +496,7 @@ subroutine horizontal_remap (dt, ntrace, & tracer_type, depend, & has_dependents, icellsnc (n,iblk), & indxinc (:,n), indxjnc(:,n), & - hm (:,:,iblk), xav (:,:,iblk), & - yav (:,:,iblk), xxav (:,:,iblk), & - yyav (:,:,iblk), & -! xyav (:,:,iblk), & -! xxxav (:,:,iblk), xxyav (:,:,iblk), & -! xyyav (:,:,iblk), yyyav (:,:,iblk), & + hm (:,:,iblk), & mm (:,:,n,iblk), mc (:,:,n,iblk), & mx (:,:,n,iblk), my (:,:,n,iblk), & mmask (:,:,n), & @@ -1052,12 +1004,7 @@ subroutine construct_fields (nx_block, ny_block, & tracer_type, depend, & has_dependents, icells, & indxi, indxj, & - hm, xav, & - yav, xxav, & - yyav, & -! xyav, & -! xxxav, xxyav, & -! xyyav, yyyav, & + hm, & mm, mc, & mx, my, & mmask, & @@ -1084,11 +1031,7 @@ subroutine construct_fields (nx_block, ny_block, & indxj real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - hm , & ! land/boundary mask, thickness (T-cell) - xav, yav , & ! mean T-cell values of x, y - xxav, yyav ! mean T-cell values of xx, yy -! xyav, , & ! mean T-cell values of xy -! xxxav,xxyav,xyyav,yyyav ! mean T-cell values of xxx, xxy, xyy, yyy + hm ! land/boundary mask, thickness (T-cell) real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & mm , & ! mean value of mass field @@ -1114,16 +1057,21 @@ subroutine construct_fields (nx_block, ny_block, & ij ! combined i/j horizontal index real (kind=dbl_kind), dimension (nx_block,ny_block) :: & + xav , & ! mean T-cell values of x + yav , & ! mean T-cell values of y mxav , & ! x coordinate of center of mass myav ! y coordinate of center of mass + real (kind=dbl_kind), parameter :: xxav=c1/c12 ! mean T-cell values of xx + real (kind=dbl_kind), parameter :: yyav=c1/c12 ! mean T-cell values of yy + real (kind=dbl_kind), dimension (nx_block,ny_block,ntrace) :: & mtxav , & ! x coordinate of center of mass*tracer mtyav ! y coordinate of center of mass*tracer real (kind=dbl_kind) :: & puny, & - w1, w2, w3, w7 ! work variables + w2, w3, w7 ! work variables character(len=*), parameter :: subname = '(construct_fields)' @@ -1177,9 +1125,11 @@ subroutine construct_fields (nx_block, ny_block, & do j = 1, ny_block do i = 1, nx_block - mc(i,j) = c0 - mx(i,j) = c0 - my(i,j) = c0 + xav(i,j) = c0 + yav(i,j) = c0 + mc(i,j) = c0 + mx(i,j) = c0 + my(i,j) = c0 mxav(i,j) = c0 myav(i,j) = c0 enddo @@ -1213,12 +1163,8 @@ subroutine construct_fields (nx_block, ny_block, & j = indxj(ij) ! mass field at geometric center - ! echmod: xav = yav = 0 mc(i,j) = mm(i,j) -! mc(i,j) = mm(i,j) - xav(i,j)*mx(i,j) & -! - yav(i,j)*my(i,j) - enddo ! ij ! tracers @@ -1230,18 +1176,10 @@ subroutine construct_fields (nx_block, ny_block, & j = indxj(ij) ! center of mass (mxav,myav) for each cell - ! echmod: xyav = 0 - mxav(i,j) = (mx(i,j)*xxav(i,j) & - + mc(i,j)*xav (i,j)) / mm(i,j) - myav(i,j) = (my(i,j)*yyav(i,j) & - + mc(i,j)*yav(i,j)) / mm(i,j) - -! mxav(i,j) = (mx(i,j)*xxav(i,j) & -! + my(i,j)*xyav(i,j) & -! + mc(i,j)*xav (i,j)) / mm(i,j) -! myav(i,j) = (mx(i,j)*xyav(i,j) & -! + my(i,j)*yyav(i,j) & -! + mc(i,j)*yav(i,j)) / mm(i,j) + + mxav(i,j) = mx(i,j)*xxav / mm(i,j) + myav(i,j) = my(i,j)*yyav / mm(i,j) + enddo do nt = 1, ntrace @@ -1276,30 +1214,14 @@ subroutine construct_fields (nx_block, ny_block, & if (tmask(i,j,nt) > puny) then ! 
center of area*tracer - w1 = mc(i,j)*tc(i,j,nt) w2 = mc(i,j)*tx(i,j,nt) & + mx(i,j)*tc(i,j,nt) w3 = mc(i,j)*ty(i,j,nt) & + my(i,j)*tc(i,j,nt) -! w4 = mx(i,j)*tx(i,j,nt) -! w5 = mx(i,j)*ty(i,j,nt) & -! + my(i,j)*tx(i,j,nt) -! w6 = my(i,j)*ty(i,j,nt) w7 = c1 / (mm(i,j)*tm(i,j,nt)) ! echmod: grid arrays = 0 - mtxav(i,j,nt) = (w1*xav (i,j) + w2*xxav (i,j)) & - * w7 - mtyav(i,j,nt) = (w1*yav(i,j) + w3*yyav(i,j)) & - * w7 - -! mtxav(i,j,nt) = (w1*xav (i,j) + w2*xxav (i,j) & -! + w3*xyav (i,j) + w4*xxxav(i,j) & -! + w5*xxyav(i,j) + w6*xyyav(i,j)) & -! * w7 -! mtyav(i,j,nt) = (w1*yav(i,j) + w2*xyav (i,j) & -! + w3*yyav(i,j) + w4*xxyav(i,j) & -! + w5*xyyav(i,j) + w6*yyyav(i,j)) & -! * w7 + mtxav(i,j,nt) = w2*xxav *w7 + mtyav(i,j,nt) = w3*yyav * w7 endif ! tmask enddo ! ij @@ -1342,8 +1264,6 @@ subroutine construct_fields (nx_block, ny_block, & j = indxj(ij) tc(i,j,nt) = tm(i,j,nt) -! tx(i,j,nt) = c0 ! already initialized to 0. -! ty(i,j,nt) = c0 enddo ! ij endif ! tracer_type @@ -1355,7 +1275,6 @@ subroutine construct_fields (nx_block, ny_block, & end subroutine construct_fields !======================================================================= -! ! Compute a limited gradient of the scalar field phi in scaled coordinates. ! "Limited" means that we do not create new extrema in phi. For ! instance, field values at the cell corners can neither exceed the @@ -1379,12 +1298,14 @@ subroutine limited_gradient (nx_block, ny_block, & real (kind=dbl_kind), dimension (nx_block,ny_block), intent (in) :: & phi , & ! input tracer field (mean values in each grid cell) - cnx , & ! x-coordinate of phi relative to geometric center of cell - cny , & ! y-coordinate of phi relative to geometric center of cell phimask ! phimask(i,j) = 1 if phi(i,j) has physical meaning, = 0 otherwise. ! For instance, aice has no physical meaning in land cells, ! and hice no physical meaning where aice = 0. + real (kind=dbl_kind), dimension (nx_block,ny_block), intent (in) :: & + cnx , & ! x-coordinate of phi relative to geometric center of cell + cny ! y-coordinate of phi relative to geometric center of cellĀ½ + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out) :: & gx , & ! limited x-direction gradient gy ! limited y-direction gradient @@ -3102,10 +3023,6 @@ subroutine locate_triangles (nx_block, ny_block, & write(nu_diag,*) '' write(nu_diag,*) 'WARNING: xp =', xp(i,j,nv,ng) write(nu_diag,*) 'm, i, j, ng, nv =', my_task, i, j, ng, nv -! write(nu_diag,*) 'yil,xdl,xcl,ydl=',yil,xdl,xcl,ydl -! write(nu_diag,*) 'yir,xdr,xcr,ydr=',yir,xdr,xcr,ydr -! write(nu_diag,*) 'ydm=',ydm -! stop endif if (abs(yp(i,j,nv,ng)) > p5+puny) then write(nu_diag,*) '' diff --git a/cicecore/cicedyn/infrastructure/ice_grid.F90 b/cicecore/cicedyn/infrastructure/ice_grid.F90 index ef2db8a11..1cc3540ca 100644 --- a/cicecore/cicedyn/infrastructure/ice_grid.F90 +++ b/cicecore/cicedyn/infrastructure/ice_grid.F90 @@ -115,20 +115,6 @@ module ice_grid G_HTE , & ! length of eastern edge of T-cell (global ext.) G_HTN ! length of northern edge of T-cell (global ext.) - real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & - cyp , & ! 1.5*HTE(i,j)-0.5*HTW(i,j) = 1.5*HTE(i,j)-0.5*HTE(i-1,j) - cxp , & ! 1.5*HTN(i,j)-0.5*HTS(i,j) = 1.5*HTN(i,j)-0.5*HTN(i,j-1) - cym , & ! 0.5*HTE(i,j)-1.5*HTW(i,j) = 0.5*HTE(i,j)-1.5*HTE(i-1,j) - cxm , & ! 0.5*HTN(i,j)-1.5*HTS(i,j) = 0.5*HTN(i,j)-1.5*HTN(i,j-1) - dxhy , & ! 0.5*(HTE(i,j) - HTW(i,j)) = 0.5*(HTE(i,j) - HTE(i-1,j)) - dyhx ! 
0.5*(HTN(i,j) - HTS(i,j)) = 0.5*(HTN(i,j) - HTN(i,j-1)) - - real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & - ratiodxN , & ! - dxN(i+1,j) / dxN(i,j) - ratiodyE , & ! - dyE(i ,j+1) / dyE(i,j) - ratiodxNr , & ! 1 / ratiodxN - ratiodyEr ! 1 / ratiodyE - ! grid dimensions for rectangular grid real (kind=dbl_kind), public :: & dxrect, & ! user_specified spacing (cm) in x-direction (uniform HTN) @@ -154,26 +140,6 @@ module ice_grid lone_bounds, & ! longitude of gridbox corners for E point late_bounds ! latitude of gridbox corners for E point - ! geometric quantities used for remapping transport - real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & - xav , & ! mean T-cell value of x - yav , & ! mean T-cell value of y - xxav , & ! mean T-cell value of xx -! xyav , & ! mean T-cell value of xy -! yyav , & ! mean T-cell value of yy - yyav ! mean T-cell value of yy -! xxxav, & ! mean T-cell value of xxx -! xxyav, & ! mean T-cell value of xxy -! xyyav, & ! mean T-cell value of xyy -! yyyav ! mean T-cell value of yyy - - real (kind=dbl_kind), & - dimension (:,:,:,:,:), allocatable, public :: & - mne, & ! matrices used for coordinate transformations in remapping - mnw, & ! ne = northeast corner, nw = northwest, etc. - mse, & - msw - ! masks real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & hm , & ! land/boundary mask, thickness (T-cell) @@ -256,16 +222,6 @@ subroutine alloc_grid ANGLET (nx_block,ny_block,max_blocks), & ! ANGLE converted to T-cells bathymetry(nx_block,ny_block,max_blocks),& ! ocean depth, for grounding keels and bergs (m) ocn_gridcell_frac(nx_block,ny_block,max_blocks),& ! only relevant for lat-lon grids - cyp (nx_block,ny_block,max_blocks), & ! 1.5*HTE - 0.5*HTW - cxp (nx_block,ny_block,max_blocks), & ! 1.5*HTN - 0.5*HTS - cym (nx_block,ny_block,max_blocks), & ! 0.5*HTE - 1.5*HTW - cxm (nx_block,ny_block,max_blocks), & ! 0.5*HTN - 1.5*HTS - dxhy (nx_block,ny_block,max_blocks), & ! 0.5*(HTE - HTW) - dyhx (nx_block,ny_block,max_blocks), & ! 0.5*(HTN - HTS) - xav (nx_block,ny_block,max_blocks), & ! mean T-cell value of x - yav (nx_block,ny_block,max_blocks), & ! mean T-cell value of y - xxav (nx_block,ny_block,max_blocks), & ! mean T-cell value of xx - yyav (nx_block,ny_block,max_blocks), & ! mean T-cell value of yy hm (nx_block,ny_block,max_blocks), & ! land/boundary mask, thickness (T-cell) bm (nx_block,ny_block,max_blocks), & ! task/block id uvm (nx_block,ny_block,max_blocks), & ! land/boundary mask, velocity (U-cell) - water in case of all water point @@ -288,23 +244,9 @@ subroutine alloc_grid latn_bounds(4,nx_block,ny_block,max_blocks), & ! latitude of gridbox corners for N point lone_bounds(4,nx_block,ny_block,max_blocks), & ! longitude of gridbox corners for E point late_bounds(4,nx_block,ny_block,max_blocks), & ! latitude of gridbox corners for E point - mne (2,2,nx_block,ny_block,max_blocks), & ! matrices used for coordinate transformations in remapping - mnw (2,2,nx_block,ny_block,max_blocks), & ! ne = northeast corner, nw = northwest, etc. - mse (2,2,nx_block,ny_block,max_blocks), & - msw (2,2,nx_block,ny_block,max_blocks), & stat=ierr) if (ierr/=0) call abort_ice(subname//'ERROR: Out of memory1') - if (grid_ice == 'CD' .or. 
grid_ice == 'C') then - allocate( & - ratiodxN (nx_block,ny_block,max_blocks), & - ratiodyE (nx_block,ny_block,max_blocks), & - ratiodxNr(nx_block,ny_block,max_blocks), & - ratiodyEr(nx_block,ny_block,max_blocks), & - stat=ierr) - if (ierr/=0) call abort_ice(subname//'ERROR: Out of memory2') - endif - if (save_ghte_ghtn) then if (my_task == master_task) then allocate( & @@ -599,34 +541,6 @@ subroutine init_grid2 enddo enddo - do j = jlo, jhi - do i = ilo, ihi - dxhy(i,j,iblk) = p5*(HTE(i,j,iblk) - HTE(i-1,j,iblk)) - dyhx(i,j,iblk) = p5*(HTN(i,j,iblk) - HTN(i,j-1,iblk)) - enddo - enddo - - do j = jlo, jhi+1 - do i = ilo, ihi+1 - cyp(i,j,iblk) = (c1p5*HTE(i,j,iblk) - p5*HTE(i-1,j,iblk)) - cxp(i,j,iblk) = (c1p5*HTN(i,j,iblk) - p5*HTN(i,j-1,iblk)) - ! match order of operations in cyp, cxp for tripole grids - cym(i,j,iblk) = -(c1p5*HTE(i-1,j,iblk) - p5*HTE(i,j,iblk)) - cxm(i,j,iblk) = -(c1p5*HTN(i,j-1,iblk) - p5*HTN(i,j,iblk)) - enddo - enddo - - if (grid_ice == 'CD' .or. grid_ice == 'C') then - do j = jlo, jhi - do i = ilo, ihi - ratiodxN (i,j,iblk) = - dxN(i+1,j ,iblk) / dxN(i,j,iblk) - ratiodyE (i,j,iblk) = - dyE(i ,j+1,iblk) / dyE(i,j,iblk) - ratiodxNr(i,j,iblk) = c1 / ratiodxN(i,j,iblk) - ratiodyEr(i,j,iblk) = c1 / ratiodyE(i,j,iblk) - enddo - enddo - endif - enddo ! iblk !$OMP END PARALLEL DO @@ -642,13 +556,6 @@ subroutine init_grid2 call ice_timer_start(timer_bound) - call ice_HaloUpdate (dxhy, halo_info, & - field_loc_center, field_type_vector, & - fillValue=c1) - call ice_HaloUpdate (dyhx, halo_info, & - field_loc_center, field_type_vector, & - fillValue=c1) - ! Update just on the tripole seam to ensure bit-for-bit symmetry across seam call ice_HaloUpdate (tarea, halo_info, & field_loc_center, field_type_scalar, & @@ -1353,12 +1260,6 @@ subroutine latlongrid dyN (i,j,iblk) = 1.e36_dbl_kind dxE (i,j,iblk) = 1.e36_dbl_kind dyE (i,j,iblk) = 1.e36_dbl_kind - dxhy (i,j,iblk) = 1.e36_dbl_kind - dyhx (i,j,iblk) = 1.e36_dbl_kind - cyp (i,j,iblk) = 1.e36_dbl_kind - cxp (i,j,iblk) = 1.e36_dbl_kind - cym (i,j,iblk) = 1.e36_dbl_kind - cxm (i,j,iblk) = 1.e36_dbl_kind enddo enddo enddo
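
Note on the constants introduced in construct_fields: with xav and yav now handled as local arrays set to zero and xxav, yyav reduced to the parameter c1/c12, the values follow directly from averaging over the scaled T-cell with dx = dy = 1 used by the remapping scheme (see the comments removed from init_remap). A short derivation, assuming only that scaled cell centered on the origin:

   xav  = \int_{-1/2}^{1/2} \int_{-1/2}^{1/2} x   dx dy = 0       (odd integrand; yav = 0 likewise)
   xxav = \int_{-1/2}^{1/2} \int_{-1/2}^{1/2} x^2 dx dy = 1/12    (yyav = 1/12 likewise)

On a physical rectangular cell these averages would instead be dxT**2/12 and dyT**2/12, which is the commented-out form that this patch deletes from init_remap.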
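
The patch also moves ownership of the derived geometric factors out of ice_grid: cxp, cyp, cxm, cym, dxhy and dyhx are now declared, allocated and filled in ice_dyn_shared (dxhy/dyhx only for the B grid with the standard_2d EVP solver), while ratiodxN, ratiodyE, ratiodxNr and ratiodyEr live in ice_dyn_evp and are allocated only for the C/CD grids. Code outside these modules that still imports the factors from ice_grid needs its use statements updated. A minimal, purely illustrative sketch of such a consumer (the routine and the variable w are hypothetical; the module and array names are those in the patch):

      subroutine example_grid_factor_user (i, j, iblk, w)
         ! Hypothetical consumer routine, for illustration only.
         ! Before this patch:  use ice_grid,       only: cxp, cyp, cxm, cym
         ! After this patch:   use ice_dyn_shared, only: cxp, cyp, cxm, cym
         use ice_kinds_mod,  only: int_kind, dbl_kind
         use ice_dyn_shared, only: cxp, cyp, cxm, cym
         integer (kind=int_kind), intent(in) :: &
            i, j, iblk     ! point and block indices
         real (kind=dbl_kind), intent(out) :: &
            w              ! example combination of edge-length factors
         ! an example of the HTE/HTN edge-length combinations that the
         ! EVP/VP stress code builds from these arrays
         w = cxp(i,j,iblk) + cyp(i,j,iblk) - cxm(i,j,iblk) - cym(i,j,iblk)
      end subroutine example_grid_factor_user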