From d088bfba02fecc8a756fc5e27ffd1400f6f3c787 Mon Sep 17 00:00:00 2001 From: Tony Craig Date: Fri, 15 Jul 2022 07:42:53 -0700 Subject: [PATCH 01/57] Update some CICE variable names to clarify grid (#729) * Rename several variables to make them compatible with C-grid names: strairxU, strairyU, strtltxU, strtltyU, strintxU, strintyU, taubxU, taubyU, strocnxU, strocnyU, fmU, TbU, waterxU, wateryU, forcexU, forceyU, aiU Move iceumask, icenmask, iceemask from ice_flux to ice_grid Make dyn_prep2, stepu, stepuv_CD, stepv_C, implicit_solver, and anderson_solver argument names a little more generic/specific Inline boxslotcyl velocity calculation * remove boxslotcyl_data_vel * update documentation * Additional updates to change to upper case for some variable names. Fix indentations as noted --- .../cicedynB/analysis/ice_diagnostics.F90 | 36 +- cicecore/cicedynB/analysis/ice_history.F90 | 50 +- cicecore/cicedynB/dynamics/ice_dyn_eap.F90 | 80 +-- cicecore/cicedynB/dynamics/ice_dyn_evp.F90 | 119 ++--- cicecore/cicedynB/dynamics/ice_dyn_shared.F90 | 492 +++++++++--------- cicecore/cicedynB/dynamics/ice_dyn_vp.F90 | 188 +++---- cicecore/cicedynB/general/ice_flux.F90 | 85 ++- cicecore/cicedynB/general/ice_init.F90 | 73 +-- cicecore/cicedynB/infrastructure/ice_grid.F90 | 11 +- .../infrastructure/ice_restart_driver.F90 | 13 +- doc/source/cice_index.rst | 12 +- 11 files changed, 559 insertions(+), 600 deletions(-) diff --git a/cicecore/cicedynB/analysis/ice_diagnostics.F90 b/cicecore/cicedynB/analysis/ice_diagnostics.F90 index fb9fc5f03..ec5ad05fa 100644 --- a/cicecore/cicedynB/analysis/ice_diagnostics.F90 +++ b/cicecore/cicedynB/analysis/ice_diagnostics.F90 @@ -1761,7 +1761,7 @@ subroutine print_state(plabel,i,j,iblk) uvelE, vvelE, uvelN, vvelN, trcrn use ice_flux, only: uatm, vatm, potT, Tair, Qa, flw, frain, fsnow, & fsens, flat, evap, flwout, swvdr, swvdf, swidr, swidf, rhoa, & - frzmlt, sst, sss, Tf, Tref, Qref, Uref, uocn, vocn, strtltx, strtlty + frzmlt, sst, sss, Tf, Tref, Qref, Uref, uocn, vocn, strtltxU, strtltyU character (len=20), intent(in) :: plabel @@ -1907,14 +1907,14 @@ subroutine print_state(plabel,i,j,iblk) write(nu_diag,*) ' fsnow = ',fsnow(i,j,iblk) write(nu_diag,*) ' ' write(nu_diag,*) 'ocn states and fluxes' - write(nu_diag,*) ' frzmlt = ',frzmlt (i,j,iblk) - write(nu_diag,*) ' sst = ',sst (i,j,iblk) - write(nu_diag,*) ' sss = ',sss (i,j,iblk) - write(nu_diag,*) ' Tf = ',Tf (i,j,iblk) - write(nu_diag,*) ' uocn = ',uocn (i,j,iblk) - write(nu_diag,*) ' vocn = ',vocn (i,j,iblk) - write(nu_diag,*) ' strtltx = ',strtltx(i,j,iblk) - write(nu_diag,*) ' strtlty = ',strtlty(i,j,iblk) + write(nu_diag,*) ' frzmlt = ',frzmlt (i,j,iblk) + write(nu_diag,*) ' sst = ',sst (i,j,iblk) + write(nu_diag,*) ' sss = ',sss (i,j,iblk) + write(nu_diag,*) ' Tf = ',Tf (i,j,iblk) + write(nu_diag,*) ' uocn = ',uocn (i,j,iblk) + write(nu_diag,*) ' vocn = ',vocn (i,j,iblk) + write(nu_diag,*) ' strtltxU= ',strtltxU(i,j,iblk) + write(nu_diag,*) ' strtltyU= ',strtltyU(i,j,iblk) write(nu_diag,*) ' ' write(nu_diag,*) 'srf states and fluxes' write(nu_diag,*) ' Tref = ',Tref (i,j,iblk) @@ -1944,7 +1944,7 @@ subroutine print_points_state(plabel,ilabel) uvelE, vvelE, uvelE, vvelE, trcrn use ice_flux, only: uatm, vatm, potT, Tair, Qa, flw, frain, fsnow, & fsens, flat, evap, flwout, swvdr, swvdf, swidr, swidf, rhoa, & - frzmlt, sst, sss, Tf, Tref, Qref, Uref, uocn, vocn, strtltx, strtlty + frzmlt, sst, sss, Tf, Tref, Qref, Uref, uocn, vocn, strtltxU, strtltyU character (len=*), intent(in),optional :: plabel integer ,
intent(in),optional :: ilabel @@ -2060,14 +2060,14 @@ subroutine print_points_state(plabel,ilabel) write(nu_diag,*) ' fsnow = ',fsnow(i,j,iblk) write(nu_diag,*) ' ' write(nu_diag,*) 'ocn states and fluxes' - write(nu_diag,*) ' frzmlt = ',frzmlt (i,j,iblk) - write(nu_diag,*) ' sst = ',sst (i,j,iblk) - write(nu_diag,*) ' sss = ',sss (i,j,iblk) - write(nu_diag,*) ' Tf = ',Tf (i,j,iblk) - write(nu_diag,*) ' uocn = ',uocn (i,j,iblk) - write(nu_diag,*) ' vocn = ',vocn (i,j,iblk) - write(nu_diag,*) ' strtltx = ',strtltx(i,j,iblk) - write(nu_diag,*) ' strtlty = ',strtlty(i,j,iblk) + write(nu_diag,*) ' frzmlt = ',frzmlt (i,j,iblk) + write(nu_diag,*) ' sst = ',sst (i,j,iblk) + write(nu_diag,*) ' sss = ',sss (i,j,iblk) + write(nu_diag,*) ' Tf = ',Tf (i,j,iblk) + write(nu_diag,*) ' uocn = ',uocn (i,j,iblk) + write(nu_diag,*) ' vocn = ',vocn (i,j,iblk) + write(nu_diag,*) ' strtltxU= ',strtltxU(i,j,iblk) + write(nu_diag,*) ' strtltyU= ',strtltyU(i,j,iblk) write(nu_diag,*) ' ' write(nu_diag,*) 'srf states and fluxes' write(nu_diag,*) ' Tref = ',Tref (i,j,iblk) diff --git a/cicecore/cicedynB/analysis/ice_history.F90 b/cicecore/cicedynB/analysis/ice_history.F90 index 2fc57044e..f3ca9b33e 100644 --- a/cicecore/cicedynB/analysis/ice_history.F90 +++ b/cicecore/cicedynB/analysis/ice_history.F90 @@ -2104,13 +2104,13 @@ subroutine accum_hist (dt) Tair, Tref, Qref, congel, frazil, frazil_diag, snoice, dsnow, & melts, meltb, meltt, meltl, fresh, fsalt, fresh_ai, fsalt_ai, & fhocn, fhocn_ai, uatm, vatm, fbot, Tbot, Tsnice, fswthru_ai, & - strairx, strairy, strtltx, strtlty, strintx, strinty, & - taubx, tauby, strocnx, strocny, & + strairxU, strairyU, strtltxU, strtltyU, strintxU, strintyU, & + taubxU, taubyU, strocnxU, strocnyU, & strairxN, strairyN, strtltxN, strtltyN, strintxN, strintyN, & taubxN, taubyN, strocnxN, strocnyN, & strairxE, strairyE, strtltxE, strtltyE, strintxE, strintyE, & taubxE, taubyE, strocnxE, strocnyE, & - fm, fmN, fmE, daidtt, dvidtt, daidtd, dvidtd, fsurf, & + fmU, fmN, fmE, daidtt, dvidtt, daidtd, dvidtd, fsurf, & fcondtop, fcondbot, fsurfn, fcondtopn, flatn, fsensn, albcnt, snwcnt, & stressp_1, stressm_1, stress12_1, & stresspT, stressmT, stress12T, & @@ -2528,29 +2528,29 @@ subroutine accum_hist (dt) call accum_hist_field(n_fswthru_ai,iblk, fswthru_ai(:,:,iblk), a2D) if (f_strairx(1:1) /= 'x') & - call accum_hist_field(n_strairx, iblk, strairx(:,:,iblk), a2D) + call accum_hist_field(n_strairx, iblk, strairxU(:,:,iblk), a2D) if (f_strairy(1:1) /= 'x') & - call accum_hist_field(n_strairy, iblk, strairy(:,:,iblk), a2D) + call accum_hist_field(n_strairy, iblk, strairyU(:,:,iblk), a2D) if (f_strtltx(1:1) /= 'x') & - call accum_hist_field(n_strtltx, iblk, strtltx(:,:,iblk), a2D) + call accum_hist_field(n_strtltx, iblk, strtltxU(:,:,iblk), a2D) if (f_strtlty(1:1) /= 'x') & - call accum_hist_field(n_strtlty, iblk, strtlty(:,:,iblk), a2D) + call accum_hist_field(n_strtlty, iblk, strtltyU(:,:,iblk), a2D) if (f_strcorx(1:1) /= 'x') & - call accum_hist_field(n_strcorx, iblk, fm(:,:,iblk)*vvel(:,:,iblk), a2D) + call accum_hist_field(n_strcorx, iblk, fmU(:,:,iblk)*vvel(:,:,iblk), a2D) if (f_strcory(1:1) /= 'x') & - call accum_hist_field(n_strcory, iblk,-fm(:,:,iblk)*uvel(:,:,iblk), a2D) + call accum_hist_field(n_strcory, iblk,-fmU(:,:,iblk)*uvel(:,:,iblk), a2D) if (f_strocnx(1:1) /= 'x') & - call accum_hist_field(n_strocnx, iblk, strocnx(:,:,iblk), a2D) + call accum_hist_field(n_strocnx, iblk, strocnxU(:,:,iblk), a2D) if (f_strocny(1:1) /= 'x') & - call accum_hist_field(n_strocny, iblk, 
strocny(:,:,iblk), a2D) + call accum_hist_field(n_strocny, iblk, strocnyU(:,:,iblk), a2D) if (f_strintx(1:1) /= 'x') & - call accum_hist_field(n_strintx, iblk, strintx(:,:,iblk), a2D) + call accum_hist_field(n_strintx, iblk, strintxU(:,:,iblk), a2D) if (f_strinty(1:1) /= 'x') & - call accum_hist_field(n_strinty, iblk, strinty(:,:,iblk), a2D) + call accum_hist_field(n_strinty, iblk, strintyU(:,:,iblk), a2D) if (f_taubx(1:1) /= 'x') & - call accum_hist_field(n_taubx, iblk, taubx(:,:,iblk), a2D) + call accum_hist_field(n_taubx, iblk, taubxU(:,:,iblk), a2D) if (f_tauby(1:1) /= 'x') & - call accum_hist_field(n_tauby, iblk, tauby(:,:,iblk), a2D) + call accum_hist_field(n_tauby, iblk, taubyU(:,:,iblk), a2D) if (f_strairxN(1:1) /= 'x') & call accum_hist_field(n_strairxN, iblk, strairxN(:,:,iblk), a2D) if (f_strairyN(1:1) /= 'x') & @@ -2791,7 +2791,7 @@ subroutine accum_hist (dt) do j = jlo, jhi do i = ilo, ihi if (aice_init(i,j,iblk) > puny) & - worka(i,j) = aice(i,j,iblk)*(aice(i,j,iblk)*strairx(i,j,iblk)/aice_init(i,j,iblk)) + worka(i,j) = aice(i,j,iblk)*(aice(i,j,iblk)*strairxU(i,j,iblk)/aice_init(i,j,iblk)) enddo enddo call accum_hist_field(n_sistrxdtop, iblk, worka(:,:), a2D) @@ -2802,7 +2802,7 @@ subroutine accum_hist (dt) do j = jlo, jhi do i = ilo, ihi if (aice_init(i,j,iblk) > puny) & - worka(i,j) = aice(i,j,iblk)*(aice(i,j,iblk)*strairy(i,j,iblk)/aice_init(i,j,iblk)) + worka(i,j) = aice(i,j,iblk)*(aice(i,j,iblk)*strairyU(i,j,iblk)/aice_init(i,j,iblk)) enddo enddo call accum_hist_field(n_sistrydtop, iblk, worka(:,:), a2D) @@ -2813,7 +2813,7 @@ subroutine accum_hist (dt) do j = jlo, jhi do i = ilo, ihi if (aice(i,j,iblk) > puny) & - worka(i,j) = aice(i,j,iblk)*strocnx(i,j,iblk) + worka(i,j) = aice(i,j,iblk)*strocnxU(i,j,iblk) enddo enddo call accum_hist_field(n_sistrxubot, iblk, worka(:,:), a2D) @@ -2824,7 +2824,7 @@ subroutine accum_hist (dt) do j = jlo, jhi do i = ilo, ihi if (aice(i,j,iblk) > puny) & - worka(i,j) = aice(i,j,iblk)*strocny(i,j,iblk) + worka(i,j) = aice(i,j,iblk)*strocnyU(i,j,iblk) enddo enddo call accum_hist_field(n_sistryubot, iblk, worka(:,:), a2D) @@ -3293,7 +3293,7 @@ subroutine accum_hist (dt) do j = jlo, jhi do i = ilo, ihi if (aice(i,j,iblk) > puny) then - worka(i,j) = aice(i,j,iblk)*strtltx(i,j,iblk) + worka(i,j) = aice(i,j,iblk)*strtltxU(i,j,iblk) endif enddo enddo @@ -3305,7 +3305,7 @@ subroutine accum_hist (dt) do j = jlo, jhi do i = ilo, ihi if (aice(i,j,iblk) > puny) then - worka(i,j) = aice(i,j,iblk)*strtlty(i,j,iblk) + worka(i,j) = aice(i,j,iblk)*strtltyU(i,j,iblk) endif enddo enddo @@ -3317,7 +3317,7 @@ subroutine accum_hist (dt) do j = jlo, jhi do i = ilo, ihi if (aice(i,j,iblk) > puny) then - worka(i,j) = aice(i,j,iblk)*fm(i,j,iblk)*vvel(i,j,iblk) + worka(i,j) = aice(i,j,iblk)*fmU(i,j,iblk)*vvel(i,j,iblk) endif enddo enddo @@ -3329,7 +3329,7 @@ subroutine accum_hist (dt) do j = jlo, jhi do i = ilo, ihi if (aice(i,j,iblk) > puny) then - worka(i,j) = -aice(i,j,iblk)*fm(i,j,iblk)*uvel(i,j,iblk) + worka(i,j) = -aice(i,j,iblk)*fmU(i,j,iblk)*uvel(i,j,iblk) endif enddo enddo @@ -3341,7 +3341,7 @@ subroutine accum_hist (dt) do j = jlo, jhi do i = ilo, ihi if (aice(i,j,iblk) > puny) then - worka(i,j) = aice(i,j,iblk)*strintx(i,j,iblk) + worka(i,j) = aice(i,j,iblk)*strintxU(i,j,iblk) endif enddo enddo @@ -3353,7 +3353,7 @@ subroutine accum_hist (dt) do j = jlo, jhi do i = ilo, ihi if (aice(i,j,iblk) > puny) then - worka(i,j) = aice(i,j,iblk)*strinty(i,j,iblk) + worka(i,j) = aice(i,j,iblk)*strintyU(i,j,iblk) endif enddo enddo diff --git 
a/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 b/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 index 317a6ba0d..5cf0b5dbc 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 @@ -136,15 +136,15 @@ subroutine eap (dt) seabed_stress_method, seabed_stress, & stack_fields, unstack_fields use ice_flux, only: rdg_conv, strairxT, strairyT, & - strairx, strairy, uocn, vocn, ss_tltx, ss_tlty, iceumask, fm, & - strtltx, strtlty, strocnx, strocny, strintx, strinty, taubx, tauby, & + strairxU, strairyU, uocn, vocn, ss_tltx, ss_tlty, fmU, & + strtltxU, strtltyU, strocnxU, strocnyU, strintxU, strintyU, taubxU, taubyU, & strocnxT, strocnyT, strax, stray, & - Tbu, hwater, & + TbU, hwater, & stressp_1, stressp_2, stressp_3, stressp_4, & stressm_1, stressm_2, stressm_3, stressm_4, & stress12_1, stress12_2, stress12_3, stress12_4 use ice_grid, only: tmask, umask, dxT, dyT, dxhy, dyhx, cxp, cyp, cxm, cym, & - tarear, uarear, grid_average_X2Y, & + tarear, uarear, grid_average_X2Y, iceumask, & grid_atm_dynu, grid_atm_dynv, grid_ocn_dynu, grid_ocn_dynv use ice_state, only: aice, vice, vsno, uvel, vvel, divu, shear, & aice_init, aice0, aicen, vicen, strength @@ -178,11 +178,11 @@ subroutine eap (dt) ss_tltxU , & ! sea surface slope, x-direction (m/m) ss_tltyU , & ! sea surface slope, y-direction (m/m) tmass , & ! total mass of ice and snow (kg/m^2) - waterx , & ! for ocean stress calculation, x (m/s) - watery , & ! for ocean stress calculation, y (m/s) - forcex , & ! work array: combined atm stress and ocn tilt, x - forcey , & ! work array: combined atm stress and ocn tilt, y - aiu , & ! ice fraction on u-grid + waterxU , & ! for ocean stress calculation, x (m/s) + wateryU , & ! for ocean stress calculation, y (m/s) + forcexU , & ! work array: combined atm stress and ocn tilt, x + forceyU , & ! work array: combined atm stress and ocn tilt, y + aiU , & ! ice fraction on u-grid umass , & ! total mass of ice and snow (u grid) umassdti ! mass of U-cell/dte (kg/m^2 s) @@ -271,7 +271,7 @@ subroutine eap (dt) !----------------------------------------------------------------- call grid_average_X2Y('F', tmass , 'T' , umass, 'U') - call grid_average_X2Y('F', aice_init, 'T' , aiu , 'U') + call grid_average_X2Y('F', aice_init, 'T' , aiU , 'U') call grid_average_X2Y('S', uocn , grid_ocn_dynu, uocnU , 'U') call grid_average_X2Y('S', vocn , grid_ocn_dynv, vocnU , 'U') call grid_average_X2Y('S', ss_tltx , grid_ocn_dynu, ss_tltxU, 'U') @@ -288,15 +288,15 @@ subroutine eap (dt) file=__FILE__, line=__LINE__) if (.not. 
calc_strair) then - call grid_average_X2Y('F', strax, grid_atm_dynu, strairx, 'U') - call grid_average_X2Y('F', stray, grid_atm_dynv, strairy, 'U') + call grid_average_X2Y('F', strax, grid_atm_dynu, strairxU, 'U') + call grid_average_X2Y('F', stray, grid_atm_dynv, strairyU, 'U') else call ice_HaloUpdate (strairxT, halo_info, & field_loc_center, field_type_vector) call ice_HaloUpdate (strairyT, halo_info, & field_loc_center, field_type_vector) - call grid_average_X2Y('F', strairxT, 'T', strairx, 'U') - call grid_average_X2Y('F', strairyT, 'T', strairy, 'U') + call grid_average_X2Y('F', strairxT, 'T', strairxU, 'U') + call grid_average_X2Y('F', strairyT, 'T', strairyU, 'U') endif !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,ij,i,j) SCHEDULE(runtime) @@ -317,20 +317,20 @@ subroutine eap (dt) icellt (iblk), icellu (iblk), & indxti (:,iblk), indxtj (:,iblk), & indxui (:,iblk), indxuj (:,iblk), & - aiu (:,:,iblk), umass (:,:,iblk), & + aiU (:,:,iblk), umass (:,:,iblk), & umassdti (:,:,iblk), fcor_blk (:,:,iblk), & umask (:,:,iblk), & uocnU (:,:,iblk), vocnU (:,:,iblk), & - strairx (:,:,iblk), strairy (:,:,iblk), & + strairxU (:,:,iblk), strairyU (:,:,iblk), & ss_tltxU (:,:,iblk), ss_tltyU (:,:,iblk), & icetmask (:,:,iblk), iceumask (:,:,iblk), & - fm (:,:,iblk), dt, & - strtltx (:,:,iblk), strtlty (:,:,iblk), & - strocnx (:,:,iblk), strocny (:,:,iblk), & - strintx (:,:,iblk), strinty (:,:,iblk), & - taubx (:,:,iblk), tauby (:,:,iblk), & - waterx (:,:,iblk), watery (:,:,iblk), & - forcex (:,:,iblk), forcey (:,:,iblk), & + fmU (:,:,iblk), dt, & + strtltxU (:,:,iblk), strtltyU (:,:,iblk), & + strocnxU (:,:,iblk), strocnyU (:,:,iblk), & + strintxU (:,:,iblk), strintyU (:,:,iblk), & + taubxU (:,:,iblk), taubyU (:,:,iblk), & + waterxU (:,:,iblk), wateryU (:,:,iblk), & + forcexU (:,:,iblk), forceyU (:,:,iblk), & stressp_1 (:,:,iblk), stressp_2 (:,:,iblk), & stressp_3 (:,:,iblk), stressp_4 (:,:,iblk), & stressm_1 (:,:,iblk), stressm_2 (:,:,iblk), & @@ -339,7 +339,7 @@ subroutine eap (dt) stress12_3(:,:,iblk), stress12_4(:,:,iblk), & uvel_init (:,:,iblk), vvel_init (:,:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & - Tbu (:,:,iblk)) + TbU (:,:,iblk)) !----------------------------------------------------------------- ! Initialize structure tensor @@ -413,7 +413,7 @@ subroutine eap (dt) endif !----------------------------------------------------------------- - ! seabed stress factor Tbu (Tbu is part of Cb coefficient) + ! 
seabed stress factor TbU (TbU is part of Cb coefficient) !----------------------------------------------------------------- if (seabed_stress) then @@ -424,7 +424,7 @@ subroutine eap (dt) icellu (iblk), & indxui (:,iblk), indxuj(:,iblk), & vice (:,:,iblk), aice(:,:,iblk), & - hwater(:,:,iblk), Tbu (:,:,iblk)) + hwater(:,:,iblk), TbU (:,:,iblk)) enddo !$OMP END PARALLEL DO @@ -435,7 +435,7 @@ subroutine eap (dt) icellt(iblk), indxti(:,iblk), indxtj(:,iblk), & icellu(iblk), indxui(:,iblk), indxuj(:,iblk), & aicen(:,:,:,iblk), vicen(:,:,:,iblk), & - hwater (:,:,iblk), Tbu (:,:,iblk)) + hwater (:,:,iblk), TbU (:,:,iblk)) enddo !$OMP END PARALLEL DO endif @@ -493,17 +493,17 @@ subroutine eap (dt) call stepu (nx_block, ny_block, & icellu (iblk), Cdn_ocn (:,:,iblk), & indxui (:,iblk), indxuj (:,iblk), & - aiu (:,:,iblk), strtmp (:,:,:), & + aiU (:,:,iblk), strtmp (:,:,:), & uocnU (:,:,iblk), vocnU (:,:,iblk), & - waterx (:,:,iblk), watery (:,:,iblk), & - forcex (:,:,iblk), forcey (:,:,iblk), & - umassdti (:,:,iblk), fm (:,:,iblk), & + waterxU (:,:,iblk), wateryU (:,:,iblk), & + forcexU (:,:,iblk), forceyU (:,:,iblk), & + umassdti (:,:,iblk), fmU (:,:,iblk), & uarear (:,:,iblk), & - strintx (:,:,iblk), strinty (:,:,iblk), & - taubx (:,:,iblk), tauby (:,:,iblk), & + strintxU (:,:,iblk), strintyU (:,:,iblk), & + taubxU (:,:,iblk), taubyU (:,:,iblk), & uvel_init(:,:,iblk), vvel_init(:,:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & - Tbu (:,:,iblk)) + TbU (:,:,iblk)) ! call ice_timer_stop(timer_tmp2,iblk) !----------------------------------------------------------------- @@ -561,15 +561,15 @@ subroutine eap (dt) indxui (:,iblk), indxuj (:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & uocnU (:,:,iblk), vocnU (:,:,iblk), & - aiu (:,:,iblk), fm (:,:,iblk), & - strocnx (:,:,iblk), strocny (:,:,iblk)) + aiU (:,:,iblk), fmU (:,:,iblk), & + strocnxU(:,:,iblk), strocnyU(:,:,iblk)) enddo !$OMP END PARALLEL DO - ! strocn computed on U, N, E as needed. Map strocn U divided by aiu to T + ! strocn computed on U, N, E as needed. Map strocn U divided by aiU to T ! TODO: This should be done elsewhere as part of generalization? - ! conservation requires aiu be divided before averaging + ! 
conservation requires aiU be divided before averaging work1 = c0 work2 = c0 !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) @@ -577,8 +577,8 @@ subroutine eap (dt) do ij = 1, icellu(iblk) i = indxui(ij,iblk) j = indxuj(ij,iblk) - work1(i,j,iblk) = strocnx(i,j,iblk)/aiu(i,j,iblk) - work2(i,j,iblk) = strocny(i,j,iblk)/aiu(i,j,iblk) + work1(i,j,iblk) = strocnxU(i,j,iblk)/aiU(i,j,iblk) + work2(i,j,iblk) = strocnyU(i,j,iblk)/aiU(i,j,iblk) enddo enddo call ice_HaloUpdate (work1, halo_info, & diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 index 866775132..ecd283642 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 @@ -83,14 +83,14 @@ subroutine evp (dt) use ice_domain, only: nblocks, blocks_ice, halo_info, maskhalo_dyn use ice_domain_size, only: max_blocks, ncat, nx_global, ny_global use ice_flux, only: rdg_conv, rdg_shear, strairxT, strairyT, & - strairx, strairy, uocn, vocn, ss_tltx, ss_tlty, iceumask, fm, & - strtltx, strtlty, strocnx, strocny, strintx, strinty, taubx, tauby, & + strairxU, strairyU, uocn, vocn, ss_tltx, ss_tlty, fmU, & + strtltxU, strtltyU, strocnxU, strocnyU, strintxU, strintyU, taubxU, taubyU, & strocnxT, strocnyT, strax, stray, & - Tbu, hwater, & - strairxN, strairyN, icenmask, fmN, & + TbU, hwater, & + strairxN, strairyN, fmN, & strtltxN, strtltyN, strocnxN, strocnyN, strintxN, strintyN, taubxN, taubyN, & TbN, & - strairxE, strairyE, iceemask, fmE, & + strairxE, strairyE, fmE, & strtltxE, strtltyE, strocnxE, strocnyE, strintxE, strintyE, taubxE, taubyE, & TbE, & stressp_1, stressp_2, stressp_3, stressp_4, & @@ -99,6 +99,7 @@ subroutine evp (dt) stresspT, stressmT, stress12T, & stresspU, stressmU, stress12U use ice_grid, only: hm, tmask, umask, umaskCD, nmask, emask, uvm, epm, npm, & + iceumask, iceemask, icenmask, & dxE, dxN, dxT, dxU, dyE, dyN, dyT, dyU, & ratiodxN, ratiodxNr, ratiodyE, ratiodyEr, & dxhy, dyhx, cxp, cyp, cxm, cym, & @@ -150,11 +151,11 @@ subroutine evp (dt) ss_tltxU , & ! sea surface slope, x-direction (m/m) ss_tltyU , & ! sea surface slope, y-direction (m/m) tmass , & ! total mass of ice and snow (kg/m^2) - waterx , & ! for ocean stress calculation, x (m/s) - watery , & ! for ocean stress calculation, y (m/s) - forcex , & ! work array: combined atm stress and ocn tilt, x - forcey , & ! work array: combined atm stress and ocn tilt, y - aiu , & ! ice fraction on u-grid + waterxU , & ! for ocean stress calculation, x (m/s) + wateryU , & ! for ocean stress calculation, y (m/s) + forcexU , & ! work array: combined atm stress and ocn tilt, x + forceyU , & ! work array: combined atm stress and ocn tilt, y + aiU , & ! ice fraction on u-grid umass , & ! total mass of ice and snow (u grid) umassdti ! mass of U-cell/dte (kg/m^2 s) @@ -317,7 +318,7 @@ subroutine evp (dt) !----------------------------------------------------------------- call grid_average_X2Y('F', tmass , 'T' , umass , 'U') - call grid_average_X2Y('F', aice_init, 'T' , aiu , 'U') + call grid_average_X2Y('F', aice_init, 'T' , aiU , 'U') call grid_average_X2Y('S', uocn , grid_ocn_dynu, uocnU , 'U') call grid_average_X2Y('S', vocn , grid_ocn_dynv, vocnU , 'U') call grid_average_X2Y('S', ss_tltx , grid_ocn_dynu, ss_tltxU, 'U') @@ -348,15 +349,15 @@ subroutine evp (dt) file=__FILE__, line=__LINE__) if (.not. 
calc_strair) then - call grid_average_X2Y('F', strax, grid_atm_dynu, strairx, 'U') - call grid_average_X2Y('F', stray, grid_atm_dynv, strairy, 'U') + call grid_average_X2Y('F', strax, grid_atm_dynu, strairxU, 'U') + call grid_average_X2Y('F', stray, grid_atm_dynv, strairyU, 'U') else call ice_HaloUpdate (strairxT, halo_info, & field_loc_center, field_type_vector) call ice_HaloUpdate (strairyT, halo_info, & field_loc_center, field_type_vector) - call grid_average_X2Y('F', strairxT, 'T', strairx, 'U') - call grid_average_X2Y('F', strairyT, 'T', strairy, 'U') + call grid_average_X2Y('F', strairxT, 'T', strairxU, 'U') + call grid_average_X2Y('F', strairyT, 'T', strairyU, 'U') endif if (grid_ice == 'CD' .or. grid_ice == 'C') then @@ -392,20 +393,20 @@ subroutine evp (dt) icellt (iblk), icellu (iblk), & indxti (:,iblk), indxtj (:,iblk), & indxui (:,iblk), indxuj (:,iblk), & - aiu (:,:,iblk), umass (:,:,iblk), & + aiU (:,:,iblk), umass (:,:,iblk), & umassdti (:,:,iblk), fcor_blk (:,:,iblk), & umask (:,:,iblk), & uocnU (:,:,iblk), vocnU (:,:,iblk), & - strairx (:,:,iblk), strairy (:,:,iblk), & + strairxU (:,:,iblk), strairyU (:,:,iblk), & ss_tltxU (:,:,iblk), ss_tltyU (:,:,iblk), & icetmask (:,:,iblk), iceumask (:,:,iblk), & - fm (:,:,iblk), dt, & - strtltx (:,:,iblk), strtlty (:,:,iblk), & - strocnx (:,:,iblk), strocny (:,:,iblk), & - strintx (:,:,iblk), strinty (:,:,iblk), & - taubx (:,:,iblk), tauby (:,:,iblk), & - waterx (:,:,iblk), watery (:,:,iblk), & - forcex (:,:,iblk), forcey (:,:,iblk), & + fmU (:,:,iblk), dt, & + strtltxU (:,:,iblk), strtltyU (:,:,iblk), & + strocnxU (:,:,iblk), strocnyU (:,:,iblk), & + strintxU (:,:,iblk), strintyU (:,:,iblk), & + taubxU (:,:,iblk), taubyU (:,:,iblk), & + waterxU (:,:,iblk), wateryU (:,:,iblk), & + forcexU (:,:,iblk), forceyU (:,:,iblk), & stressp_1 (:,:,iblk), stressp_2 (:,:,iblk), & stressp_3 (:,:,iblk), stressp_4 (:,:,iblk), & stressm_1 (:,:,iblk), stressm_2 (:,:,iblk), & @@ -414,7 +415,7 @@ subroutine evp (dt) stress12_3(:,:,iblk), stress12_4(:,:,iblk), & uvel_init (:,:,iblk), vvel_init (:,:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & - Tbu (:,:,iblk)) + TbU (:,:,iblk)) elseif (trim(grid_ice) == 'CD' .or. 
grid_ice == 'C') then call dyn_prep2 (nx_block, ny_block, & @@ -422,20 +423,20 @@ subroutine evp (dt) icellt (iblk), icellu (iblk), & indxti (:,iblk), indxtj (:,iblk), & indxui (:,iblk), indxuj (:,iblk), & - aiu (:,:,iblk), umass (:,:,iblk), & + aiU (:,:,iblk), umass (:,:,iblk), & umassdti (:,:,iblk), fcor_blk (:,:,iblk), & umaskCD (:,:,iblk), & uocnU (:,:,iblk), vocnU (:,:,iblk), & - strairx (:,:,iblk), strairy (:,:,iblk), & + strairxU (:,:,iblk), strairyU (:,:,iblk), & ss_tltxU (:,:,iblk), ss_tltyU (:,:,iblk), & icetmask (:,:,iblk), iceumask (:,:,iblk), & - fm (:,:,iblk), dt, & - strtltx (:,:,iblk), strtlty (:,:,iblk), & - strocnx (:,:,iblk), strocny (:,:,iblk), & - strintx (:,:,iblk), strinty (:,:,iblk), & - taubx (:,:,iblk), tauby (:,:,iblk), & - waterx (:,:,iblk), watery (:,:,iblk), & - forcex (:,:,iblk), forcey (:,:,iblk), & + fmU (:,:,iblk), dt, & + strtltxU (:,:,iblk), strtltyU (:,:,iblk), & + strocnxU (:,:,iblk), strocnyU (:,:,iblk), & + strintxU (:,:,iblk), strintyU (:,:,iblk), & + taubxU (:,:,iblk), taubyU (:,:,iblk), & + waterxU (:,:,iblk), wateryU (:,:,iblk), & + forcexU (:,:,iblk), forceyU (:,:,iblk), & stressp_1 (:,:,iblk), stressp_2 (:,:,iblk), & stressp_3 (:,:,iblk), stressp_4 (:,:,iblk), & stressm_1 (:,:,iblk), stressm_2 (:,:,iblk), & @@ -444,7 +445,7 @@ subroutine evp (dt) stress12_3(:,:,iblk), stress12_4(:,:,iblk), & uvel_init (:,:,iblk), vvel_init (:,:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & - Tbu (:,:,iblk)) + TbU (:,:,iblk)) endif !----------------------------------------------------------------- @@ -642,7 +643,7 @@ subroutine evp (dt) endif !----------------------------------------------------------------- - ! seabed stress factor Tbu (Tbu is part of Cb coefficient) + ! seabed stress factor TbU (TbU is part of Cb coefficient) !----------------------------------------------------------------- if (seabed_stress) then @@ -656,7 +657,7 @@ subroutine evp (dt) icellu (iblk), & indxui (:,iblk), indxuj(:,iblk), & vice (:,:,iblk), aice(:,:,iblk), & - hwater(:,:,iblk), Tbu (:,:,iblk)) + hwater(:,:,iblk), TbU (:,:,iblk)) enddo !$OMP END PARALLEL DO @@ -667,7 +668,7 @@ subroutine evp (dt) icellt(iblk), indxti(:,iblk), indxtj(:,iblk), & icellu(iblk), indxui(:,iblk), indxuj(:,iblk), & aicen(:,:,:,iblk), vicen(:,:,:,iblk) , & - hwater (:,:,iblk), Tbu (:,:,iblk)) + hwater (:,:,iblk), TbU (:,:,iblk)) enddo !$OMP END PARALLEL DO endif @@ -697,7 +698,7 @@ subroutine evp (dt) icellt(iblk), indxti(:,iblk), indxtj(:,iblk), & icellu(iblk), indxui(:,iblk), indxuj(:,iblk), & aicen(:,:,:,iblk), vicen(:,:,:,iblk) , & - hwater (:,:,iblk), Tbu (:,:,iblk) , & + hwater (:,:,iblk), TbU (:,:,iblk) , & TbE (:,:,iblk), TbN (:,:,iblk) , & icelle(iblk), indxei(:,iblk), indxej(:,iblk), & icelln(iblk), indxni(:,iblk), indxnj(:,iblk) ) @@ -724,8 +725,8 @@ subroutine evp (dt) call ice_dyn_evp_1d_copyin( & nx_block,ny_block,nblocks,nx_global+2*nghost,ny_global+2*nghost, & icetmask, iceumask, & - cdn_ocn,aiu,uocnU,vocnU,forcex,forcey,Tbu, & - umassdti,fm,uarear,tarear,strintx,strinty,uvel_init,vvel_init,& + cdn_ocn,aiU,uocnU,vocnU,forcexU,forceyU,TbU, & + umassdti,fmU,uarear,tarear,strintxU,strintyU,uvel_init,vvel_init,& strength,uvel,vvel,dxT,dyT, & stressp_1 ,stressp_2, stressp_3, stressp_4, & stressm_1 ,stressm_2, stressm_3, stressm_4, & @@ -733,12 +734,12 @@ subroutine evp (dt) call ice_dyn_evp_1d_kernel() call ice_dyn_evp_1d_copyout( & nx_block,ny_block,nblocks,nx_global+2*nghost,ny_global+2*nghost, & -!strocn uvel,vvel, strocnx,strocny, strintx,strinty, & - uvel,vvel, strintx,strinty, & +!strocn 
uvel,vvel, strocnxU,strocnyU, strintxU,strintyU, & + uvel,vvel, strintxU,strintyU, & stressp_1, stressp_2, stressp_3, stressp_4, & stressm_1, stressm_2, stressm_3, stressm_4, & stress12_1,stress12_2,stress12_3,stress12_4, & - divu,rdg_conv,rdg_shear,shear,taubx,tauby ) + divu,rdg_conv,rdg_shear,shear,taubxU,taubyU ) call ice_timer_stop(timer_evp_1d) else ! evp_algorithm == standard_2d (Standard CICE) @@ -794,17 +795,17 @@ subroutine evp (dt) call stepu (nx_block , ny_block , & icellu (iblk), Cdn_ocn (:,:,iblk), & indxui (:,iblk), indxuj (:,iblk), & - aiu (:,:,iblk), strtmp (:,:,:), & + aiU (:,:,iblk), strtmp (:,:,:), & uocnU (:,:,iblk), vocnU (:,:,iblk), & - waterx (:,:,iblk), watery (:,:,iblk), & - forcex (:,:,iblk), forcey (:,:,iblk), & - umassdti (:,:,iblk), fm (:,:,iblk), & + waterxU (:,:,iblk), wateryU (:,:,iblk), & + forcexU (:,:,iblk), forceyU (:,:,iblk), & + umassdti (:,:,iblk), fmU (:,:,iblk), & uarear (:,:,iblk), & - strintx (:,:,iblk), strinty (:,:,iblk), & - taubx (:,:,iblk), tauby (:,:,iblk), & + strintxU (:,:,iblk), strintyU(:,:,iblk), & + taubxU (:,:,iblk), taubyU (:,:,iblk), & uvel_init(:,:,iblk), vvel_init(:,:,iblk),& uvel (:,:,iblk), vvel (:,:,iblk), & - Tbu (:,:,iblk)) + TbU (:,:,iblk)) enddo ! iblk !$OMP END PARALLEL DO @@ -1263,8 +1264,8 @@ subroutine evp (dt) indxui (:,iblk), indxuj (:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & uocnU (:,:,iblk), vocnU (:,:,iblk), & - aiu (:,:,iblk), fm (:,:,iblk), & - strocnx (:,:,iblk), strocny (:,:,iblk)) + aiU (:,:,iblk), fmU (:,:,iblk), & + strocnxU(:,:,iblk), strocnyU(:,:,iblk)) enddo !$OMP END PARALLEL DO @@ -1296,10 +1297,10 @@ subroutine evp (dt) endif - ! strocn computed on U, N, E as needed. Map strocn U divided by aiu to T + ! strocn computed on U, N, E as needed. Map strocn U divided by aiU to T ! TODO: This should be done elsewhere as part of generalization? ! TODO: Rename strocn[x,y]T since it's different than strocn[x,y][U,N,E] - ! conservation requires aiu be divided before averaging + ! conservation requires aiU be divided before averaging work1 = c0 work2 = c0 !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) SCHEDULE(runtime) @@ -1307,8 +1308,8 @@ subroutine evp (dt) do ij = 1, icellu(iblk) i = indxui(ij,iblk) j = indxuj(ij,iblk) - work1(i,j,iblk) = strocnx(i,j,iblk)/aiu(i,j,iblk) - work2(i,j,iblk) = strocny(i,j,iblk)/aiu(i,j,iblk) + work1(i,j,iblk) = strocnxU(i,j,iblk)/aiU(i,j,iblk) + work2(i,j,iblk) = strocnyU(i,j,iblk)/aiU(i,j,iblk) enddo enddo !$OMP END PARALLEL DO @@ -1320,8 +1321,8 @@ subroutine evp (dt) call grid_average_X2Y('F', work2, 'U', strocnyT, 'T') if (grid_ice == 'CD' .or. grid_ice == 'C') then - call grid_average_X2Y('S', strintxE, 'E', strintx, 'U') ! diagnostic - call grid_average_X2Y('S', strintyN, 'N', strinty, 'U') ! diagnostic + call grid_average_X2Y('S', strintxE, 'E', strintxU, 'U') ! diagnostic + call grid_average_X2Y('S', strintyN, 'N', strintyU, 'U') ! diagnostic endif call ice_timer_stop(timer_dynamics) ! 
dynamics diff --git a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 index a30cc1b1c..237861c60 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 @@ -192,14 +192,14 @@ subroutine init_dyn (dt) use ice_blocks, only: nx_block, ny_block use ice_domain, only: nblocks, halo_dynbundle use ice_domain_size, only: max_blocks - use ice_flux, only: rdg_conv, rdg_shear, iceumask, iceemask, icenmask, & + use ice_flux, only: rdg_conv, rdg_shear, & stressp_1, stressp_2, stressp_3, stressp_4, & stressm_1, stressm_2, stressm_3, stressm_4, & stress12_1, stress12_2, stress12_3, stress12_4, & stresspT, stressmT, stress12T, & stresspU, stressmU, stress12U use ice_state, only: uvel, vvel, uvelE, vvelE, uvelN, vvelN, divu, shear - use ice_grid, only: ULAT, NLAT, ELAT, tarea + use ice_grid, only: ULAT, NLAT, ELAT, tarea, iceumask, iceemask, icenmask real (kind=dbl_kind), intent(in) :: & dt ! time step @@ -376,8 +376,8 @@ end subroutine set_evp_parameters subroutine dyn_prep1 (nx_block, ny_block, & ilo, ihi, jlo, jhi, & aice, vice, & - vsno, tmask, & - tmass, icetmask) + vsno, Tmask, & + Tmass, iceTmask) integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions @@ -389,13 +389,13 @@ subroutine dyn_prep1 (nx_block, ny_block, & vsno ! volume per unit area of snow (m) logical (kind=log_kind), dimension (nx_block,ny_block), intent(in) :: & - tmask ! land/boundary mask, thickness (T-cell) + Tmask ! land/boundary mask, thickness (T-cell) real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out) :: & - tmass ! total mass of ice and snow (kg/m^2) + Tmass ! total mass of ice and snow (kg/m^2) integer (kind=int_kind), dimension (nx_block,ny_block), intent(out) :: & - icetmask ! ice extent mask (T-cell) + iceTmask ! ice extent mask (T-cell) ! local variables @@ -423,22 +423,22 @@ subroutine dyn_prep1 (nx_block, ny_block, & ! NOTE: vice and vsno must be up to date in all grid cells, ! including ghost cells !----------------------------------------------------------------- - if (tmask(i,j)) then - tmass(i,j) = (rhoi*vice(i,j) + rhos*vsno(i,j)) ! kg/m^2 + if (Tmask(i,j)) then + Tmass(i,j) = (rhoi*vice(i,j) + rhos*vsno(i,j)) ! kg/m^2 else - tmass(i,j) = c0 + Tmass(i,j) = c0 endif !----------------------------------------------------------------- ! ice extent mask (T-cells) !----------------------------------------------------------------- - tmphm(i,j) = tmask(i,j) .and. (aice (i,j) > a_min) & - .and. (tmass(i,j) > m_min) + tmphm(i,j) = Tmask(i,j) .and. (aice (i,j) > a_min) & + .and. (Tmass(i,j) > m_min) !----------------------------------------------------------------- ! augmented mask (land + open ocean) !----------------------------------------------------------------- - icetmask (i,j) = 0 + iceTmask (i,j) = 0 enddo enddo @@ -450,10 +450,10 @@ subroutine dyn_prep1 (nx_block, ny_block, & if (tmphm(i-1,j+1) .or. tmphm(i,j+1) .or. tmphm(i+1,j+1) .or. & tmphm(i-1,j) .or. tmphm(i,j) .or. tmphm(i+1,j) .or. & tmphm(i-1,j-1) .or. tmphm(i,j-1) .or. 
tmphm(i+1,j-1) ) then - icetmask(i,j) = 1 + iceTmask(i,j) = 1 endif - if (.not.tmask(i,j)) icetmask(i,j) = 0 + if (.not.Tmask(i,j)) iceTmask(i,j) = 0 enddo enddo @@ -472,16 +472,16 @@ end subroutine dyn_prep1 subroutine dyn_prep2 (nx_block, ny_block, & ilo, ihi, jlo, jhi, & - icellt, icellu, & - indxti, indxtj, & - indxui, indxuj, & - aiu, umass, & - umassdti, fcor, & - umask, & + icellT, icellX, & + indxTi, indxTj, & + indxXi, indxXj, & + aiX, Xmass, & + Xmassdti, fcor, & + Xmask, & uocn, vocn, & strairx, strairy, & ss_tltx, ss_tlty, & - icetmask, iceumask, & + iceTmask, iceXmask, & fm, dt, & strtltx, strtlty, & strocnx, strocny, & @@ -497,34 +497,34 @@ subroutine dyn_prep2 (nx_block, ny_block, & stress12_3, stress12_4, & uvel_init, vvel_init, & uvel, vvel, & - Tbu) + TbU) integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions ilo,ihi,jlo,jhi ! beginning and end of physical domain integer (kind=int_kind), intent(out) :: & - icellt , & ! no. of cells where icetmask = 1 - icellu ! no. of cells where iceumask = 1 + icellT , & ! no. of cells where iceTmask = 1 + icellX ! no. of cells where iceXmask = 1 integer (kind=int_kind), dimension (nx_block*ny_block), intent(out) :: & - indxti , & ! compressed index in i-direction - indxtj , & ! compressed index in j-direction - indxui , & ! compressed index in i-direction - indxuj ! compressed index in j-direction + indxTi , & ! compressed index in i-direction on T grid + indxTj , & ! compressed index in j-direction + indxXi , & ! compressed index in i-direction on X grid, grid depends on call + indxXj ! compressed index in j-direction logical (kind=log_kind), dimension (nx_block,ny_block), intent(in) :: & - umask ! land/boundary mask, thickness (U-cell) + Xmask ! land/boundary mask, thickness (X-grid-cell) integer (kind=int_kind), dimension (nx_block,ny_block), intent(in) :: & - icetmask ! ice extent mask (T-cell) + iceTmask ! ice extent mask (T-cell) logical (kind=log_kind), dimension (nx_block,ny_block), intent(inout) :: & - iceumask ! ice extent mask (U-cell) + iceXmask ! ice extent mask (X-grid-cell) real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - aiu , & ! ice fraction on u-grid - umass , & ! total mass of ice and snow (u grid) + aiX , & ! ice fraction on u-grid (X grid) + Xmass , & ! total mass of ice and snow (X grid) fcor , & ! Coriolis parameter (1/s) strairx , & ! stress on ice by air, x-direction strairy , & ! stress on ice by air, y-direction @@ -537,10 +537,10 @@ subroutine dyn_prep2 (nx_block, ny_block, & dt ! time step real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out) :: & - Tbu, & ! seabed stress factor (N/m^2) + TbU, & ! seabed stress factor (N/m^2) uvel_init,& ! x-component of velocity (m/s), beginning of time step vvel_init,& ! y-component of velocity (m/s), beginning of time step - umassdti, & ! mass of U-cell/dt (kg/m^2 s) + Xmassdti, & ! mass of X-grid-cell/dt (kg/m^2 s) waterx , & ! for ocean stress calculation, x (m/s) watery , & ! for ocean stress calculation, y (m/s) forcex , & ! work array: combined atm stress and ocn tilt, x @@ -571,7 +571,7 @@ subroutine dyn_prep2 (nx_block, ny_block, & gravit logical (kind=log_kind), dimension(nx_block,ny_block) :: & - iceumask_old ! old-time iceumask + iceXmask_old ! 
old-time iceXmask character(len=*), parameter :: subname = '(dyn_prep2)' @@ -585,12 +585,12 @@ subroutine dyn_prep2 (nx_block, ny_block, & watery (i,j) = c0 forcex (i,j) = c0 forcey (i,j) = c0 - umassdti (i,j) = c0 - Tbu (i,j) = c0 + Xmassdti (i,j) = c0 + TbU (i,j) = c0 taubx (i,j) = c0 tauby (i,j) = c0 - if (icetmask(i,j)==0) then + if (iceTmask(i,j)==0) then stressp_1 (i,j) = c0 stressp_2 (i,j) = c0 stressp_3 (i,j) = c0 @@ -608,44 +608,44 @@ subroutine dyn_prep2 (nx_block, ny_block, & enddo ! j !----------------------------------------------------------------- - ! Identify cells where icetmask = 1 - ! Note: The icellt mask includes north and east ghost cells + ! Identify cells where iceTmask = 1 + ! Note: The icellT mask includes north and east ghost cells ! where stresses are needed. !----------------------------------------------------------------- - icellt = 0 + icellT = 0 do j = jlo, jhi+1 do i = ilo, ihi+1 - if (icetmask(i,j) == 1) then - icellt = icellt + 1 - indxti(icellt) = i - indxtj(icellt) = j + if (iceTmask(i,j) == 1) then + icellT = icellT + 1 + indxTi(icellT) = i + indxTj(icellT) = j endif enddo enddo !----------------------------------------------------------------- - ! Define iceumask - ! Identify cells where iceumask is true + ! Define iceXmask + ! Identify cells where iceXmask is true ! Initialize velocity where needed !----------------------------------------------------------------- - icellu = 0 + icellX = 0 do j = jlo, jhi do i = ilo, ihi - iceumask_old(i,j) = iceumask(i,j) ! save + iceXmask_old(i,j) = iceXmask(i,j) ! save ! ice extent mask (U-cells) - iceumask(i,j) = (umask(i,j)) .and. (aiu (i,j) > a_min) & - .and. (umass(i,j) > m_min) + iceXmask(i,j) = (Xmask(i,j)) .and. (aiX (i,j) > a_min) & + .and. (Xmass(i,j) > m_min) - if (iceumask(i,j)) then - icellu = icellu + 1 - indxui(icellu) = i - indxuj(icellu) = j + if (iceXmask(i,j)) then + icellX = icellX + 1 + indxXi(icellX) = i + indxXj(icellX) = j ! initialize velocity for new ice points to ocean sfc current - if (.not. iceumask_old(i,j)) then + if (.not. iceXmask_old(i,j)) then uvel(i,j) = uocn(i,j) vvel(i,j) = vocn(i,j) endif @@ -675,13 +675,13 @@ subroutine dyn_prep2 (nx_block, ny_block, & file=__FILE__, line=__LINE__) endif - do ij = 1, icellu - i = indxui(ij) - j = indxuj(ij) + do ij = 1, icellX + i = indxXi(ij) + j = indxXj(ij) - umassdti(i,j) = umass(i,j)/dt ! kg/m^2 s + Xmassdti(i,j) = Xmass(i,j)/dt ! kg/m^2 s - fm(i,j) = fcor(i,j)*umass(i,j) ! Coriolis * mass + fm(i,j) = fcor(i,j)*Xmass(i,j) ! Coriolis * mass ! for ocean stress waterx(i,j) = uocn(i,j)*cosw - vocn(i,j)*sinw*sign(c1,fm(i,j)) @@ -693,8 +693,8 @@ subroutine dyn_prep2 (nx_block, ny_block, & strtltx(i,j) = -fm(i,j)*vocn(i,j) strtlty(i,j) = fm(i,j)*uocn(i,j) elseif (trim(ssh_stress) == 'coupled') then - strtltx(i,j) = -gravit*umass(i,j)*ss_tltx(i,j) - strtlty(i,j) = -gravit*umass(i,j)*ss_tlty(i,j) + strtltx(i,j) = -gravit*Xmass(i,j)*ss_tltx(i,j) + strtlty(i,j) = -gravit*Xmass(i,j)*ss_tlty(i,j) else call abort_ice(subname//' ERROR: unknown ssh_stress='//trim(ssh_stress), & file=__FILE__, line=__LINE__) @@ -713,38 +713,38 @@ end subroutine dyn_prep2 ! author: Elizabeth C. 
Hunke, LANL subroutine stepu (nx_block, ny_block, & - icellu, Cw, & - indxui, indxuj, & - aiu, str, & + icellU, Cw, & + indxUi, indxUj, & + aiX, str, & uocn, vocn, & waterx, watery, & forcex, forcey, & - umassdti, fm, & + Umassdti, fm, & uarear, & strintx, strinty, & taubx, tauby, & uvel_init, vvel_init,& uvel, vvel, & - Tbu) + TbU) integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellu ! total count when iceumask is true + icellU ! total count when iceumask is true integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxui , & ! compressed index in i-direction - indxuj ! compressed index in j-direction + indxUi , & ! compressed index in i-direction + indxUj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - Tbu, & ! seabed stress factor (N/m^2) + TbU, & ! seabed stress factor (N/m^2) uvel_init,& ! x-component of velocity (m/s), beginning of timestep vvel_init,& ! y-component of velocity (m/s), beginning of timestep - aiu , & ! ice fraction on u-grid + aiX , & ! ice fraction on X-grid waterx , & ! for ocean stress calculation, x (m/s) watery , & ! for ocean stress calculation, y (m/s) forcex , & ! work array: combined atm stress and ocn tilt, x forcey , & ! work array: combined atm stress and ocn tilt, y - umassdti, & ! mass of U-cell/dt (kg/m^2 s) + Umassdti, & ! mass of U-cell/dt (kg/m^2 s) uocn , & ! ocean current, x-direction (m/s) vocn , & ! ocean current, y-direction (m/s) fm , & ! Coriolis param. * mass in U-cell (kg/s) @@ -790,23 +790,23 @@ subroutine stepu (nx_block, ny_block, & if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) - do ij =1, icellu - i = indxui(ij) - j = indxuj(ij) + do ij =1, icellU + i = indxUi(ij) + j = indxUj(ij) uold = uvel(i,j) vold = vvel(i,j) ! (magnitude of relative ocean current)*rhow*drag*aice - vrel = aiu(i,j)*rhow*Cw(i,j)*sqrt((uocn(i,j) - uold)**2 + & + vrel = aiX(i,j)*rhow*Cw(i,j)*sqrt((uocn(i,j) - uold)**2 + & (vocn(i,j) - vold)**2) ! m/s ! ice/ocean stress taux = vrel*waterx(i,j) ! NOTE this is not the entire tauy = vrel*watery(i,j) ! ocn stress term - Cb = Tbu(i,j) / (sqrt(uold**2 + vold**2) + u0) ! for seabed stress + Cb = TbU(i,j) / (sqrt(uold**2 + vold**2) + u0) ! for seabed stress ! revp = 0 for classic evp, 1 for revised evp - cca = (brlx + revp)*umassdti(i,j) + vrel * cosw + Cb ! kg/m^2 s + cca = (brlx + revp)*Umassdti(i,j) + vrel * cosw + Cb ! kg/m^2 s ccb = fm(i,j) + sign(c1,fm(i,j)) * vrel * sinw ! kg/m^2 s @@ -820,9 +820,9 @@ subroutine stepu (nx_block, ny_block, & ! finally, the velocity components cc1 = strintx(i,j) + forcex(i,j) + taux & - + umassdti(i,j)*(brlx*uold + revp*uvel_init(i,j)) + + Umassdti(i,j)*(brlx*uold + revp*uvel_init(i,j)) cc2 = strinty(i,j) + forcey(i,j) + tauy & - + umassdti(i,j)*(brlx*vold + revp*vvel_init(i,j)) + + Umassdti(i,j)*(brlx*vold + revp*vvel_init(i,j)) uvel(i,j) = (cca*cc1 + ccb*cc2) / ab2 ! m/s vvel(i,j) = (cca*cc2 - ccb*cc1) / ab2 @@ -839,18 +839,18 @@ end subroutine stepu ! 
Integration of the momentum equation to find velocity (u,v) at E and N locations subroutine stepuv_CD (nx_block, ny_block, & - icell, Cw, & - indxi, indxj, & - aiu, & - uocn, vocn, & - waterx, watery, & - forcex, forcey, & - massdti, fm, & - strintx, strinty, & - taubx, tauby, & - uvel_init, vvel_init,& - uvel, vvel, & - Tb) + icell, Cw, & + indxi, indxj, & + aiX, & + uocn, vocn, & + waterx, watery, & + forcex, forcey, & + massdti, fm, & + strintx, strinty, & + taubx, tauby, & + uvel_init, vvel_init,& + uvel, vvel, & + Tb) integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions @@ -864,7 +864,7 @@ subroutine stepuv_CD (nx_block, ny_block, & Tb, & ! seabed stress factor (N/m^2) uvel_init,& ! x-component of velocity (m/s), beginning of timestep vvel_init,& ! y-component of velocity (m/s), beginning of timestep - aiu , & ! ice fraction on [en]-grid + aiX , & ! ice fraction on X-grid waterx , & ! for ocean stress calculation, x (m/s) watery , & ! for ocean stress calculation, y (m/s) forcex , & ! work array: combined atm stress and ocn tilt, x @@ -920,7 +920,7 @@ subroutine stepuv_CD (nx_block, ny_block, & vold = vvel(i,j) ! (magnitude of relative ocean current)*rhow*drag*aice - vrel = aiu(i,j)*rhow*Cw(i,j)*sqrt((uocn(i,j) - uold)**2 + & + vrel = aiX(i,j)*rhow*Cw(i,j)*sqrt((uocn(i,j) - uold)**2 + & (vocn(i,j) - vold)**2) ! m/s ! ice/ocean stress taux = vrel*waterx(i,j) ! NOTE this is not the entire @@ -956,16 +956,16 @@ end subroutine stepuv_CD ! Integration of the momentum equation to find velocity u at E location on C grid subroutine stepu_C (nx_block, ny_block, & - icell, Cw, & - indxi, indxj, & - aiu, & - uocn, vocn, & - waterx, forcex, & - massdti, fm, & - strintx, taubx, & - uvel_init, & - uvel, vvel, & - Tb) + icell, Cw, & + indxi, indxj, & + aiX, & + uocn, vocn, & + waterx, forcex, & + massdti, fm, & + strintx, taubx, & + uvel_init, & + uvel, vvel, & + Tb) integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions @@ -978,7 +978,7 @@ subroutine stepu_C (nx_block, ny_block, & real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & Tb, & ! seabed stress factor (N/m^2) uvel_init,& ! x-component of velocity (m/s), beginning of timestep - aiu , & ! ice fraction on [en]-grid + aiX , & ! ice fraction on X-grid waterx , & ! for ocean stress calculation, x (m/s) forcex , & ! work array: combined atm stress and ocn tilt, x massdti , & ! mass of e-cell/dt (kg/m^2 s) @@ -1025,7 +1025,7 @@ subroutine stepu_C (nx_block, ny_block, & vold = vvel(i,j) ! (magnitude of relative ocean current)*rhow*drag*aice - vrel = aiu(i,j)*rhow*Cw(i,j)*sqrt((uocn(i,j) - uold)**2 + & + vrel = aiX(i,j)*rhow*Cw(i,j)*sqrt((uocn(i,j) - uold)**2 + & (vocn(i,j) - vold)**2) ! m/s ! ice/ocean stress taux = vrel*waterx(i,j) ! NOTE this is not the entire @@ -1055,16 +1055,16 @@ end subroutine stepu_C ! Integration of the momentum equation to find velocity v at N location on C grid subroutine stepv_C (nx_block, ny_block, & - icell, Cw, & - indxi, indxj, & - aiu, & - uocn, vocn, & - watery, forcey, & - massdti, fm, & - strinty, tauby, & - vvel_init, & - uvel, vvel, & - Tb) + icell, Cw, & + indxi, indxj, & + aiX, & + uocn, vocn, & + watery, forcey, & + massdti, fm, & + strinty, tauby, & + vvel_init, & + uvel, vvel, & + Tb) integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions @@ -1077,7 +1077,7 @@ subroutine stepv_C (nx_block, ny_block, & real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & Tb, & ! 
seabed stress factor (N/m^2) vvel_init,& ! y-component of velocity (m/s), beginning of timestep - aiu , & ! ice fraction on [en]-grid + aiX , & ! ice fraction on X-grid watery , & ! for ocean stress calculation, y (m/s) forcey , & ! work array: combined atm stress and ocn tilt, y massdti , & ! mass of n-cell/dt (kg/m^2 s) @@ -1124,7 +1124,7 @@ subroutine stepv_C (nx_block, ny_block, & vold = vvel(i,j) ! (magnitude of relative ocean current)*rhow*drag*aice - vrel = aiu(i,j)*rhow*Cw(i,j)*sqrt((uocn(i,j) - uold)**2 + & + vrel = aiX(i,j)*rhow*Cw(i,j)*sqrt((uocn(i,j) - uold)**2 + & (vocn(i,j) - vold)**2) ! m/s ! ice/ocean stress tauy = vrel*watery(i,j) ! NOTE this is not the entire ocn stress @@ -1157,27 +1157,27 @@ end subroutine stepv_C ! author: Elizabeth C. Hunke, LANL subroutine dyn_finish (nx_block, ny_block, & - icellu, Cw, & - indxui, indxuj, & + icellU, Cw, & + indxUi, indxUj, & uvel, vvel, & uocn, vocn, & - aiu, fm, & + aiX, fm, & strocnx, strocny) integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellu ! total count when iceumask is true + icellU ! total count when iceumask is true integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxui , & ! compressed index in i-direction - indxuj ! compressed index in j-direction + indxUi , & ! compressed index in i-direction + indxUj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & uvel , & ! x-component of velocity (m/s) vvel , & ! y-component of velocity (m/s) uocn , & ! ocean current, x-direction (m/s) vocn , & ! ocean current, y-direction (m/s) - aiu , & ! ice fraction on u-grid + aiX , & ! ice fraction on X-grid fm ! Coriolis param. * mass in U-cell (kg/s) real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & @@ -1204,20 +1204,20 @@ subroutine dyn_finish (nx_block, ny_block, & file=__FILE__, line=__LINE__) ! ocean-ice stress for coupling - do ij =1, icellu - i = indxui(ij) - j = indxuj(ij) + do ij =1, icellU + i = indxUi(ij) + j = indxUj(ij) vrel = rhow*Cw(i,j)*sqrt((uocn(i,j) - uvel(i,j))**2 + & (vocn(i,j) - vvel(i,j))**2) ! m/s ! strocnx(i,j) = strocnx(i,j) & -! - vrel*(uvel(i,j)*cosw - vvel(i,j)*sinw) * aiu(i,j) +! - vrel*(uvel(i,j)*cosw - vvel(i,j)*sinw) * aiX(i,j) ! strocny(i,j) = strocny(i,j) & -! - vrel*(vvel(i,j)*cosw + uvel(i,j)*sinw) * aiu(i,j) +! - vrel*(vvel(i,j)*cosw + uvel(i,j)*sinw) * aiX(i,j) ! update strocnx to most recent iterate and complete the term - vrel = vrel * aiu(i,j) + vrel = vrel * aiX(i,j) strocnx(i,j) = vrel*((uocn(i,j) - uvel(i,j))*cosw & - (vocn(i,j) - vvel(i,j))*sinw*sign(c1,fm(i,j))) strocny(i,j) = vrel*((vocn(i,j) - vvel(i,j))*cosw & @@ -1233,7 +1233,7 @@ subroutine dyn_finish (nx_block, ny_block, & end subroutine dyn_finish !======================================================================= -! Computes seabed (basal) stress factor Tbu (landfast ice) based on mean +! Computes seabed (basal) stress factor TbU (landfast ice) based on mean ! thickness and bathymetry data. LKD refers to linear keel draft. This ! parameterization assumes that the largest keel draft varies linearly ! with the mean thickness. @@ -1248,25 +1248,25 @@ end subroutine dyn_finish ! ! author: JF Lemieux, Philippe Blain (ECCC) ! -! note1: Tbu is a part of the Cb as defined in Lemieux et al. 2015 and 2016. +! note1: TbU is a part of the Cb as defined in Lemieux et al. 2015 and 2016. ! note2: Seabed stress (better name) was called basal stress in Lemieux et al. 
2015 subroutine seabed_stress_factor_LKD (nx_block, ny_block, & - icellu, & - indxui, indxuj, & + icellU, & + indxUi, indxUj, & vice, aice, & - hwater, Tbu, & + hwater, TbU, & grid_location) use ice_grid, only: grid_neighbor_min, grid_neighbor_max integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellu ! no. of cells where ice[uen]mask = 1 + icellU ! no. of cells where ice[uen]mask = 1 integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxui , & ! compressed index in i-direction - indxuj ! compressed index in j-direction + indxUi , & ! compressed index in i-direction + indxUj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & aice , & ! concentration of ice at tracer location @@ -1274,7 +1274,7 @@ subroutine seabed_stress_factor_LKD (nx_block, ny_block, & hwater ! water depth at tracer location (m) real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & - Tbu ! seabed stress factor at 'grid_location' (N/m^2) + TbU ! seabed stress factor at 'grid_location' (N/m^2) character(len=*), optional, intent(inout) :: & grid_location ! grid location (U, E, N), U assumed if not present @@ -1301,9 +1301,9 @@ subroutine seabed_stress_factor_LKD (nx_block, ny_block, & l_grid_location = grid_location endif - do ij = 1, icellu - i = indxui(ij) - j = indxuj(ij) + do ij = 1, icellU + i = indxUi(ij) + j = indxUj(ij) ! convert quantities to grid_location @@ -1319,14 +1319,14 @@ subroutine seabed_stress_factor_LKD (nx_block, ny_block, & hcu = au * hwu / k1 ! 2- calculate seabed stress factor - Tbu(i,j) = docalc_tbu*k2 * max(c0,(hu - hcu)) * exp(-alphab * (c1 - au)) + TbU(i,j) = docalc_tbu*k2 * max(c0,(hu - hcu)) * exp(-alphab * (c1 - au)) enddo ! ij end subroutine seabed_stress_factor_LKD !======================================================================= -! Computes seabed (basal) stress factor Tbu (landfast ice) based on +! Computes seabed (basal) stress factor TbU (landfast ice) based on ! probability of contact between the ITD and the seabed. The water depth ! could take into account variations of the SSH. In the simplest ! formulation, hwater is simply the value of the bathymetry. To calculate @@ -1340,13 +1340,13 @@ end subroutine seabed_stress_factor_LKD ! authors: D. Dumont, J.F. Lemieux, E. Dumas-Lefebvre, F. Dupont ! subroutine seabed_stress_factor_prob (nx_block, ny_block, & - icellt, indxti, indxtj, & - icellu, indxui, indxuj, & + icellT, indxTi, indxTj, & + icellU, indxUi, indxUj, & aicen, vicen, & - hwater, Tbu, & + hwater, TbU, & TbE, TbN, & - icelle, indxei, indxej, & - icelln, indxni, indxnj) + icellE, indxEi, indxEj, & + icellN, indxNi, indxNj) ! use modules use ice_arrays_column, only: hin_max @@ -1355,13 +1355,13 @@ subroutine seabed_stress_factor_prob (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellt, icellu ! no. of cells where ice[tu]mask = 1 + icellT, icellU ! no. of cells where ice[tu]mask = 1 integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxti , & ! compressed index in i-direction - indxtj , & ! compressed index in j-direction - indxui , & ! compressed index in i-direction - indxuj ! compressed index in j-direction + indxTi , & ! compressed index in i-direction + indxTj , & ! compressed index in j-direction + indxUi , & ! compressed index in i-direction + indxUj ! 
compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & hwater ! water depth at tracer location (m) @@ -1371,20 +1371,20 @@ subroutine seabed_stress_factor_prob (nx_block, ny_block, & vicen ! partial volume for last thickness category in ITD (m) real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & - Tbu ! seabed stress factor at U location (N/m^2) + TbU ! seabed stress factor at U location (N/m^2) real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout), optional :: & TbE, & ! seabed stress factor at E location (N/m^2) TbN ! seabed stress factor at N location (N/m^2) integer (kind=int_kind), intent(in), optional :: & - icelle, icelln ! no. of cells where ice[en]mask = 1 + icellE, icellN ! no. of cells where ice[en]mask = 1 integer (kind=int_kind), dimension (nx_block*ny_block), intent(in), optional :: & - indxei , & ! compressed index in i-direction - indxej , & ! compressed index in j-direction - indxni , & ! compressed index in i-direction - indxnj ! compressed index in j-direction + indxEi , & ! compressed index in i-direction + indxEj , & ! compressed index in j-direction + indxNi , & ! compressed index in i-direction + indxNj ! compressed index in j-direction ! local variables @@ -1444,9 +1444,9 @@ subroutine seabed_stress_factor_prob (nx_block, ny_block, & Tbt=c0 - do ij = 1, icellt - i = indxti(ij) - j = indxtj(ij) + do ij = 1, icellT + i = indxTi(ij) + j = indxTj(ij) atot = sum(aicen(i,j,1:ncat)) @@ -1517,27 +1517,27 @@ subroutine seabed_stress_factor_prob (nx_block, ny_block, & enddo if (grid_ice == "B") then - do ij = 1, icellu - i = indxui(ij) - j = indxuj(ij) + do ij = 1, icellU + i = indxUi(ij) + j = indxUj(ij) ! convert quantities to U-location - Tbu(i,j) = grid_neighbor_max(Tbt, i, j, 'U') + TbU(i,j) = grid_neighbor_max(Tbt, i, j, 'U') enddo ! ij elseif (grid_ice == "C" .or. grid_ice == "CD") then if (present(Tbe) .and. present(TbN) .and. & - present(icelle) .and. present(icelln) .and. & - present(indxei) .and. present(indxej) .and. & - present(indxni) .and. present(indxnj)) then + present(icellE) .and. present(icellN) .and. & + present(indxEi) .and. present(indxEj) .and. & + present(indxNi) .and. present(indxNj)) then - do ij = 1, icelle - i = indxei(ij) - j = indxej(ij) + do ij = 1, icellE + i = indxEi(ij) + j = indxEj(ij) ! convert quantities to E-location TbE(i,j) = grid_neighbor_max(Tbt, i, j, 'E') enddo - do ij = 1, icelln - i = indxni(ij) - j = indxnj(ij) + do ij = 1, icellN + i = indxNi(ij) + j = indxNj(ij) ! convert quantities to N-location TbN(i,j) = grid_neighbor_max(Tbt, i, j, 'N') enddo @@ -1621,8 +1621,8 @@ end subroutine principal_stress ! 2019: subroutine created by Philippe Blain, ECCC subroutine deformations (nx_block, ny_block, & - icellt, & - indxti, indxtj, & + icellT, & + indxTi, indxTj, & uvel, vvel, & dxT, dyT, & cxp, cyp, & @@ -1635,11 +1635,11 @@ subroutine deformations (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellt ! no. of cells where icetmask = 1 + icellT ! no. of cells where iceTmask = 1 integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxti , & ! compressed index in i-direction - indxtj ! compressed index in j-direction + indxTi , & ! compressed index in i-direction + indxTj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & uvel , & ! 
x-component of velocity (m/s) @@ -1672,9 +1672,9 @@ subroutine deformations (nx_block, ny_block, & character(len=*), parameter :: subname = '(deformations)' - do ij = 1, icellt - i = indxti(ij) - j = indxtj(ij) + do ij = 1, icellT + i = indxTi(ij) + j = indxTj(ij) !----------------------------------------------------------------- ! strain rates @@ -1719,8 +1719,8 @@ end subroutine deformations ! Nov 2021 subroutine deformationsCD_T (nx_block, ny_block, & - icellt, & - indxti, indxtj, & + icellT, & + indxTi, indxTj, & uvelE, vvelE, & uvelN, vvelN, & dxN, dyE, & @@ -1733,11 +1733,11 @@ subroutine deformationsCD_T (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellt ! no. of cells where icetmask = 1 + icellT ! no. of cells where iceTmask = 1 integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxti , & ! compressed index in i-direction - indxtj ! compressed index in j-direction + indxTi , & ! compressed index in i-direction + indxTj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & uvelE , & ! x-component of velocity (m/s) at the E point @@ -1778,8 +1778,8 @@ subroutine deformationsCD_T (nx_block, ny_block, & !----------------------------------------------------------------- call strain_rates_T (nx_block , ny_block , & - icellt , & - indxti(:) , indxtj (:) , & + icellT , & + indxTi(:) , indxTj (:) , & uvelE (:,:), vvelE (:,:), & uvelN (:,:), vvelN (:,:), & dxN (:,:), dyE (:,:), & @@ -1787,9 +1787,9 @@ subroutine deformationsCD_T (nx_block, ny_block, & divT (:,:), tensionT(:,:), & shearT(:,:), DeltaT (:,:) ) - do ij = 1, icellt - i = indxti(ij) - j = indxtj(ij) + do ij = 1, icellT + i = indxTi(ij) + j = indxTj(ij) !----------------------------------------------------------------- ! deformations for mechanical redistribution @@ -1815,8 +1815,8 @@ end subroutine deformationsCD_T ! Nov 2021 subroutine deformationsC_T (nx_block, ny_block, & - icellt, & - indxti, indxtj, & + icellT, & + indxTi, indxTj, & uvelE, vvelE, & uvelN, vvelN, & dxN, dyE, & @@ -1830,11 +1830,11 @@ subroutine deformationsC_T (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellt ! no. of cells where icetmask = 1 + icellT ! no. of cells where iceTmask = 1 integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxti , & ! compressed index in i-direction - indxtj ! compressed index in j-direction + indxTi , & ! compressed index in i-direction + indxTj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & uvelE , & ! x-component of velocity (m/s) at the E point @@ -1878,8 +1878,8 @@ subroutine deformationsC_T (nx_block, ny_block, & !----------------------------------------------------------------- call strain_rates_T (nx_block , ny_block , & - icellt , & - indxti(:) , indxtj (:) , & + icellT , & + indxTi(:) , indxTj (:) , & uvelE (:,:), vvelE (:,:), & uvelN (:,:), vvelN (:,:), & dxN (:,:), dyE (:,:), & @@ -1889,9 +1889,9 @@ subroutine deformationsC_T (nx_block, ny_block, & ! DeltaT is calc by strain_rates_T but replaced by calculation below. - do ij = 1, icellt - i = indxti(ij) - j = indxtj(ij) + do ij = 1, icellT + i = indxTi(ij) + j = indxTj(ij) !----------------------------------------------------------------- ! deformations for mechanical redistribution @@ -2014,22 +2014,22 @@ end subroutine strain_rates ! 
Nov 2021 subroutine strain_rates_Tdtsd (nx_block, ny_block, & - icellt, & - indxti, indxtj, & - uvelE, vvelE, & - uvelN, vvelN, & - dxN, dyE, & - dxT, dyT, & - divT, tensionT, & - shearT, DeltaT ) + icellT, & + indxTi, indxTj, & + uvelE, vvelE, & + uvelN, vvelN, & + dxN, dyE, & + dxT, dyT, & + divT, tensionT, & + shearT, DeltaT ) integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellt + icellT integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxti , & ! compressed index in i-direction - indxtj ! compressed index in j-direction + indxTi , & ! compressed index in i-direction + indxTj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & uvelE , & ! x-component of velocity (m/s) at the E point @@ -2061,8 +2061,8 @@ subroutine strain_rates_Tdtsd (nx_block, ny_block, & ! compute divT, tensionT call strain_rates_Tdt (nx_block, ny_block, & - icellt, & - indxti, indxtj, & + icellT, & + indxTi, indxTj, & uvelE, vvelE, & uvelN, vvelN, & dxN, dyE, & @@ -2072,9 +2072,9 @@ subroutine strain_rates_Tdtsd (nx_block, ny_block, & shearT (:,:) = c0 deltaT (:,:) = c0 - do ij = 1, icellt - i = indxti(ij) - j = indxtj(ij) + do ij = 1, icellT + i = indxTi(ij) + j = indxTj(ij) ! shearing strain rate = 2*e_12 shearT(i,j) = (dxT(i,j)**2)*(uvelN(i,j)/dxN(i,j) - uvelN(i,j-1)/dxN(i,j-1)) & @@ -2094,21 +2094,21 @@ end subroutine strain_rates_Tdtsd ! Nov 2021 subroutine strain_rates_Tdt (nx_block, ny_block, & - icellt, & - indxti, indxtj, & - uvelE, vvelE, & - uvelN, vvelN, & - dxN, dyE, & - dxT, dyT, & - divT, tensionT ) + icellT, & + indxTi, indxTj, & + uvelE, vvelE, & + uvelN, vvelN, & + dxN, dyE, & + dxT, dyT, & + divT, tensionT ) integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellt + icellT integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxti , & ! compressed index in i-direction - indxtj ! compressed index in j-direction + indxTi , & ! compressed index in i-direction + indxTj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & uvelE , & ! x-component of velocity (m/s) at the E point @@ -2139,9 +2139,9 @@ subroutine strain_rates_Tdt (nx_block, ny_block, & divT (:,:) = c0 tensionT(:,:) = c0 - do ij = 1, icellt - i = indxti(ij) - j = indxtj(ij) + do ij = 1, icellT + i = indxTi(ij) + j = indxTj(ij) ! divergence = e_11 + e_22 divT (i,j)= dyE(i,j)*uvelE(i ,j ) - dyE(i-1,j)*uvelE(i-1,j ) & @@ -2162,8 +2162,8 @@ end subroutine strain_rates_Tdt ! Nov 2021 subroutine strain_rates_U (nx_block, ny_block, & - icellu, & - indxui, indxuj, & + icellU, & + indxUi, indxUj, & uvelE, vvelE, & uvelN, vvelN, & uvelU, vvelU, & @@ -2177,11 +2177,11 @@ subroutine strain_rates_U (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellu + icellU integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxui , & ! compressed index in i-direction - indxuj ! compressed index in j-direction + indxUi , & ! compressed index in i-direction + indxUj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & uvelE , & ! 
x-component of velocity (m/s) at the E point @@ -2227,9 +2227,9 @@ subroutine strain_rates_U (nx_block, ny_block, & shearU (:,:) = c0 deltaU (:,:) = c0 - do ij = 1, icellu - i = indxui(ij) - j = indxuj(ij) + do ij = 1, icellU + i = indxUi(ij) + j = indxUj(ij) uNip1j = uvelN(i+1,j) * npm(i+1,j) & +(npm(i,j)-npm(i+1,j)) * npm(i,j) * ratiodxN(i,j) * uvelN(i,j) diff --git a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 index 24421a91f..7e0bdb745 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 @@ -50,7 +50,7 @@ module ice_dyn_vp seabed_stress_factor_LKD, seabed_stress_factor_prob, seabed_stress_method, & seabed_stress, Ktens, stack_fields, unstack_fields use ice_fileunits, only: nu_diag - use ice_flux, only: fm + use ice_flux, only: fmU use ice_global_reductions, only: global_sum, global_allreduce_sum use ice_grid, only: dxT, dyT, dxhy, dyhx, cxp, cyp, cxm, cym, uarear use ice_exit, only: abort_ice @@ -167,15 +167,15 @@ subroutine implicit_solver (dt) use ice_domain_size, only: max_blocks, ncat use ice_dyn_shared, only: deformations use ice_flux, only: rdg_conv, rdg_shear, strairxT, strairyT, & - strairx, strairy, uocn, vocn, ss_tltx, ss_tlty, iceumask, fm, & - strtltx, strtlty, strocnx, strocny, strintx, strinty, taubx, tauby, & + strairxU, strairyU, uocn, vocn, ss_tltx, ss_tlty, fmU, & + strtltxU, strtltyU, strocnxU, strocnyU, strintxU, strintyU, taubxU, taubyU, & strocnxT, strocnyT, strax, stray, & - Tbu, hwater, & + TbU, hwater, & stressp_1, stressp_2, stressp_3, stressp_4, & stressm_1, stressm_2, stressm_3, stressm_4, & stress12_1, stress12_2, stress12_3, stress12_4 use ice_grid, only: tmask, umask, dxT, dyT, cxp, cyp, cxm, cym, & - tarear, grid_type, grid_average_X2Y, & + tarear, grid_type, grid_average_X2Y, iceumask, & grid_atm_dynu, grid_atm_dynv, grid_ocn_dynu, grid_ocn_dynv use ice_state, only: aice, vice, vsno, uvel, vvel, divu, shear, & aice_init, aice0, aicen, vicen, strength @@ -199,16 +199,16 @@ subroutine implicit_solver (dt) ss_tltxU , & ! sea surface slope, x-direction (m/m) ss_tltyU , & ! sea surface slope, y-direction (m/m) tmass , & ! total mass of ice and snow (kg/m^2) - waterx , & ! for ocean stress calculation, x (m/s) - watery , & ! for ocean stress calculation, y (m/s) - forcex , & ! work array: combined atm stress and ocn tilt, x - forcey , & ! work array: combined atm stress and ocn tilt, y + waterxU , & ! for ocean stress calculation, x (m/s) + wateryU , & ! for ocean stress calculation, y (m/s) + forcexU , & ! work array: combined atm stress and ocn tilt, x + forceyU , & ! work array: combined atm stress and ocn tilt, y bxfix , & ! part of bx that is constant during Picard byfix , & ! part of by that is constant during Picard Cb , & ! seabed stress coefficient fpresx , & ! fixed point residual vector, x components: fx = uvel - uprev_k fpresy , & ! fixed point residual vector, y components: fy = vvel - vprev_k - aiu , & ! ice fraction on u-grid + aiU , & ! ice fraction on u-grid umass , & ! total mass of ice and snow (u grid) umassdti ! 
mass of U-cell/dte (kg/m^2 s) @@ -303,7 +303,7 @@ subroutine implicit_solver (dt) !----------------------------------------------------------------- call grid_average_X2Y('F',tmass , 'T', umass, 'U') - call grid_average_X2Y('F',aice_init, 'T', aiu , 'U') + call grid_average_X2Y('F',aice_init, 'T', aiU , 'U') call grid_average_X2Y('S',uocn , grid_ocn_dynu, uocnU , 'U') call grid_average_X2Y('S',vocn , grid_ocn_dynv, vocnU , 'U') call grid_average_X2Y('S',ss_tltx, grid_ocn_dynu, ss_tltxU, 'U') @@ -319,15 +319,15 @@ subroutine implicit_solver (dt) file=__FILE__, line=__LINE__) if (.not. calc_strair) then - call grid_average_X2Y('F', strax, grid_atm_dynu, strairx, 'U') - call grid_average_X2Y('F', stray, grid_atm_dynv, strairy, 'U') + call grid_average_X2Y('F', strax, grid_atm_dynu, strairxU, 'U') + call grid_average_X2Y('F', stray, grid_atm_dynv, strairyU, 'U') else call ice_HaloUpdate (strairxT, halo_info, & field_loc_center, field_type_vector) call ice_HaloUpdate (strairyT, halo_info, & field_loc_center, field_type_vector) - call grid_average_X2Y('F',strairxT,'T',strairx,'U') - call grid_average_X2Y('F',strairyT,'T',strairy,'U') + call grid_average_X2Y('F',strairxT,'T',strairxU,'U') + call grid_average_X2Y('F',strairyT,'T',strairyU,'U') endif ! tcraig, tcx, threading here leads to some non-reproducbile results and failures in icepack_ice_strength @@ -350,20 +350,20 @@ subroutine implicit_solver (dt) icellt(iblk), icellu(iblk), & indxti (:,iblk), indxtj (:,iblk), & indxui (:,iblk), indxuj (:,iblk), & - aiu (:,:,iblk), umass (:,:,iblk), & + aiU (:,:,iblk), umass (:,:,iblk), & umassdti (:,:,iblk), fcor_blk (:,:,iblk), & umask (:,:,iblk), & uocnU (:,:,iblk), vocnU (:,:,iblk), & - strairx (:,:,iblk), strairy (:,:,iblk), & + strairxU (:,:,iblk), strairyU (:,:,iblk), & ss_tltxU (:,:,iblk), ss_tltyU (:,:,iblk), & icetmask (:,:,iblk), iceumask (:,:,iblk), & - fm (:,:,iblk), dt, & - strtltx (:,:,iblk), strtlty (:,:,iblk), & - strocnx (:,:,iblk), strocny (:,:,iblk), & - strintx (:,:,iblk), strinty (:,:,iblk), & - taubx (:,:,iblk), tauby (:,:,iblk), & - waterx (:,:,iblk), watery (:,:,iblk), & - forcex (:,:,iblk), forcey (:,:,iblk), & + fmU (:,:,iblk), dt, & + strtltxU (:,:,iblk), strtltyU (:,:,iblk), & + strocnxU (:,:,iblk), strocnyU (:,:,iblk), & + strintxU (:,:,iblk), strintyU (:,:,iblk), & + taubxU (:,:,iblk), taubyU (:,:,iblk), & + waterxU (:,:,iblk), wateryU (:,:,iblk), & + forcexU (:,:,iblk), forceyU (:,:,iblk), & stressp_1 (:,:,iblk), stressp_2 (:,:,iblk), & stressp_3 (:,:,iblk), stressp_4 (:,:,iblk), & stressm_1 (:,:,iblk), stressm_2 (:,:,iblk), & @@ -372,13 +372,13 @@ subroutine implicit_solver (dt) stress12_3(:,:,iblk), stress12_4(:,:,iblk), & uvel_init (:,:,iblk), vvel_init (:,:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & - Tbu (:,:,iblk)) + TbU (:,:,iblk)) call calc_bfix (nx_block , ny_block , & icellu(iblk) , & indxui (:,iblk), indxuj (:,iblk), & umassdti (:,:,iblk), & - forcex (:,:,iblk), forcey (:,:,iblk), & + forcexU (:,:,iblk), forceyU (:,:,iblk), & uvel_init (:,:,iblk), vvel_init (:,:,iblk), & bxfix (:,:,iblk), byfix (:,:,iblk)) @@ -427,7 +427,7 @@ subroutine implicit_solver (dt) endif !----------------------------------------------------------------- - ! seabed stress factor Tbu (Tbu is part of Cb coefficient) + ! 
seabed stress factor TbU (TbU is part of Cb coefficient) !----------------------------------------------------------------- if (seabed_stress) then if ( seabed_stress_method == 'LKD' ) then @@ -437,7 +437,7 @@ subroutine implicit_solver (dt) icellu (iblk), & indxui(:,iblk), indxuj(:,iblk), & vice(:,:,iblk), aice(:,:,iblk), & - hwater(:,:,iblk), Tbu(:,:,iblk)) + hwater(:,:,iblk), TbU(:,:,iblk)) enddo !$OMP END PARALLEL DO @@ -449,7 +449,7 @@ subroutine implicit_solver (dt) icellt(iblk), indxti(:,iblk), indxtj(:,iblk), & icellu(iblk), indxui(:,iblk), indxuj(:,iblk), & aicen(:,:,:,iblk), vicen(:,:,:,iblk), & - hwater(:,:,iblk), Tbu(:,:,iblk)) + hwater(:,:,iblk), TbU(:,:,iblk)) enddo !$OMP END PARALLEL DO @@ -472,17 +472,17 @@ subroutine implicit_solver (dt) !----------------------------------------------------------------- ! Start of nonlinear iteration !----------------------------------------------------------------- - call anderson_solver (icellt , icellu, & - indxti , indxtj, & - indxui , indxuj, & - aiu , ntot , & - uocnU , vocnU , & - waterx , watery, & - bxfix , byfix , & - umassdti, sol , & - fpresx , fpresy, & - zetax2 , etax2 , & - rep_prs , & + call anderson_solver (icellt , icellu , & + indxti , indxtj , & + indxui , indxuj , & + aiU , ntot , & + uocnU , vocnU , & + waterxU , wateryU, & + bxfix , byfix , & + umassdti, sol , & + fpresx , fpresy , & + zetax2 , etax2 , & + rep_prs , & Cb, halo_info_mask) !----------------------------------------------------------------- ! End of nonlinear iteration @@ -544,7 +544,7 @@ subroutine implicit_solver (dt) indxui (:,iblk), indxuj (:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & Cb (:,:,iblk), & - taubx (:,:,iblk), tauby (:,:,iblk)) + taubxU (:,:,iblk), taubyU (:,:,iblk)) enddo !$OMP END PARALLEL DO endif @@ -630,17 +630,17 @@ subroutine implicit_solver (dt) indxui (:,iblk), indxuj (:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & uocnU (:,:,iblk), vocnU (:,:,iblk), & - aiu (:,:,iblk), fm (:,:,iblk), & -! strintx (:,:,iblk), strinty (:,:,iblk), & -! strairx (:,:,iblk), strairy (:,:,iblk), & - strocnx (:,:,iblk), strocny (:,:,iblk)) + aiU (:,:,iblk), fmU (:,:,iblk), & +! strintxU(:,:,iblk), strintyU(:,:,iblk), & +! strairxU(:,:,iblk), strairyU(:,:,iblk), & + strocnxU(:,:,iblk), strocnyU(:,:,iblk)) enddo !$OMP END PARALLEL DO - ! strocn computed on U, N, E as needed. Map strocn U divided by aiu to T + ! strocn computed on U, N, E as needed. Map strocn U divided by aiU to T ! TODO: This should be done elsewhere as part of generalization? - ! conservation requires aiu be divided before averaging + ! conservation requires aiU be divided before averaging work1 = c0 work2 = c0 !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) @@ -648,8 +648,8 @@ subroutine implicit_solver (dt) do ij = 1, icellu(iblk) i = indxui(ij,iblk) j = indxuj(ij,iblk) - work1(i,j,iblk) = strocnx(i,j,iblk)/aiu(i,j,iblk) - work2(i,j,iblk) = strocny(i,j,iblk)/aiu(i,j,iblk) + work1(i,j,iblk) = strocnxU(i,j,iblk)/aiU(i,j,iblk) + work2(i,j,iblk) = strocnyU(i,j,iblk)/aiU(i,j,iblk) enddo enddo call ice_HaloUpdate (work1, halo_info, & @@ -683,17 +683,17 @@ end subroutine implicit_solver ! H. F. Walker, “Anderson Acceleration: Algorithms and Implementations” ! [Online]. 
Available: https://users.wpi.edu/~walker/Papers/anderson_accn_algs_imps.pdf - subroutine anderson_solver (icellt , icellu, & - indxti , indxtj, & - indxui , indxuj, & - aiu , ntot , & - uocn , vocn , & - waterx , watery, & - bxfix , byfix , & - umassdti, sol , & - fpresx , fpresy, & - zetax2 , etax2 , & - rep_prs , & + subroutine anderson_solver (icellt , icellu , & + indxti , indxtj , & + indxui , indxuj , & + aiU , ntot , & + uocn , vocn , & + waterxU , wateryU, & + bxfix , byfix , & + umassdti, sol , & + fpresx , fpresy , & + zetax2 , etax2 , & + rep_prs , & Cb, halo_info_mask) use ice_arrays_column, only: Cdn_ocn @@ -702,7 +702,7 @@ subroutine anderson_solver (icellt , icellu, & use ice_constants, only: c1 use ice_domain, only: maskhalo_dyn, halo_info use ice_domain_size, only: max_blocks - use ice_flux, only: fm, Tbu + use ice_flux, only: fmU, TbU use ice_grid, only: dxT, dyT, dxhy, dyhx, cxp, cyp, cxm, cym, & uarear use ice_dyn_shared, only: DminTarea @@ -723,11 +723,11 @@ subroutine anderson_solver (icellt , icellu, & indxuj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), intent(in) :: & - aiu , & ! ice fraction on u-grid + aiU , & ! ice fraction on u-grid uocn , & ! i ocean current (m/s) vocn , & ! j ocean current (m/s) - waterx , & ! for ocean stress calculation, x (m/s) - watery , & ! for ocean stress calculation, y (m/s) + waterxU , & ! for ocean stress calculation, x (m/s) + wateryU , & ! for ocean stress calculation, y (m/s) bxfix , & ! part of bx that is constant during Picard byfix , & ! part of by that is constant during Picard umassdti ! mass of U-cell/dte (kg/m^2 s) @@ -862,7 +862,7 @@ subroutine anderson_solver (icellt , icellu, & call calc_vrel_Cb (nx_block , ny_block , & icellu (iblk), Cdn_ocn (:,:,iblk), & indxui (:,iblk), indxuj (:,iblk), & - aiu (:,:,iblk), Tbu (:,:,iblk), & + aiU (:,:,iblk), TbU (:,:,iblk), & uocn (:,:,iblk), vocn (:,:,iblk), & ulin (:,:,iblk), vlin (:,:,iblk), & vrel (:,:,iblk), Cb (:,:,iblk)) @@ -872,7 +872,7 @@ subroutine anderson_solver (icellt , icellu, & icellu (iblk), & indxui (:,iblk), indxuj (:,iblk), & stress_Pr (:,:,:), uarear (:,:,iblk), & - waterx (:,:,iblk), watery (:,:,iblk), & + waterxU (:,:,iblk), wateryU (:,:,iblk), & bxfix (:,:,iblk), byfix (:,:,iblk), & bx (:,:,iblk), by (:,:,iblk), & vrel (:,:,iblk)) @@ -889,7 +889,7 @@ subroutine anderson_solver (icellt , icellu, & uprev_k (:,:,iblk) , vprev_k (:,:,iblk), & vrel (:,:,iblk) , Cb (:,:,iblk), & zetax2 (:,:,iblk,:), etax2 (:,:,iblk,:), & - umassdti (:,:,iblk) , fm (:,:,iblk), & + umassdti (:,:,iblk) , fmU (:,:,iblk), & uarear (:,:,iblk) , & Au (:,:,iblk) , Av (:,:,iblk)) call residual_vec (nx_block , ny_block , & @@ -1457,7 +1457,7 @@ end subroutine stress_vp subroutine calc_vrel_Cb (nx_block, ny_block, & icellu , Cw , & indxui , indxuj , & - aiu , Tbu , & + aiU , TbU , & uocn , vocn , & uvel , vvel , & vrel , Cb) @@ -1473,8 +1473,8 @@ subroutine calc_vrel_Cb (nx_block, ny_block, & indxuj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - Tbu, & ! seabed stress factor (N/m^2) - aiu , & ! ice fraction on u-grid + TbU, & ! seabed stress factor (N/m^2) + aiU , & ! ice fraction on u-grid uocn , & ! ocean current, x-direction (m/s) vocn , & ! ocean current, y-direction (m/s) Cw ! ocean-ice neutral drag coefficient @@ -1507,10 +1507,10 @@ subroutine calc_vrel_Cb (nx_block, ny_block, & j = indxuj(ij) ! 
(magnitude of relative ocean current)*rhow*drag*aice - vrel(i,j) = aiu(i,j)*rhow*Cw(i,j)*sqrt((uocn(i,j) - uvel(i,j))**2 + & + vrel(i,j) = aiU(i,j)*rhow*Cw(i,j)*sqrt((uocn(i,j) - uvel(i,j))**2 + & (vocn(i,j) - vvel(i,j))**2) ! m/s - Cb(i,j) = Tbu(i,j) / (sqrt(uvel(i,j)**2 + vvel(i,j)**2) + u0) ! for seabed stress + Cb(i,j) = TbU(i,j) / (sqrt(uvel(i,j)**2 + vvel(i,j)**2) + u0) ! for seabed stress enddo ! ij end subroutine calc_vrel_Cb @@ -1524,7 +1524,7 @@ subroutine calc_seabed_stress (nx_block, ny_block, & indxui , indxuj , & uvel , vvel , & Cb , & - taubx , tauby) + taubxU , taubyU) integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions @@ -1540,8 +1540,8 @@ subroutine calc_seabed_stress (nx_block, ny_block, & Cb ! seabed stress coefficient real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out) :: & - taubx , & ! seabed stress, x-direction (N/m^2) - tauby ! seabed stress, y-direction (N/m^2) + taubxU , & ! seabed stress, x-direction (N/m^2) + taubyU ! seabed stress, y-direction (N/m^2) ! local variables @@ -1554,8 +1554,8 @@ subroutine calc_seabed_stress (nx_block, ny_block, & i = indxui(ij) j = indxuj(ij) - taubx(i,j) = -uvel(i,j)*Cb(i,j) - tauby(i,j) = -vvel(i,j)*Cb(i,j) + taubxU(i,j) = -uvel(i,j)*Cb(i,j) + taubyU(i,j) = -vvel(i,j)*Cb(i,j) enddo ! ij end subroutine calc_seabed_stress @@ -1577,7 +1577,7 @@ subroutine matvec (nx_block, ny_block, & uvel , vvel , & vrel , Cb , & zetax2 , etax2 , & - umassdti, fm , & + umassdti, fmU , & uarear , & Au , Av) @@ -1610,7 +1610,7 @@ subroutine matvec (nx_block, ny_block, & vrel , & ! coefficient for tauw Cb , & ! coefficient for seabed stress umassdti, & ! mass of U-cell/dt (kg/m^2 s) - fm , & ! Coriolis param. * mass in U-cell (kg/s) + fmU , & ! Coriolis param. * mass in U-cell (kg/s) uarear ! 1/uarea real (kind=dbl_kind), dimension(nx_block,ny_block,4), intent(in) :: & @@ -1816,7 +1816,7 @@ subroutine matvec (nx_block, ny_block, & ccaimp = umassdti(i,j) + vrel(i,j) * cosw + Cb(i,j) ! kg/m^2 s - ccb = fm(i,j) + sign(c1,fm(i,j)) * vrel(i,j) * sinw ! kg/m^2 s + ccb = fmU(i,j) + sign(c1,fmU(i,j)) * vrel(i,j) * sinw ! kg/m^2 s ! divergence of the internal stress tensor strintx = uarear(i,j)* & @@ -1839,7 +1839,7 @@ subroutine calc_bfix (nx_block , ny_block , & icellu , & indxui , indxuj , & umassdti , & - forcex , forcey , & + forcexU , forceyU , & uvel_init, vvel_init, & bxfix , byfix) @@ -1855,8 +1855,8 @@ subroutine calc_bfix (nx_block , ny_block , & uvel_init,& ! x-component of velocity (m/s), beginning of time step vvel_init,& ! y-component of velocity (m/s), beginning of time step umassdti, & ! mass of U-cell/dt (kg/m^2 s) - forcex , & ! work array: combined atm stress and ocn tilt, x - forcey ! work array: combined atm stress and ocn tilt, y + forcexU , & ! work array: combined atm stress and ocn tilt, x + forceyU ! work array: combined atm stress and ocn tilt, y real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out) :: & bxfix , & ! 
bx = taux + bxfix @@ -1873,8 +1873,8 @@ subroutine calc_bfix (nx_block , ny_block , & i = indxui(ij) j = indxuj(ij) - bxfix(i,j) = umassdti(i,j)*uvel_init(i,j) + forcex(i,j) - byfix(i,j) = umassdti(i,j)*vvel_init(i,j) + forcey(i,j) + bxfix(i,j) = umassdti(i,j)*uvel_init(i,j) + forcexU(i,j) + byfix(i,j) = umassdti(i,j)*vvel_init(i,j) + forceyU(i,j) enddo end subroutine calc_bfix @@ -1889,7 +1889,7 @@ subroutine calc_bvec (nx_block, ny_block, & icellu , & indxui , indxuj , & stPr , uarear , & - waterx , watery , & + waterxU , wateryU , & bxfix , byfix , & bx , by , & vrel) @@ -1904,8 +1904,8 @@ subroutine calc_bvec (nx_block, ny_block, & real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & uarear , & ! 1/uarea - waterx , & ! for ocean stress calculation, x (m/s) - watery , & ! for ocean stress calculation, y (m/s) + waterxU , & ! for ocean stress calculation, x (m/s) + wateryU , & ! for ocean stress calculation, y (m/s) bxfix , & ! bx = taux + bxfix byfix , & ! by = tauy + byfix vrel ! relative ice-ocean velocity @@ -1943,8 +1943,8 @@ subroutine calc_bvec (nx_block, ny_block, & j = indxuj(ij) ! ice/ocean stress - taux = vrel(i,j)*waterx(i,j) ! NOTE this is not the entire - tauy = vrel(i,j)*watery(i,j) ! ocn stress term + taux = vrel(i,j)*waterxU(i,j) ! NOTE this is not the entire + tauy = vrel(i,j)*wateryU(i,j) ! ocn stress term ! divergence of the internal stress tensor (only Pr part, i.e. dPr/dx, dPr/dy) strintx = uarear(i,j)* & @@ -2831,7 +2831,7 @@ subroutine fgmres (zetax2 , etax2 , & solx (:,:,iblk) , soly (:,:,iblk), & vrel (:,:,iblk) , Cb (:,:,iblk), & zetax2 (:,:,iblk,:), etax2 (:,:,iblk,:), & - umassdti (:,:,iblk) , fm (:,:,iblk), & + umassdti (:,:,iblk) , fmU (:,:,iblk), & uarear (:,:,iblk) , & workspace_x(:,:,iblk) , workspace_y(:,:,iblk)) call residual_vec (nx_block , ny_block , & @@ -2938,7 +2938,7 @@ subroutine fgmres (zetax2 , etax2 , & workspace_x(:,:,iblk) , workspace_y(:,:,iblk), & vrel (:,:,iblk) , Cb (:,:,iblk), & zetax2 (:,:,iblk,:), etax2 (:,:,iblk,:), & - umassdti (:,:,iblk) , fm (:,:,iblk), & + umassdti (:,:,iblk) , fmU (:,:,iblk), & uarear (:,:,iblk) , & arnoldi_basis_x(:,:,iblk,nextit), & arnoldi_basis_y(:,:,iblk,nextit)) @@ -3224,7 +3224,7 @@ subroutine pgmres (zetax2 , etax2 , & solx (:,:,iblk) , soly (:,:,iblk), & vrel (:,:,iblk) , Cb (:,:,iblk), & zetax2 (:,:,iblk,:), etax2 (:,:,iblk,:), & - umassdti (:,:,iblk) , fm (:,:,iblk), & + umassdti (:,:,iblk) , fmU (:,:,iblk), & uarear (:,:,iblk) , & workspace_x(:,:,iblk) , workspace_y(:,:,iblk)) call residual_vec (nx_block , ny_block , & @@ -3320,7 +3320,7 @@ subroutine pgmres (zetax2 , etax2 , & workspace_x(:,:,iblk) , workspace_y(:,:,iblk), & vrel (:,:,iblk) , Cb (:,:,iblk), & zetax2 (:,:,iblk,:), etax2 (:,:,iblk,:), & - umassdti (:,:,iblk) , fm (:,:,iblk), & + umassdti (:,:,iblk) , fmU (:,:,iblk), & uarear (:,:,iblk) , & arnoldi_basis_x(:,:,iblk,nextit), & arnoldi_basis_y(:,:,iblk,nextit)) diff --git a/cicecore/cicedynB/general/ice_flux.F90 b/cicecore/cicedynB/general/ice_flux.F90 index 18727b63e..845491d2a 100644 --- a/cicecore/cicedynB/general/ice_flux.F90 +++ b/cicecore/cicedynB/general/ice_flux.F90 @@ -65,16 +65,16 @@ module ice_flux sig1 , & ! normalized principal stress component sig2 , & ! normalized principal stress component sigP , & ! internal ice pressure (N/m) - taubx , & ! seabed stress (x) (N/m^2) - tauby , & ! seabed stress (y) (N/m^2) - strairx , & ! stress on ice by air, x-direction at U points - strairy , & ! stress on ice by air, y-direction at U points - strocnx , & ! 
ice-ocean stress, x-direction at U points, computed in dyn_finish - strocny , & ! ice-ocean stress, y-direction at U points, computed in dyn_finish - strtltx , & ! stress due to sea surface slope, x-direction - strtlty , & ! stress due to sea surface slope, y-direction - strintx , & ! divergence of internal ice stress, x (N/m^2) - strinty , & ! divergence of internal ice stress, y (N/m^2) + taubxU , & ! seabed stress (x) (N/m^2) + taubyU , & ! seabed stress (y) (N/m^2) + strairxU, & ! stress on ice by air, x-direction at U points + strairyU, & ! stress on ice by air, y-direction at U points + strocnxU, & ! ice-ocean stress, x-direction at U points, computed in dyn_finish + strocnyU, & ! ice-ocean stress, y-direction at U points, computed in dyn_finish + strtltxU, & ! stress due to sea surface slope, x-direction + strtltyU, & ! stress due to sea surface slope, y-direction + strintxU, & ! divergence of internal ice stress, x (N/m^2) + strintyU, & ! divergence of internal ice stress, y (N/m^2) taubxN , & ! seabed stress (x) at N points (N/m^2) taubyN , & ! seabed stress (y) at N points (N/m^2) strairxN, & ! stress on ice by air, x-direction at N points @@ -129,23 +129,11 @@ module ice_flux stresspT, stressmT, stress12T, & ! sigma11+sigma22, sigma11-sigma22, sigma12 stresspU, stressmU, stress12U ! " - logical (kind=log_kind), & - dimension (:,:,:), allocatable, public :: & - iceumask ! ice extent mask (U-cell) - - logical (kind=log_kind), & - dimension (:,:,:), allocatable, public :: & - icenmask ! ice extent mask (N-cell) - - logical (kind=log_kind), & - dimension (:,:,:), allocatable, public :: & - iceemask ! ice extent mask (E-cell) - ! internal real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & - fm , & ! Coriolis param. * mass in U-cell (kg/s) - Tbu , & ! factor for seabed stress (N/m^2) + fmU , & ! Coriolis param. * mass in U-cell (kg/s) + TbU , & ! factor for seabed stress (N/m^2) fmE , & ! Coriolis param. * mass in E-cell (kg/s) TbE , & ! factor for seabed stress (N/m^2) fmN , & ! Coriolis param. * mass in N-cell (kg/s) @@ -406,16 +394,16 @@ subroutine alloc_flux sig1 (nx_block,ny_block,max_blocks), & ! normalized principal stress component sig2 (nx_block,ny_block,max_blocks), & ! normalized principal stress component sigP (nx_block,ny_block,max_blocks), & ! internal ice pressure (N/m) - taubx (nx_block,ny_block,max_blocks), & ! seabed stress (x) (N/m^2) - tauby (nx_block,ny_block,max_blocks), & ! seabed stress (y) (N/m^2) - strairx (nx_block,ny_block,max_blocks), & ! stress on ice by air, x-direction - strairy (nx_block,ny_block,max_blocks), & ! stress on ice by air, y-direction - strocnx (nx_block,ny_block,max_blocks), & ! ice-ocean stress, x-direction - strocny (nx_block,ny_block,max_blocks), & ! ice-ocean stress, y-direction - strtltx (nx_block,ny_block,max_blocks), & ! stress due to sea surface slope, x-direction - strtlty (nx_block,ny_block,max_blocks), & ! stress due to sea surface slope, y-direction - strintx (nx_block,ny_block,max_blocks), & ! divergence of internal ice stress, x (N/m^2) - strinty (nx_block,ny_block,max_blocks), & ! divergence of internal ice stress, y (N/m^2) + taubxU (nx_block,ny_block,max_blocks), & ! seabed stress (x) (N/m^2) + taubyU (nx_block,ny_block,max_blocks), & ! seabed stress (y) (N/m^2) + strairxU (nx_block,ny_block,max_blocks), & ! stress on ice by air, x-direction + strairyU (nx_block,ny_block,max_blocks), & ! stress on ice by air, y-direction + strocnxU (nx_block,ny_block,max_blocks), & ! 
ice-ocean stress, x-direction + strocnyU (nx_block,ny_block,max_blocks), & ! ice-ocean stress, y-direction + strtltxU (nx_block,ny_block,max_blocks), & ! stress due to sea surface slope, x-direction + strtltyU (nx_block,ny_block,max_blocks), & ! stress due to sea surface slope, y-direction + strintxU (nx_block,ny_block,max_blocks), & ! divergence of internal ice stress, x (N/m^2) + strintyU (nx_block,ny_block,max_blocks), & ! divergence of internal ice stress, y (N/m^2) daidtd (nx_block,ny_block,max_blocks), & ! ice area tendency due to transport (1/s) dvidtd (nx_block,ny_block,max_blocks), & ! ice volume tendency due to transport (m/s) dagedtd (nx_block,ny_block,max_blocks), & ! ice age tendency due to transport (s/s) @@ -435,9 +423,8 @@ subroutine alloc_flux stress12_2 (nx_block,ny_block,max_blocks), & ! sigma12 stress12_3 (nx_block,ny_block,max_blocks), & ! sigma12 stress12_4 (nx_block,ny_block,max_blocks), & ! sigma12 - iceumask (nx_block,ny_block,max_blocks), & ! ice extent mask (U-cell) - fm (nx_block,ny_block,max_blocks), & ! Coriolis param. * mass in U-cell (kg/s) - Tbu (nx_block,ny_block,max_blocks), & ! factor for seabed stress (landfast ice) + fmU (nx_block,ny_block,max_blocks), & ! Coriolis param. * mass in U-cell (kg/s) + TbU (nx_block,ny_block,max_blocks), & ! factor for seabed stress (landfast ice) zlvl (nx_block,ny_block,max_blocks), & ! atm level height (momentum) (m) zlvs (nx_block,ny_block,max_blocks), & ! atm level height (scalar quantities) (m) uatm (nx_block,ny_block,max_blocks), & ! wind velocity components (m/s) @@ -592,7 +579,6 @@ subroutine alloc_flux strtltyN (nx_block,ny_block,max_blocks), & ! stress due to sea surface slope, y-direction at N points strintxN (nx_block,ny_block,max_blocks), & ! divergence of internal ice stress, x at N points (N/m^2) strintyN (nx_block,ny_block,max_blocks), & ! divergence of internal ice stress, y at N points (N/m^2) - icenmask (nx_block,ny_block,max_blocks), & ! ice extent mask (N-cell) fmN (nx_block,ny_block,max_blocks), & ! Coriolis param. * mass in N-cell (kg/s) TbN (nx_block,ny_block,max_blocks), & ! factor for seabed stress (landfast ice) taubxE (nx_block,ny_block,max_blocks), & ! seabed stress (x) at E points (N/m^2) @@ -605,7 +591,6 @@ subroutine alloc_flux strtltyE (nx_block,ny_block,max_blocks), & ! stress due to sea surface slope, y-direction at E points strintxE (nx_block,ny_block,max_blocks), & ! divergence of internal ice stress, x at E points (N/m^2) strintyE (nx_block,ny_block,max_blocks), & ! divergence of internal ice stress, y at E points (N/m^2) - iceemask (nx_block,ny_block,max_blocks), & ! ice extent mask (E-cell) fmE (nx_block,ny_block,max_blocks), & ! Coriolis param. * mass in E-cell (kg/s) TbE (nx_block,ny_block,max_blocks), & ! factor for seabed stress (landfast ice) stresspT (nx_block,ny_block,max_blocks), & ! 
sigma11+sigma22 @@ -1041,17 +1026,17 @@ subroutine init_history_dyn sig1 (:,:,:) = c0 sig2 (:,:,:) = c0 - taubx (:,:,:) = c0 - tauby (:,:,:) = c0 + taubxU (:,:,:) = c0 + taubyU (:,:,:) = c0 strength (:,:,:) = c0 - strocnx (:,:,:) = c0 - strocny (:,:,:) = c0 - strairx (:,:,:) = c0 - strairy (:,:,:) = c0 - strtltx (:,:,:) = c0 - strtlty (:,:,:) = c0 - strintx (:,:,:) = c0 - strinty (:,:,:) = c0 + strocnxU(:,:,:) = c0 + strocnyU(:,:,:) = c0 + strairxU(:,:,:) = c0 + strairyU(:,:,:) = c0 + strtltxU(:,:,:) = c0 + strtltyU(:,:,:) = c0 + strintxU(:,:,:) = c0 + strintyU(:,:,:) = c0 dardg1dt(:,:,:) = c0 dardg2dt(:,:,:) = c0 dvirdgdt(:,:,:) = c0 @@ -1060,7 +1045,7 @@ subroutine init_history_dyn dvidtd (:,:,:) = vice(:,:,:) ! temporary initial volume if (tr_iage) & dagedtd (:,:,:) = trcr(:,:,nt_iage,:) ! temporary initial age - fm (:,:,:) = c0 + fmU (:,:,:) = c0 ardgn (:,:,:,:) = c0 vrdgn (:,:,:,:) = c0 krdgn (:,:,:,:) = c1 diff --git a/cicecore/cicedynB/general/ice_init.F90 b/cicecore/cicedynB/general/ice_init.F90 index 9b6bf673c..744b6bfe5 100644 --- a/cicecore/cicedynB/general/ice_init.F90 +++ b/cicecore/cicedynB/general/ice_init.F90 @@ -14,7 +14,8 @@ module ice_init use ice_kinds_mod use ice_communicate, only: my_task, master_task, ice_barrier - use ice_constants, only: c0, c1, c2, c3, c5, p2, p3, p5, p75, p166 + use ice_constants, only: c0, c1, c2, c3, c5, c12, p2, p3, p5, p75, p166, & + cm_to_m use ice_exit, only: abort_ice use ice_fileunits, only: nu_nml, nu_diag, nu_diag_set, nml_filename, diag_type, & ice_stdout, get_fileunit, release_fileunit, bfbflag, flush_fileunit, & @@ -2833,6 +2834,13 @@ subroutine set_state_var (nx_block, ny_block, & edge_init_nh = 70._dbl_kind, & ! initial ice edge, N.Hem. (deg) edge_init_sh = -60._dbl_kind ! initial ice edge, S.Hem. (deg) + real (kind=dbl_kind) :: & ! boxslotcyl + pi , & ! pi + secday , & ! seconds per day + max_vel , & ! max velocity + domain_length , & ! physical domain length + period ! rotational period + logical (kind=log_kind) :: tr_brine, tr_lvl, tr_snow integer (kind=int_kind) :: ntrcr integer (kind=int_kind) :: nt_Tsfc, nt_qice, nt_qsno, nt_sice @@ -2853,6 +2861,7 @@ subroutine set_state_var (nx_block, ny_block, & nt_rhos_out=nt_rhos, nt_rsnw_out=nt_rsnw) call icepack_query_parameters(rhos_out=rhos, Lfresh_out=Lfresh, puny_out=puny, & rad_to_deg_out=rad_to_deg, rsnw_fall_out=rsnw_fall) + call icepack_query_parameters(secday_out=secday, pi_out=pi) call icepack_warnings_flush(nu_diag) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) @@ -3195,12 +3204,16 @@ subroutine set_state_var (nx_block, ny_block, & !--------------------------------------------------------- if (trim(ice_data_type) == 'boxslotcyl') then + domain_length = dxrect*cm_to_m*nx_global + period = c12*secday ! 12 days rotational period + max_vel = pi*domain_length/period do j = 1, ny_block do i = 1, nx_block - call boxslotcyl_data_vel(i, j, & - nx_block, ny_block, & - iglob, jglob, & - uvel, vvel) + + uvel(i,j) = c2*max_vel*(real(jglob(j), kind=dbl_kind) - p5) & + / real(ny_global - 1, kind=dbl_kind) - max_vel + vvel(i,j) = -c2*max_vel*(real(iglob(i), kind=dbl_kind) - p5) & + / real(nx_global - 1, kind=dbl_kind) + max_vel enddo ! j enddo ! i else @@ -3216,56 +3229,6 @@ subroutine set_state_var (nx_block, ny_block, & end subroutine set_state_var -!======================================================================= - -! Set ice velocity for slotted cylinder advection test -! -! 
author: Philippe Blain (ECCC) - - subroutine boxslotcyl_data_vel(i, j, & - nx_block, ny_block, & - iglob, jglob, & - uvel, vvel) - - use ice_constants, only: c2, c12, p5, cm_to_m - use ice_domain_size, only: nx_global, ny_global - use ice_grid, only: dxrect - - integer (kind=int_kind), intent(in) :: & - i, j, & ! local indices - nx_block, ny_block, & ! block dimensions - iglob(nx_block), & ! global indices - jglob(ny_block) - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out) :: & - uvel, vvel ! ice velocity - - ! local variables - real (kind=dbl_kind) :: & - pi , & ! pi - secday , & ! seconds per day - max_vel , & ! max velocity - domain_length , & ! physical domain length - period ! rotational period - - character(len=*), parameter :: subname = '(boxslotcyl_data_vel)' - - call icepack_query_parameters(secday_out=secday, pi_out=pi) - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) - - domain_length = dxrect*cm_to_m*nx_global - period = c12*secday ! 12 days rotational period - max_vel = pi*domain_length/period - - uvel(i,j) = c2*max_vel*(real(jglob(j), kind=dbl_kind) - p5) & - / real(ny_global - 1, kind=dbl_kind) - max_vel - vvel(i,j) = -c2*max_vel*(real(iglob(i), kind=dbl_kind) - p5) & - / real(nx_global - 1, kind=dbl_kind) + max_vel - - end subroutine boxslotcyl_data_vel - !======================================================================= end module ice_init diff --git a/cicecore/cicedynB/infrastructure/ice_grid.F90 b/cicecore/cicedynB/infrastructure/ice_grid.F90 index f7b854b4f..0ea779399 100644 --- a/cicecore/cicedynB/infrastructure/ice_grid.F90 +++ b/cicecore/cicedynB/infrastructure/ice_grid.F90 @@ -173,8 +173,7 @@ module ice_grid use_bathymetry, & ! flag for reading in bathymetry_file pgl_global_ext ! flag for init primary grid lengths (global ext.) - logical (kind=log_kind), & - dimension (:,:,:), allocatable, public :: & + logical (kind=log_kind), dimension (:,:,:), allocatable, public :: & tmask , & ! land/boundary mask, thickness (T-cell) umask , & ! land/boundary mask (U-cell) (1 if all surrounding T cells are ocean) umaskCD, & ! land/boundary mask (U-cell) (1 if at least two surrounding T cells are ocean) @@ -183,6 +182,11 @@ module ice_grid lmask_n, & ! northern hemisphere mask lmask_s ! southern hemisphere mask + logical (kind=log_kind), dimension (:,:,:), allocatable, public :: & + iceumask, & ! ice extent mask (U-cell) + icenmask, & ! ice extent mask (N-cell) + iceemask ! ice extent mask (E-cell) + real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & rndex_global ! global index for local subdomain (dbl) @@ -263,6 +267,7 @@ subroutine alloc_grid umaskCD (nx_block,ny_block,max_blocks), & ! land/boundary mask, velocity (U-cell) nmask (nx_block,ny_block,max_blocks), & ! land/boundary mask (N-cell) emask (nx_block,ny_block,max_blocks), & ! land/boundary mask (E-cell) + iceumask (nx_block,ny_block,max_blocks), & ! u mask for dynamics lmask_n (nx_block,ny_block,max_blocks), & ! northern hemisphere mask lmask_s (nx_block,ny_block,max_blocks), & ! southern hemisphere mask rndex_global(nx_block,ny_block,max_blocks), & ! global index for local subdomain (dbl) @@ -283,6 +288,8 @@ subroutine alloc_grid if (grid_ice == 'CD' .or. grid_ice == 'C') then allocate( & + iceemask (nx_block,ny_block,max_blocks), & ! e mask for dynamics + icenmask (nx_block,ny_block,max_blocks), & ! 
n mask for dynamics ratiodxN (nx_block,ny_block,max_blocks), & ratiodyE (nx_block,ny_block,max_blocks), & ratiodxNr(nx_block,ny_block,max_blocks), & diff --git a/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 b/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 index 2e236b62a..70e70621a 100644 --- a/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 +++ b/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 @@ -56,14 +56,14 @@ subroutine dumpfile(filename_spec) use ice_domain, only: nblocks use ice_domain_size, only: nilyr, nslyr, ncat, max_blocks use ice_flux, only: scale_factor, swvdr, swvdf, swidr, swidf, & - strocnxT, strocnyT, sst, frzmlt, iceumask, iceemask, icenmask, & + strocnxT, strocnyT, sst, frzmlt, & stressp_1, stressp_2, stressp_3, stressp_4, & stressm_1, stressm_2, stressm_3, stressm_4, & stress12_1, stress12_2, stress12_3, stress12_4, & stresspT, stressmT, stress12T, & stresspU, stressmU, stress12U use ice_flux, only: coszen - use ice_grid, only: grid_ice, tmask + use ice_grid, only: grid_ice, tmask, iceumask, iceemask, icenmask use ice_state, only: aicen, vicen, vsnon, trcrn, uvel, vvel, & uvelE, vvelE, uvelN, vvelN @@ -277,14 +277,15 @@ subroutine restartfile (ice_ic) use ice_domain_size, only: nilyr, nslyr, ncat, & max_blocks use ice_flux, only: scale_factor, swvdr, swvdf, swidr, swidf, & - strocnxT, strocnyT, sst, frzmlt, iceumask, iceemask, icenmask, & + strocnxT, strocnyT, sst, frzmlt, & stressp_1, stressp_2, stressp_3, stressp_4, & stressm_1, stressm_2, stressm_3, stressm_4, & stress12_1, stress12_2, stress12_3, stress12_4, & stresspT, stressmT, stress12T, & stresspU, stressmU, stress12U use ice_flux, only: coszen - use ice_grid, only: tmask, grid_type, grid_ice + use ice_grid, only: tmask, grid_type, grid_ice, & + iceumask, iceemask, icenmask use ice_state, only: trcr_depend, aice, vice, vsno, trcr, & aice0, aicen, vicen, vsnon, trcrn, aice_init, uvel, vvel, & uvelE, vvelE, uvelN, vvelN, & @@ -707,12 +708,12 @@ subroutine restartfile_v4 (ice_ic) use ice_domain_size, only: nilyr, nslyr, ncat, nx_global, ny_global, & max_blocks use ice_flux, only: scale_factor, swvdr, swvdf, swidr, swidf, & - strocnxT, strocnyT, sst, frzmlt, iceumask, & + strocnxT, strocnyT, sst, frzmlt, & stressp_1, stressp_2, stressp_3, stressp_4, & stressm_1, stressm_2, stressm_3, stressm_4, & stress12_1, stress12_2, stress12_3, stress12_4 use ice_gather_scatter, only: scatter_global_stress - use ice_grid, only: tmask + use ice_grid, only: tmask, iceumask use ice_read_write, only: ice_open, ice_read, ice_read_global use ice_state, only: trcr_depend, aice, vice, vsno, trcr, & aice0, aicen, vicen, vsnon, trcrn, aice_init, uvel, vvel, & diff --git a/doc/source/cice_index.rst b/doc/source/cice_index.rst index df8e4616b..a0e8df9ba 100644 --- a/doc/source/cice_index.rst +++ b/doc/source/cice_index.rst @@ -251,7 +251,7 @@ either Celsius or Kelvin units). "flux_bio_ai", "all biogeochemistry fluxes passed to ocean, grid cell mean", "" "flw", "incoming longwave radiation", "W/m\ :math:`^2`" "flwout", "outgoing longwave radiation", "W/m\ :math:`^2`" - "fm", "Coriolis parameter * mass in U cell", "kg/s" + "fmU", "Coriolis parameter * mass in U cell", "kg/s" "formdrag", "calculate form drag", "" "fpond", "fresh water flux to ponds", "kg/m\ :math:`^2`/s" "fr_resp", "bgc respiration fraction", "0.05" @@ -355,6 +355,8 @@ either Celsius or Kelvin units). 
"icells", "number of grid cells with specified property (for vectorization)", "" "iceruf", "ice surface roughness at atmosphere interface", "5.\ :math:`\times`\ 10\ :math:`^{-4}` m" "iceruf_ocn", "under-ice roughness (at ocean interface)", "0.03 m" + "iceemask", "ice extent mask (E-cell)", "" + "icenmask", "ice extent mask (N-cell)", "" "icetmask", "ice extent mask (T-cell)", "" "iceumask", "ice extent mask (U-cell)", "" "idate", "the date at the end of the current time step (yyyymmdd)", "" @@ -655,17 +657,17 @@ either Celsius or Kelvin units). "Sswabs", "shortwave radiation absorbed in snow layers", "W/m\ :math:`^2`" "stefan-boltzmann", "Stefan-Boltzmann constant", "5.67\ :math:`\times`\ 10\ :math:`^{-8}` W/m\ :math:`^2`\ K\ :math:`^4`" "stop_now", "if 1, end program execution", "" - "strairx(y)", "stress on ice by air in the x(y)-direction (centered in U cell)", "N/m\ :math:`^2`" + "strairx(y)U", "stress on ice by air in the x(y)-direction (centered in U cell)", "N/m\ :math:`^2`" "strairx(y)T", "stress on ice by air, x(y)-direction (centered in T cell)", "N/m\ :math:`^2`" "strax(y)", "wind stress components from data", "N/m\ :math:`^2`" "strength", "ice strength", "N/m" "stress12", "internal ice stress, :math:`\sigma_{12}`", "N/m" "stressm", "internal ice stress, :math:`\sigma_{11}-\sigma_{22}`", "N/m" "stressp", "internal ice stress, :math:`\sigma_{11}+\sigma_{22}`", "N/m" - "strintx(y)", "divergence of internal ice stress, x(y)", "N/m\ :math:`^2`" - "strocnx(y)", "ice–ocean stress in the x(y)-direction (U-cell)", "N/m\ :math:`^2`" + "strintx(y)U", "divergence of internal ice stress, x(y)", "N/m\ :math:`^2`" + "strocnx(y)U", "ice–ocean stress in the x(y)-direction (U-cell)", "N/m\ :math:`^2`" "strocnx(y)T", "ice–ocean stress, x(y)-dir. (T-cell)", "N/m\ :math:`^2`" - "strtltx(y)", "surface stress due to sea surface slope", "N/m\ :math:`^2`" + "strtltx(y)U", "surface stress due to sea surface slope", "N/m\ :math:`^2`" "swv(n)dr(f)", "incoming shortwave radiation, visible (near IR), direct (diffuse)", "W/m\ :math:`^2`" "**T**", "", "" "Tair", "air temperature at 10 m", "K" From 1585c31da4a2d32be6a29cfd0bbd5e6c9bcec954 Mon Sep 17 00:00:00 2001 From: Tony Craig Date: Fri, 15 Jul 2022 07:43:12 -0700 Subject: [PATCH 02/57] Add unit test for optional arguments, "optargs" (#730) * Add optargs "optional arguments" unit test. This tests the ability to pass optional arguments down the calling tree robustly whether they are present or not. 
* Add test to count optional arguments at 2nd level --- cicecore/drivers/unittest/optargs/optargs.F90 | 246 ++++++++++++++++++ .../drivers/unittest/optargs/optargs_subs.F90 | 148 +++++++++++ configuration/scripts/Makefile | 8 +- configuration/scripts/options/set_env.optargs | 2 + configuration/scripts/tests/unittest_suite.ts | 1 + 5 files changed, 403 insertions(+), 2 deletions(-) create mode 100644 cicecore/drivers/unittest/optargs/optargs.F90 create mode 100644 cicecore/drivers/unittest/optargs/optargs_subs.F90 create mode 100644 configuration/scripts/options/set_env.optargs diff --git a/cicecore/drivers/unittest/optargs/optargs.F90 b/cicecore/drivers/unittest/optargs/optargs.F90 new file mode 100644 index 000000000..14c738d47 --- /dev/null +++ b/cicecore/drivers/unittest/optargs/optargs.F90 @@ -0,0 +1,246 @@ + + program optargs + + use optargs_subs, only: computeA, computeB, computeC, computeD + use optargs_subs, only: oa_error, oa_OK, oa_A, oa_B, oa_C, oa_D + use optargs_subs, only: oa_layer1, oa_count1 + + implicit none + + real*8 :: Ai1, Ao + real*8 :: B + real*8 :: Ci1, Co + real*8 :: Di1, Di2, Do + integer :: ierr, ierrV + + integer :: n + integer, parameter :: ntests = 100 + integer :: iresult + real*8 :: result, resultV + real*8, parameter :: errtol = 1.0e-12 + + !---------------------- + + write(6,*) 'RunningUnitTest optargs' + write(6,*) ' ' + + iresult = 0 + do n = 1,ntests + + Ai1 = -99.; Ao = -99. + B = -99. + Ci1 = -99.; Co = -99. + Di1 = -99.; Di2 = -99.; Do = -99. + + ierr = oa_error + result = -888. + resultV = -999. + + computeA = .false. + computeB = .false. + computeC = .false. + computeD = .false. + + select case (n) + +! fails to compile as it should +! case(0) +! ierrV = oa_OK +! call oa_layer1() + + ! test counts of present optional arguments at 2nd level + ! result should be number of arguments + case(1) + result = -777.; resultV = -777. + ierrV = 9 + call oa_count1(Ai1=Ai1,Ao=Ao,B=B,Ci1=Ci1,Co=Co,Di1=Di1,Di2=Di2,Do=Do,ierr=ierr) + case(2) + result = -777.; resultV = -777. + ierrV = 9 + call oa_count1(Ai1,Ao,B,Ci1,Co,Di1,Di2,Do,ierr) + case(3) + result = -777.; resultV = -777. + ierrV = 3 + call oa_count1(Ci1=Ci1,Co=Co,ierr=ierr) + case(4) + result = -777.; resultV = -777. + ierrV = 5 + call oa_count1(Ci1=Ci1,Co=Co,ierr=ierr,Ao=Ao,Di1=Di1) + + ! test optional order + case(11) + result = -777.; resultV = -777. + ierrV = oa_OK + call oa_layer1(Ai1=Ai1,Ao=Ao,B=B,Ci1=Ci1,Co=Co,Di1=Di1,Di2=Di2,Do=Do,ierr=ierr) + case(12) + result = -777.; resultV = -777. + ierrV = oa_OK + call oa_layer1(Ci1=Ci1,Co=Co,ierr=ierr) + case(13) + result = -777.; resultV = -777. + ierrV = oa_OK + call oa_layer1(Ci1=Ci1,Co=Co,ierr=ierr,Ao=Ao,Di1=Di1) + + ! test optional argument checking + case(21) + computeA = .true. + computeB = .true. + computeC = .true. + computeD = .true. + result = -777.; resultV = -777. + ierrV = oa_error + ! B missing + call oa_layer1(Ai1=Ai1,Ao=Ao,Ci1=Ci1,Co=Co,Di1=Di1,Di2=Di2,Do=Do,ierr=ierr) + case(22) + computeA = .true. + computeB = .true. + computeC = .true. + computeD = .true. + result = -777.; resultV = -777. + ierrV = oa_error + ! all optional missing + call oa_layer1(Ci1=Ci1,Co=Co,ierr=ierr) + case(23) + computeA = .true. + computeB = .true. + computeC = .true. + computeD = .true. + result = -777.; resultV = -777. + ierrV = oa_error + ! some optional missing + call oa_layer1(Ci1=Ci1,Co=Co,ierr=ierr,B=B,Ao=Ao,Di1=Di1) + case(24) + computeA = .true. + computeB = .true. + computeC = .true. + computeD = .true. + result = -777.; resultV = -777. 
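+        ! Di2 is the one optional argument omitted from the call below, so
+        ! the presence check for computeD in oa_layer1 must set ierr = oa_error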
+ ierrV = oa_error + ! one optional missing + call oa_layer1(Ai1=Ai1,Ao=Ao,B=B,Ci1=Ci1,Co=Co,Di1=Di1,Do=Do,ierr=ierr) + + ! test computations individually + case(31) + computeA = .true. + ierrV = oa_A + Ai1 = 5. + resultV = 4. + call oa_layer1(Ai1=Ai1,Ao=Ao,B=B,Ci1=Ci1,Co=Co,Di1=Di1,Di2=Di2,Do=Do,ierr=ierr) + result = Ao + case(32) + computeB = .true. + ierrV = oa_B + B = 15. + resultV = 20. + call oa_layer1(ierr=ierr,Ai1=Ai1,Ao=Ao,B=B,Ci1=Ci1,Co=Co,Di1=Di1,Di2=Di2,Do=Do) + result = B + case(33) + computeC = .true. + ierrV = oa_C + Ci1 = 7. + resultV = 14. + call oa_layer1(B=B,Ci1=Ci1,Co=Co,Di1=Di1,Ai1=Ai1,Ao=Ao,Di2=Di2,Do=Do,ierr=ierr) + result = Co + case(34) + computeD = .true. + ierrV = oa_D + Di1 = 19; Di2=11. + resultV = 30. + call oa_layer1(Ai1=Ai1,Ao=Ao,Ci1=Ci1,Co=Co,Di1=Di1,Di2=Di2,Do=Do,B=B,ierr=ierr) + result = Do + + ! test computations individually + case(41) + computeA = .true. + computeC = .true. + ierrV = oa_A + oa_C + Ai1 = 6. + Ci1 = 8. + resultV = 21. + call oa_layer1(Ai1=Ai1,Ao=Ao,B=B,Ci1=Ci1,Co=Co,Di1=Di1,Di2=Di2,Do=Do,ierr=ierr) + result = Ao + Co + case(42) + computeB = .true. + computeC = .true. + ierrV = oa_B + oa_C + B = -20. + Ci1 = 2. + resultV = -11. + call oa_layer1(ierr=ierr,Ai1=Ai1,Ao=Ao,B=B,Ci1=Ci1,Co=Co,Di1=Di1,Di2=Di2,Do=Do) + result = B + Co + case(43) + computeB = .true. + computeD = .true. + ierrV = oa_B + oa_D + B = 4. + Di1 = 3; Di2=19. + resultV = 31. + call oa_layer1(B=B,Ci1=Ci1,Co=Co,Di1=Di1,Ai1=Ai1,Ao=Ao,Di2=Di2,Do=Do,ierr=ierr) + result = B + Do + case(44) + computeC = .true. + computeD = .true. + ierrV = oa_C + oa_D + Ci1 = 7. + Di1 = 6; Di2=7. + resultV = 27. + call oa_layer1(Ai1=Ai1,Ao=Ao,Ci1=Ci1,Co=Co,Di1=Di1,Di2=Di2,Do=Do,B=B,ierr=ierr) + result = Co + Do + case(45) + computeA = .true. + computeB = .true. + computeC = .true. + computeD = .true. + ierrV = oa_A + oa_B + oa_C + oa_D + Ai1 = 7. + B = 9. + Ci1 = 7. + Di1 = 12; Di2=3. + resultV = 49. + call oa_layer1(Ao=Ao,B=B,Co=Co,Do=Do,Ai1=Ai1,Ci1=Ci1,Di1=Di1,Di2=Di2,ierr=ierr) + result = Ao + B + Co + Do + case(46) + computeA = .true. + computeB = .true. + computeD = .true. + ierrV = oa_A + oa_B + oa_D + Ai1 = 10. + B = 11. + Di1 = 12; Di2=3. + resultV = 40. + call oa_layer1(Ao=Ao,B=B,Co=Co,Do=Do,Ai1=Ai1,Ci1=Ci1,Di1=Di1,Di2=Di2,ierr=ierr) + result = Ao + B + Do + + case DEFAULT + ierr = -1234 + + end select + + ! skip -1234 + if (ierr /= -1234) then + if (ierr == ierrV .and. abs(result-resultV) < errtol ) then + write(6,101) 'PASS','optarg test',n,ierr,ierrV,result,resultV,Ao,B,Co,Do +! write(6,101) 'PASS','optarg test',n,ierr,ierrV,result,resultV + else + write(6,101) 'FAIL','optarg test',n,ierr,ierrV,result,resultV,Ao,B,Co,Do +! write(6,101) 'FAIL','optarg test',n,ierr,ierrV,result,resultV + iresult = 1 + endif + endif + + enddo + + 101 format(1x,a,1x,a,1x,i2.2,2i6,3x,6g11.4) + + write(6,*) ' ' + write(6,*) 'optargs COMPLETED SUCCESSFULLY' + if (iresult == 1) then + write(6,*) 'optargs TEST FAILED' + else + write(6,*) 'optargs TEST COMPLETED SUCCESSFULLY' + endif + + !---------------------- + + end program + diff --git a/cicecore/drivers/unittest/optargs/optargs_subs.F90 b/cicecore/drivers/unittest/optargs/optargs_subs.F90 new file mode 100644 index 000000000..7469d6800 --- /dev/null +++ b/cicecore/drivers/unittest/optargs/optargs_subs.F90 @@ -0,0 +1,148 @@ + + module optargs_subs + + implicit none + private + + logical, public :: computeA = .false., & + computeB = .false., & + computeC = .false., & + computeD = .false. 
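+   ! Note: oa_A through oa_D below are distinct powers of two, so a summed
+   ! ierr identifies exactly which computations ran (see oa_compute)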
+ + integer, public :: oa_error = -99, & + oa_OK = 0, & + oa_A = 1, & + oa_B = 2, & + oa_C = 4, & + oa_D = 8 + + public :: oa_layer1, oa_count1 + +!----------------------------------- +CONTAINS +!----------------------------------- + + subroutine oa_count1(Ai1,Ao,B,Ci1,Co,Di1,Di2,Do,ierr) + + real*8 , intent(in) , optional :: Ai1, Di1, Di2 + real*8 , intent(out) , optional :: Ao, Do + real*8 , intent(inout), optional :: B + real*8 , intent(in) :: Ci1 + real*8 , intent(out) :: Co + integer, intent(inout) :: ierr + + call oa_count2(Ai1,Ao,B,Ci1,Co,Di1,Di2,Do,ierr) + +! write(6,*) 'debug oa_count1 ',ierr + + end subroutine oa_count1 + +!----------------------------------- + + subroutine oa_count2(Ai1,Ao,B,Ci1,Co,Di1,Di2,Do,ierr) + + real*8 , intent(in) , optional :: Ai1, Di1, Di2 + real*8 , intent(out) , optional :: Ao, Do + real*8 , intent(inout), optional :: B + real*8 , intent(in) :: Ci1 + real*8 , intent(out) :: Co + integer, intent(inout) :: ierr + + ierr = 3 ! Ci1, Co, ierr have to be passed + if (present(Ai1)) ierr = ierr + 1 + if (present(Ao) ) ierr = ierr + 1 + if (present(B) ) ierr = ierr + 1 + if (present(Di1)) ierr = ierr + 1 + if (present(Di2)) ierr = ierr + 1 + if (present(Do) ) ierr = ierr + 1 + +! write(6,*) 'debug oa_count2 ',ierr + + end subroutine oa_count2 + +!----------------------------------- + + subroutine oa_layer1(Ai1,Ao,B,Ci1,Co,Di1,Di2,Do,ierr) + + real*8 , intent(in) , optional :: Ai1, Di1, Di2 + real*8 , intent(out) , optional :: Ao, Do + real*8 , intent(inout), optional :: B + real*8 , intent(in) :: Ci1 + real*8 , intent(out) :: Co + integer, intent(inout) :: ierr + + ierr = oa_OK + if (computeA) then + if (.not.(present(Ai1).and.present(Ao))) then + ierr = oa_error + endif + endif + if (computeB) then + if (.not.(present(B))) then + ierr = oa_error + endif + endif + if (computeD) then + if (.not.(present(Di1).and.present(Di2).and.present(Do))) then + ierr = oa_error + endif + endif + + if (ierr == oa_OK) then + call oa_layer2(Ai1,Ao,B,Ci1,Co,Di1,Di2,Do,ierr) + endif + + end subroutine oa_layer1 + +!----------------------------------- + + subroutine oa_layer2(Ai1,Ao,B,Ci1,Co,Di1,Di2,Do,ierr) + + real*8 , intent(in) , optional :: Ai1, Di1, Di2 + real*8 , intent(out) , optional :: Ao, Do + real*8 , intent(inout), optional :: B + real*8 , intent(in) :: Ci1 + real*8 , intent(out) :: Co + integer, intent(inout) :: ierr + + call oa_compute(Ai1,Ao,B,Ci1,Co,Di1,Di2,Do,ierr) + + end subroutine oa_layer2 + +!----------------------------------- + + subroutine oa_compute(Ai1,Ao,B,Ci1,Co,Di1,Di2,Do,ierr) + + real*8 , intent(in) , optional :: Ai1, Di1, Di2 + real*8 , intent(out) , optional :: Ao, Do + real*8 , intent(inout), optional :: B + real*8 , intent(in) :: Ci1 + real*8 , intent(out) :: Co + integer, intent(inout) :: ierr + + if (computeA) then + Ao = Ai1 - 1. + ierr = ierr + oa_A + endif + + if (computeB) then + B = B + 5. + ierr = ierr + oa_B + endif + + if (computeC) then + Co = Ci1 * (2.) 
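+       ! Ci1 and Co are mandatory, so no present() guard is needed here;
+       ! the unguarded writes to the optional Ao, B, and Do in this routine
+       ! rely on oa_layer1 having verified presence before oa_compute is called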
+ ierr = ierr + oa_C + endif + + if (computeD) then + Do = Di1 + Di2 + ierr = ierr + oa_D + endif + + return + end subroutine oa_compute + +!----------------------------------- + + end module optargs_subs diff --git a/configuration/scripts/Makefile b/configuration/scripts/Makefile index 0322513d2..a2f17256f 100644 --- a/configuration/scripts/Makefile +++ b/configuration/scripts/Makefile @@ -74,7 +74,7 @@ AR := ar .SUFFIXES: -.PHONY: all cice libcice targets target db_files db_flags clean realclean helloworld calchk sumchk bcstchk gridavgchk +.PHONY: all cice libcice targets target db_files db_flags clean realclean helloworld calchk sumchk bcstchk gridavgchk optargs all: $(EXEC) cice: $(EXEC) @@ -93,7 +93,7 @@ targets: @echo " " @echo "Supported Makefile Targets are: cice, libcice, makdep, depends, clean, realclean" @echo " Diagnostics: targets, db_files, db_flags" - @echo " Unit Tests : helloworld, calchk, sumchk, bcstchk, gridavgchk" + @echo " Unit Tests : helloworld, calchk, sumchk, bcstchk, gridavgchk, optargs" target: targets db_files: @@ -157,6 +157,10 @@ HWOBJS := helloworld.o helloworld: $(HWOBJS) $(LD) -o $(EXEC) $(LDFLAGS) $(HWOBJS) $(ULIBS) $(SLIBS) +OAOBJS := optargs.o optargs_subs.o +optargs: $(OAOBJS) + $(LD) -o $(EXEC) $(LDFLAGS) $(OAOBJS) $(ULIBS) $(SLIBS) + #------------------------------------------------------------------------------- # build rules: MACFILE, cmd-line, or env vars must provide the needed macros #------------------------------------------------------------------------------- diff --git a/configuration/scripts/options/set_env.optargs b/configuration/scripts/options/set_env.optargs new file mode 100644 index 000000000..84d48137f --- /dev/null +++ b/configuration/scripts/options/set_env.optargs @@ -0,0 +1,2 @@ +setenv ICE_DRVOPT unittest/optargs +setenv ICE_TARGET optargs diff --git a/configuration/scripts/tests/unittest_suite.ts b/configuration/scripts/tests/unittest_suite.ts index 76c9f4312..319c91aa6 100644 --- a/configuration/scripts/tests/unittest_suite.ts +++ b/configuration/scripts/tests/unittest_suite.ts @@ -1,5 +1,6 @@ # Test Grid PEs Sets BFB-compare unittest gx3 1x1 helloworld +unittest gx3 1x1 optargs unittest gx3 1x1 calchk,short unittest gx3 4x1x25x29x4 sumchk unittest gx3 1x1x25x29x16 sumchk From 21bd95be4fa6c261611c12cb7281bd1b33db67d5 Mon Sep 17 00:00:00 2001 From: Philippe Blain Date: Fri, 15 Jul 2022 10:43:29 -0400 Subject: [PATCH 03/57] cice.setup: remove 'suite.jobs' at start of 'suite.submit' (#731) When running a test suite that includes 'bfbcomp' tests, the test script ('cice.test') queries the file 'suite.jobs' in the suite directory to get the job ID of the test against which to compare for bit-for-bit-ness. If re-running the suite, 'suite.jobs' is not removed, so that the new job IDs are appended to the existing file. This leads to syntax errors when 'cice.test' looks for the job ID to compare against since the 'grep' call returns several matches. Remove 'suite.jobs' at the start of the 'suite.submit' script generated by 'cice.setup' to avoid that. --- cice.setup | 1 + 1 file changed, 1 insertion(+) diff --git a/cice.setup b/cice.setup index 60c56e5c2..4994c7ee1 100755 --- a/cice.setup +++ b/cice.setup @@ -489,6 +489,7 @@ else #!/bin/csh -f set nonomatch && rm -f ciceexe.* && unset nonomatch +rm -f suite.jobs set dobuild = true set doreuse = true From 9be1c35ae62199ae0ea9c4f339105c7aec3192bf Mon Sep 17 00:00:00 2001 From: "David A. 
Bailey" Date: Sun, 31 Jul 2022 10:15:34 -0600 Subject: [PATCH 04/57] Deprecate CESM ponds (tr_pond_cesm) (#733) * Deprecate CESM ponds * Namelist changes for deprecating cesmponds * Update documentation --- .../cicedynB/analysis/ice_history_pond.F90 | 15 ++++- .../dynamics/ice_transport_driver.F90 | 15 +++++ cicecore/cicedynB/general/ice_init.F90 | 60 +++++++++++++++++++ cicecore/cicedynB/general/ice_step_mod.F90 | 8 +++ .../io/io_binary/ice_restart.F90 | 30 ++++++++++ .../io/io_netcdf/ice_restart.F90 | 10 ++++ .../infrastructure/io/io_pio2/ice_restart.F90 | 10 ++++ .../drivers/direct/hadgem3/CICE_InitMod.F90 | 16 +++++ .../drivers/direct/hadgem3/CICE_RunMod.F90 | 14 +++++ .../direct/nemo_concepts/CICE_InitMod.F90 | 16 +++++ .../direct/nemo_concepts/CICE_RunMod.F90 | 14 +++++ cicecore/drivers/mct/cesm1/CICE_InitMod.F90 | 16 +++++ cicecore/drivers/mct/cesm1/CICE_RunMod.F90 | 14 +++++ cicecore/drivers/nuopc/cmeps/CICE_InitMod.F90 | 16 +++++ cicecore/drivers/nuopc/cmeps/CICE_RunMod.F90 | 14 +++++ cicecore/drivers/nuopc/dmi/CICE_InitMod.F90 | 16 +++++ cicecore/drivers/nuopc/dmi/CICE_RunMod.F90 | 14 +++++ .../drivers/standalone/cice/CICE_InitMod.F90 | 16 +++++ .../drivers/standalone/cice/CICE_RunMod.F90 | 14 +++++ .../unittest/gridavgchk/CICE_InitMod.F90 | 16 +++++ .../drivers/unittest/sumchk/CICE_InitMod.F90 | 16 +++++ cicecore/shared/ice_init_column.F90 | 15 ++++- cicecore/shared/ice_restart_column.F90 | 7 ++- configuration/scripts/ice_in | 2 - configuration/scripts/options/set_nml.alt01 | 1 - configuration/scripts/options/set_nml.alt02 | 1 - configuration/scripts/options/set_nml.alt03 | 1 - configuration/scripts/options/set_nml.alt04 | 1 - configuration/scripts/options/set_nml.alt05 | 1 - configuration/scripts/options/set_nml.boxadv | 1 - .../scripts/options/set_nml.boxnodyn | 1 - .../scripts/options/set_nml.boxrestore | 1 - doc/source/science_guide/sg_tracers.rst | 6 +- doc/source/user_guide/ug_case_settings.rst | 7 ++- doc/source/user_guide/ug_testing.rst | 1 - icepack | 2 +- 36 files changed, 388 insertions(+), 20 deletions(-) diff --git a/cicecore/cicedynB/analysis/ice_history_pond.F90 b/cicecore/cicedynB/analysis/ice_history_pond.F90 index 365bd4410..ef9a5237e 100644 --- a/cicecore/cicedynB/analysis/ice_history_pond.F90 +++ b/cicecore/cicedynB/analysis/ice_history_pond.F90 @@ -268,9 +268,13 @@ subroutine accum_hist_pond (iblk) integer (kind=int_kind) :: & nt_apnd, nt_hpnd, nt_alvl, nt_ipnd - +#ifdef UNDEPRECATE_CESMPONDS logical (kind=log_kind) :: & tr_pond_cesm, tr_pond_lvl, tr_pond_topo +#else + logical (kind=log_kind) :: & + tr_pond_lvl, tr_pond_topo +#endif real (kind=dbl_kind) :: & puny @@ -285,8 +289,13 @@ subroutine accum_hist_pond (iblk) !--------------------------------------------------------------- call icepack_query_parameters(puny_out=puny) +#ifdef UNDEPRECATE_CESMPONDS call icepack_query_tracer_flags(tr_pond_cesm_out=tr_pond_cesm, & tr_pond_lvl_out=tr_pond_lvl, tr_pond_topo_out=tr_pond_topo) +#else + call icepack_query_tracer_flags(tr_pond_lvl_out=tr_pond_lvl, & + tr_pond_topo_out=tr_pond_topo) +#endif call icepack_query_tracer_indices(nt_apnd_out=nt_apnd, nt_hpnd_out=nt_hpnd, & nt_alvl_out=nt_alvl, nt_ipnd_out=nt_ipnd) call icepack_warnings_flush(nu_diag) @@ -294,6 +303,7 @@ subroutine accum_hist_pond (iblk) file=__FILE__, line=__LINE__) if (allocated(a2D)) then +#ifdef UNDEPRECATE_CESMPONDS if (tr_pond_cesm) then if (f_apond(1:1)/= 'x') & call accum_hist_field(n_apond, iblk, & @@ -311,6 +321,9 @@ subroutine accum_hist_pond (iblk) * trcr(:,:,nt_hpnd,iblk), a2D) elseif 
(tr_pond_lvl) then +#else + if (tr_pond_lvl) then +#endif if (f_apond(1:1)/= 'x') & call accum_hist_field(n_apond, iblk, & trcr(:,:,nt_alvl,iblk) * trcr(:,:,nt_apnd,iblk), a2D) diff --git a/cicecore/cicedynB/dynamics/ice_transport_driver.F90 b/cicecore/cicedynB/dynamics/ice_transport_driver.F90 index c3bf4cd15..390631eaa 100644 --- a/cicecore/cicedynB/dynamics/ice_transport_driver.F90 +++ b/cicecore/cicedynB/dynamics/ice_transport_driver.F90 @@ -1533,8 +1533,13 @@ subroutine state_to_work (nx_block, ny_block, & integer (kind=int_kind) :: & nt_alvl, nt_apnd, nt_fbri +#ifdef UNDEPRECATE_CESMPONDS logical (kind=log_kind) :: & tr_pond_cesm, tr_pond_lvl, tr_pond_topo +#else + logical (kind=log_kind) :: & + tr_pond_lvl, tr_pond_topo +#endif integer (kind=int_kind) :: & i, j, n, it, & ! counting indices @@ -1542,8 +1547,13 @@ subroutine state_to_work (nx_block, ny_block, & character(len=*), parameter :: subname = '(state_to_work)' +#ifdef UNDEPRECATE_CESMPONDS call icepack_query_tracer_flags(tr_pond_cesm_out=tr_pond_cesm, & tr_pond_lvl_out=tr_pond_lvl, tr_pond_topo_out=tr_pond_topo) +#else + call icepack_query_tracer_flags(tr_pond_lvl_out=tr_pond_lvl, & + tr_pond_topo_out=tr_pond_topo) +#endif call icepack_query_tracer_indices(nt_alvl_out=nt_alvl, nt_apnd_out=nt_apnd, & nt_fbri_out=nt_fbri) call icepack_warnings_flush(nu_diag) @@ -1602,8 +1612,13 @@ subroutine state_to_work (nx_block, ny_block, & * trcrn(i,j,it ,n) enddo enddo +#ifdef UNDEPRECATE_CESMPONDS elseif (trcr_depend(it) == 2+nt_apnd .and. & tr_pond_cesm .or. tr_pond_topo) then +#else + elseif (trcr_depend(it) == 2+nt_apnd .and. & + tr_pond_topo) then +#endif do j = 1, ny_block do i = 1, nx_block works(i,j,narrays+it) = aicen(i,j ,n) & diff --git a/cicecore/cicedynB/general/ice_init.F90 b/cicecore/cicedynB/general/ice_init.F90 index 744b6bfe5..7a273148a 100644 --- a/cicecore/cicedynB/general/ice_init.F90 +++ b/cicecore/cicedynB/general/ice_init.F90 @@ -73,9 +73,15 @@ subroutine input_data npt, dt, ndtd, days_per_year, use_leap_years, & write_ic, dump_last, npt_unit use ice_arrays_column, only: oceanmixed_ice +#ifdef UNDEPRECATE_CESMPONDS use ice_restart_column, only: restart_age, restart_FY, restart_lvl, & restart_pond_cesm, restart_pond_lvl, restart_pond_topo, restart_aero, & restart_fsd, restart_iso, restart_snow +#else + use ice_restart_column, only: restart_age, restart_FY, restart_lvl, & + restart_pond_lvl, restart_pond_topo, restart_aero, & + restart_fsd, restart_iso, restart_snow +#endif use ice_restart_shared, only: & restart, restart_ext, restart_coszen, restart_dir, restart_file, pointer_file, & runid, runtype, use_restart_time, restart_format, lcdf64 @@ -157,10 +163,18 @@ subroutine input_data logical (kind=log_kind) :: tr_iage, tr_FY, tr_lvl, tr_pond logical (kind=log_kind) :: tr_iso, tr_aero, tr_fsd, tr_snow +#ifdef UNDEPRECATE_CESMPONDS logical (kind=log_kind) :: tr_pond_cesm, tr_pond_lvl, tr_pond_topo +#else + logical (kind=log_kind) :: tr_pond_lvl, tr_pond_topo +#endif integer (kind=int_kind) :: numin, numax ! 
unit number limits +#ifdef UNDEPRECATE_CESMPONDS integer (kind=int_kind) :: rpcesm, rplvl, rptopo +#else + integer (kind=int_kind) :: rplvl, rptopo +#endif real (kind=dbl_kind) :: Cf, ksno, puny character (len=char_len) :: abort_list character (len=64) :: tmpstr @@ -201,7 +215,9 @@ subroutine input_data tr_iage, restart_age, & tr_FY, restart_FY, & tr_lvl, restart_lvl, & +#ifdef UNDEPRECATE_CESMPONDS tr_pond_cesm, restart_pond_cesm, & +#endif tr_pond_lvl, restart_pond_lvl, & tr_pond_topo, restart_pond_topo, & tr_snow, restart_snow, & @@ -526,8 +542,10 @@ subroutine input_data restart_FY = .false. ! ice age restart tr_lvl = .false. ! level ice restart_lvl = .false. ! level ice restart +#ifdef UNDEPRECATE_CESMPONDS tr_pond_cesm = .false. ! CESM melt ponds restart_pond_cesm = .false. ! melt ponds restart +#endif tr_pond_lvl = .false. ! level-ice melt ponds restart_pond_lvl = .false. ! melt ponds restart tr_pond_topo = .false. ! explicit melt ponds (topographic) @@ -993,8 +1011,10 @@ subroutine input_data call broadcast_scalar(restart_FY, master_task) call broadcast_scalar(tr_lvl, master_task) call broadcast_scalar(restart_lvl, master_task) +#ifdef UNDEPRECATE_CESMPONDS call broadcast_scalar(tr_pond_cesm, master_task) call broadcast_scalar(restart_pond_cesm, master_task) +#endif call broadcast_scalar(tr_pond_lvl, master_task) call broadcast_scalar(restart_pond_lvl, master_task) call broadcast_scalar(tr_pond_topo, master_task) @@ -1087,7 +1107,9 @@ subroutine input_data restart_age = .false. restart_fy = .false. restart_lvl = .false. +#ifdef UNDEPRECATE_CESMPONDS restart_pond_cesm = .false. +#endif restart_pond_lvl = .false. restart_pond_topo = .false. restart_snow = .false. @@ -1204,17 +1226,29 @@ subroutine input_data endif endif +#ifdef UNDEPRECATE_CESMPONDS rpcesm = 0 +#endif rplvl = 0 rptopo = 0 +#ifdef UNDEPRECATE_CESMPONDS if (tr_pond_cesm) rpcesm = 1 +#endif if (tr_pond_lvl ) rplvl = 1 if (tr_pond_topo) rptopo = 1 tr_pond = .false. ! explicit melt ponds +#ifdef UNDEPRECATE_CESMPONDS if (rpcesm + rplvl + rptopo > 0) tr_pond = .true. +#else + if (rplvl + rptopo > 0) tr_pond = .true. +#endif +#ifdef UNDEPRECATE_CESMPONDS if (rpcesm + rplvl + rptopo > 1) then +#else + if (rplvl + rptopo > 1) then +#endif if (my_task == master_task) then write(nu_diag,*) subname//' ERROR: Must use only one melt pond scheme' endif @@ -1438,10 +1472,12 @@ subroutine input_data abort_list = trim(abort_list)//":16" endif +#ifdef UNDEPRECATE_CESMPONDS if (tr_pond_cesm) then if (my_task == master_task) write(nu_diag,*) subname//' ERROR: formdrag=T and frzpnd=cesm' abort_list = trim(abort_list)//":17" endif +#endif if (.not. 
tr_lvl) then if (my_task == master_task) write(nu_diag,*) subname//' ERROR: formdrag=T and tr_lvl=F' @@ -2043,10 +2079,14 @@ subroutine input_data write(nu_diag,*) ' ' write(nu_diag,*) ' Melt ponds' write(nu_diag,*) '--------------------------------' +#ifdef UNDEPRECATE_CESMPONDS if (tr_pond_cesm) then write(nu_diag,1010) ' tr_pond_cesm = ', tr_pond_cesm,' : CESM pond formulation' write(nu_diag,1002) ' pndaspect = ', pndaspect,' : ratio of pond depth to area fraction' elseif (tr_pond_lvl) then +#else + if (tr_pond_lvl) then +#endif write(nu_diag,1010) ' tr_pond_lvl = ', tr_pond_lvl,' : level-ice pond formulation' write(nu_diag,1002) ' pndaspect = ', pndaspect,' : ratio of pond depth to area fraction' write(nu_diag,1000) ' dpscale = ', dpscale,' : time scale for flushing in permeable ice' @@ -2159,7 +2199,9 @@ subroutine input_data if (tr_lvl) write(nu_diag,1010) ' tr_lvl = ', tr_lvl,' : ridging related tracers' if (tr_pond_lvl) write(nu_diag,1010) ' tr_pond_lvl = ', tr_pond_lvl,' : level-ice pond formulation' if (tr_pond_topo) write(nu_diag,1010) ' tr_pond_topo = ', tr_pond_topo,' : topo pond formulation' +#ifdef UNDEPRECATE_CESMPONDS if (tr_pond_cesm) write(nu_diag,1010) ' tr_pond_cesm = ', tr_pond_cesm,' : CESM pond formulation' +#endif if (tr_snow) write(nu_diag,1010) ' tr_snow = ', tr_snow,' : advanced snow physics' if (tr_iage) write(nu_diag,1010) ' tr_iage = ', tr_iage,' : chronological ice age' if (tr_FY) write(nu_diag,1010) ' tr_FY = ', tr_FY,' : first-year ice area' @@ -2284,7 +2326,9 @@ subroutine input_data write(nu_diag,1011) ' restart_age = ', restart_age write(nu_diag,1011) ' restart_FY = ', restart_FY write(nu_diag,1011) ' restart_lvl = ', restart_lvl +#ifdef UNDEPRECATE_CESMPONDS write(nu_diag,1011) ' restart_pond_cesm= ', restart_pond_cesm +#endif write(nu_diag,1011) ' restart_pond_lvl = ', restart_pond_lvl write(nu_diag,1011) ' restart_pond_topo= ', restart_pond_topo write(nu_diag,1011) ' restart_snow = ', restart_snow @@ -2383,7 +2427,11 @@ subroutine input_data call icepack_init_tracer_flags(tr_iage_in=tr_iage, tr_FY_in=tr_FY, & tr_lvl_in=tr_lvl, tr_iso_in=tr_iso, tr_aero_in=tr_aero, & tr_fsd_in=tr_fsd, tr_snow_in=tr_snow, tr_pond_in=tr_pond, & +#ifdef UNDEPRECATE_CESMPONDS tr_pond_cesm_in=tr_pond_cesm, tr_pond_lvl_in=tr_pond_lvl, tr_pond_topo_in=tr_pond_topo) +#else + tr_pond_lvl_in=tr_pond_lvl, tr_pond_topo_in=tr_pond_topo) +#endif call icepack_init_tracer_sizes(ncat_in=ncat, nilyr_in=nilyr, nslyr_in=nslyr, nblyr_in=nblyr, & nfsd_in=nfsd, n_algae_in=n_algae, n_iso_in=n_iso, n_aero_in=n_aero, & n_DOC_in=n_DOC, n_DON_in=n_DON, & @@ -2445,7 +2493,11 @@ subroutine init_state integer (kind=int_kind) :: ntrcr logical (kind=log_kind) :: tr_iage, tr_FY, tr_lvl, tr_iso, tr_aero +#ifdef UNDEPRECATE_CESMPONDS logical (kind=log_kind) :: tr_pond_cesm, tr_pond_lvl, tr_pond_topo +#else + logical (kind=log_kind) :: tr_pond_lvl, tr_pond_topo +#endif logical (kind=log_kind) :: tr_snow, tr_fsd integer (kind=int_kind) :: nt_Tsfc, nt_sice, nt_qice, nt_qsno, nt_iage, nt_FY integer (kind=int_kind) :: nt_alvl, nt_vlvl, nt_apnd, nt_hpnd, nt_ipnd @@ -2463,7 +2515,11 @@ subroutine init_state call icepack_query_tracer_sizes(ntrcr_out=ntrcr) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & tr_lvl_out=tr_lvl, tr_iso_out=tr_iso, tr_aero_out=tr_aero, & +#ifdef UNDEPRECATE_CESMPONDS tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, tr_pond_topo_out=tr_pond_topo, & +#else + tr_pond_lvl_out=tr_pond_lvl, tr_pond_topo_out=tr_pond_topo, & +#endif tr_snow_out=tr_snow, 
tr_fsd_out=tr_fsd) call icepack_query_tracer_indices(nt_Tsfc_out=nt_Tsfc, nt_sice_out=nt_sice, & nt_qice_out=nt_qice, nt_qsno_out=nt_qsno, nt_iage_out=nt_iage, nt_fy_out=nt_fy, & @@ -2533,10 +2589,12 @@ subroutine init_state if (tr_FY) trcr_depend(nt_FY) = 0 ! area-weighted first-year ice area if (tr_lvl) trcr_depend(nt_alvl) = 0 ! level ice area if (tr_lvl) trcr_depend(nt_vlvl) = 1 ! level ice volume +#ifdef UNDEPRECATE_CESMPONDS if (tr_pond_cesm) then trcr_depend(nt_apnd) = 0 ! melt pond area trcr_depend(nt_hpnd) = 2+nt_apnd ! melt pond depth endif +#endif if (tr_pond_lvl) then trcr_depend(nt_apnd) = 2+nt_alvl ! melt pond area trcr_depend(nt_hpnd) = 2+nt_apnd ! melt pond depth @@ -2598,10 +2656,12 @@ subroutine init_state nt_strata (it,2) = 0 enddo +#ifdef UNDEPRECATE_CESMPONDS if (tr_pond_cesm) then n_trcr_strata(nt_hpnd) = 1 ! melt pond depth nt_strata (nt_hpnd,1) = nt_apnd ! on melt pond area endif +#endif if (tr_pond_lvl) then n_trcr_strata(nt_apnd) = 1 ! melt pond area nt_strata (nt_apnd,1) = nt_alvl ! on level ice area diff --git a/cicecore/cicedynB/general/ice_step_mod.F90 b/cicecore/cicedynB/general/ice_step_mod.F90 index 3b0201cbf..3f9b9abeb 100644 --- a/cicecore/cicedynB/general/ice_step_mod.F90 +++ b/cicecore/cicedynB/general/ice_step_mod.F90 @@ -237,7 +237,11 @@ subroutine step_therm1 (dt, iblk) nt_isosno, nt_isoice, nt_rsnw, nt_smice, nt_smliq logical (kind=log_kind) :: & +#ifdef UNDEPRECATE_CESMPONDS tr_iage, tr_FY, tr_iso, tr_aero, tr_pond, tr_pond_cesm, & +#else + tr_iage, tr_FY, tr_iso, tr_aero, tr_pond, & +#endif tr_pond_lvl, tr_pond_topo, calc_Tsfc, highfreq, tr_snow real (kind=dbl_kind) :: & @@ -265,7 +269,11 @@ subroutine step_therm1 (dt, iblk) call icepack_query_tracer_sizes(ntrcr_out=ntrcr) call icepack_query_tracer_flags( & tr_iage_out=tr_iage, tr_FY_out=tr_FY, tr_iso_out=tr_iso, & +#ifdef UNDEPRECATE_CESMPONDS tr_aero_out=tr_aero, tr_pond_out=tr_pond, tr_pond_cesm_out=tr_pond_cesm, & +#else + tr_aero_out=tr_aero, tr_pond_out=tr_pond, & +#endif tr_pond_lvl_out=tr_pond_lvl, tr_pond_topo_out=tr_pond_topo, & tr_snow_out=tr_snow) call icepack_query_tracer_indices( & diff --git a/cicecore/cicedynB/infrastructure/io/io_binary/ice_restart.F90 b/cicecore/cicedynB/infrastructure/io/io_binary/ice_restart.F90 index 5dd35fdf4..503bd18ab 100644 --- a/cicecore/cicedynB/infrastructure/io/io_binary/ice_restart.F90 +++ b/cicecore/cicedynB/infrastructure/io/io_binary/ice_restart.F90 @@ -58,7 +58,11 @@ subroutine init_restart_read(ice_ic) logical (kind=log_kind) :: & solve_zsal, tr_fsd, & +#ifdef UNDEPRECATE_CESMPONDS tr_iage, tr_FY, tr_lvl, tr_iso, tr_aero, tr_pond_cesm, & +#else + tr_iage, tr_FY, tr_lvl, tr_iso, tr_aero, & +#endif tr_pond_topo, tr_pond_lvl, tr_brine, tr_snow character(len=char_len_long) :: & @@ -83,7 +87,11 @@ subroutine init_restart_read(ice_ic) nbtrcr_out=nbtrcr) call icepack_query_tracer_flags( & tr_iage_out=tr_iage, tr_FY_out=tr_FY, tr_lvl_out=tr_lvl, tr_fsd_out=tr_fsd, & +#ifdef UNDEPRECATE_CESMPONDS tr_iso_out=tr_iso, tr_aero_out=tr_aero, tr_pond_cesm_out=tr_pond_cesm, & +#else + tr_iso_out=tr_iso, tr_aero_out=tr_aero, & +#endif tr_pond_topo_out=tr_pond_topo, tr_pond_lvl_out=tr_pond_lvl, & tr_snow_out=tr_snow, tr_brine_out=tr_brine) call icepack_warnings_flush(nu_diag) @@ -228,6 +236,7 @@ subroutine init_restart_read(ice_ic) endif endif +#ifdef UNDEPRECATE_CESMPONDS if (tr_pond_cesm) then if (my_task == master_task) then n = index(filename0,trim(restart_file)) @@ -247,6 +256,7 @@ subroutine init_restart_read(ice_ic) write(nu_diag,*) 'Reading 
',filename(1:lenstr(filename)) endif endif +#endif if (tr_pond_lvl) then if (my_task == master_task) then @@ -414,7 +424,11 @@ subroutine init_restart_write(filename_spec) logical (kind=log_kind) :: & solve_zsal, tr_fsd, & +#ifdef UNDEPRECATE_CESMPONDS tr_iage, tr_FY, tr_lvl, tr_iso, tr_aero, tr_pond_cesm, & +#else + tr_iage, tr_FY, tr_lvl, tr_iso, tr_aero, & +#endif tr_pond_topo, tr_pond_lvl, tr_brine, tr_snow integer (kind=int_kind) :: & @@ -430,7 +444,11 @@ subroutine init_restart_write(filename_spec) nbtrcr_out=nbtrcr) call icepack_query_tracer_flags( & tr_iage_out=tr_iage, tr_FY_out=tr_FY, tr_lvl_out=tr_lvl, tr_fsd_out=tr_fsd, & +#ifdef UNDEPRECATE_CESMPONDS tr_iso_out=tr_iso, tr_aero_out=tr_aero, tr_pond_cesm_out=tr_pond_cesm, & +#else + tr_iso_out=tr_iso, tr_aero_out=tr_aero, & +#endif tr_pond_topo_out=tr_pond_topo, tr_pond_lvl_out=tr_pond_lvl, & tr_snow_out=tr_snow, tr_brine_out=tr_brine) call icepack_warnings_flush(nu_diag) @@ -563,6 +581,7 @@ subroutine init_restart_write(filename_spec) endif +#ifdef UNDEPRECATE_CESMPONDS if (tr_pond_cesm) then write(filename,'(a,a,a,i4.4,a,i2.2,a,i2.2,a,i5.5)') & @@ -582,6 +601,7 @@ subroutine init_restart_write(filename_spec) endif endif +#endif if (tr_pond_lvl) then @@ -851,7 +871,11 @@ subroutine final_restart() logical (kind=log_kind) :: & solve_zsal, & +#ifdef UNDEPRECATE_CESMPONDS tr_iage, tr_FY, tr_lvl, tr_iso, tr_aero, tr_pond_cesm, & +#else + tr_iage, tr_FY, tr_lvl, tr_iso, tr_aero, & +#endif tr_pond_topo, tr_pond_lvl, tr_brine, tr_snow integer (kind=int_kind) :: & @@ -865,7 +889,11 @@ subroutine final_restart() nbtrcr_out=nbtrcr) call icepack_query_tracer_flags( & tr_iage_out=tr_iage, tr_FY_out=tr_FY, tr_lvl_out=tr_lvl, & +#ifdef UNDEPRECATE_CESMPONDS tr_iso_out=tr_iso, tr_aero_out=tr_aero, tr_pond_cesm_out=tr_pond_cesm, & +#else + tr_iso_out=tr_iso, tr_aero_out=tr_aero, & +#endif tr_pond_topo_out=tr_pond_topo, tr_pond_lvl_out=tr_pond_lvl, & tr_snow_out=tr_snow, tr_brine_out=tr_brine) call icepack_warnings_flush(nu_diag) @@ -880,7 +908,9 @@ subroutine final_restart() if (tr_iage) close(nu_dump_age) if (tr_FY) close(nu_dump_FY) if (tr_lvl) close(nu_dump_lvl) +#ifdef UNDEPRECATE_CESMPONDS if (tr_pond_cesm) close(nu_dump_pond) +#endif if (tr_pond_lvl) close(nu_dump_pond) if (tr_pond_topo) close(nu_dump_pond) if (tr_snow) close(nu_dump_snow) diff --git a/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_restart.F90 b/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_restart.F90 index f117384d9..534637bbb 100644 --- a/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_restart.F90 +++ b/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_restart.F90 @@ -145,7 +145,11 @@ subroutine init_restart_write(filename_spec) logical (kind=log_kind) :: & solve_zsal, skl_bgc, z_tracers, tr_fsd, & +#ifdef UNDEPRECATE_CESMPONDS tr_iage, tr_FY, tr_lvl, tr_iso, tr_aero, tr_pond_cesm, & +#else + tr_iage, tr_FY, tr_lvl, tr_iso, tr_aero, & +#endif tr_pond_topo, tr_pond_lvl, tr_brine, tr_snow, & tr_bgc_N, tr_bgc_C, tr_bgc_Nit, & tr_bgc_Sil, tr_bgc_DMS, & @@ -181,7 +185,11 @@ subroutine init_restart_write(filename_spec) nbtrcr_out=nbtrcr) call icepack_query_tracer_flags( & tr_iage_out=tr_iage, tr_FY_out=tr_FY, tr_lvl_out=tr_lvl, tr_fsd_out=tr_fsd, & +#ifdef UNDEPRECATE_CESMPONDS tr_iso_out=tr_iso, tr_aero_out=tr_aero, tr_pond_cesm_out=tr_pond_cesm, & +#else + tr_iso_out=tr_iso, tr_aero_out=tr_aero, & +#endif tr_pond_topo_out=tr_pond_topo, tr_pond_lvl_out=tr_pond_lvl, & tr_snow_out=tr_snow, tr_brine_out=tr_brine, & tr_bgc_N_out=tr_bgc_N, tr_bgc_C_out=tr_bgc_C, 
tr_bgc_Nit_out=tr_bgc_Nit, & @@ -408,10 +416,12 @@ subroutine init_restart_write(filename_spec) call define_rest_field(ncid,'vlvl',dims) end if +#ifdef UNDEPRECATE_CESMPONDS if (tr_pond_cesm) then call define_rest_field(ncid,'apnd',dims) call define_rest_field(ncid,'hpnd',dims) end if +#endif if (tr_pond_topo) then call define_rest_field(ncid,'apnd',dims) diff --git a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 b/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 index 59682fe32..03a1fd07f 100644 --- a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 +++ b/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 @@ -151,7 +151,11 @@ subroutine init_restart_write(filename_spec) solve_zsal, skl_bgc, z_tracers logical (kind=log_kind) :: & +#ifdef UNDEPRECATE_CESMPONDS tr_iage, tr_FY, tr_lvl, tr_iso, tr_aero, tr_pond_cesm, & +#else + tr_iage, tr_FY, tr_lvl, tr_iso, tr_aero, & +#endif tr_pond_topo, tr_pond_lvl, tr_brine, tr_snow, & tr_bgc_N, tr_bgc_C, tr_bgc_Nit, & tr_bgc_Sil, tr_bgc_DMS, & @@ -187,7 +191,11 @@ subroutine init_restart_write(filename_spec) call icepack_query_tracer_sizes(nbtrcr_out=nbtrcr) call icepack_query_tracer_flags( & tr_iage_out=tr_iage, tr_FY_out=tr_FY, tr_lvl_out=tr_lvl, & +#ifdef UNDEPRECATE_CESMPONDS tr_iso_out=tr_iso, tr_aero_out=tr_aero, tr_pond_cesm_out=tr_pond_cesm, & +#else + tr_iso_out=tr_iso, tr_aero_out=tr_aero, & +#endif tr_pond_topo_out=tr_pond_topo, tr_pond_lvl_out=tr_pond_lvl, & tr_snow_out=tr_snow, tr_brine_out=tr_brine, & tr_bgc_N_out=tr_bgc_N, tr_bgc_C_out=tr_bgc_C, tr_bgc_Nit_out=tr_bgc_Nit, & @@ -412,10 +420,12 @@ subroutine init_restart_write(filename_spec) call define_rest_field(File,'vlvl',dims) end if +#ifdef UNDEPRECATE_CESMPONDS if (tr_pond_cesm) then call define_rest_field(File,'apnd',dims) call define_rest_field(File,'hpnd',dims) end if +#endif if (tr_pond_topo) then call define_rest_field(File,'apnd',dims) diff --git a/cicecore/drivers/direct/hadgem3/CICE_InitMod.F90 b/cicecore/drivers/direct/hadgem3/CICE_InitMod.F90 index b2a0e3cd1..fb39375b4 100644 --- a/cicecore/drivers/direct/hadgem3/CICE_InitMod.F90 +++ b/cicecore/drivers/direct/hadgem3/CICE_InitMod.F90 @@ -235,11 +235,17 @@ subroutine init_restart use ice_grid, only: tmask use ice_init, only: ice_ic use ice_init_column, only: init_age, init_FY, init_lvl, & +#ifdef UNDEPRECATE_CESMPONDS init_meltponds_cesm, init_meltponds_lvl, init_meltponds_topo, & +#else + init_meltponds_lvl, init_meltponds_topo, & +#endif init_aerosol, init_hbrine, init_bgc, init_fsd use ice_restart_column, only: restart_age, read_restart_age, & restart_FY, read_restart_FY, restart_lvl, read_restart_lvl, & +#ifdef UNDEPRECATE_CESMPONDS restart_pond_cesm, read_restart_pond_cesm, & +#endif restart_pond_lvl, read_restart_pond_lvl, & restart_pond_topo, read_restart_pond_topo, & restart_fsd, read_restart_fsd, & @@ -254,7 +260,11 @@ subroutine init_restart i, j , & ! horizontal indices iblk ! 
block index logical(kind=log_kind) :: & +#ifdef UNDEPRECATE_CESMPONDS tr_iage, tr_FY, tr_lvl, tr_pond_cesm, tr_pond_lvl, & +#else + tr_iage, tr_FY, tr_lvl, tr_pond_lvl, & +#endif tr_pond_topo, tr_fsd, tr_aero, tr_brine, & skl_bgc, z_tracers, solve_zsal integer(kind=int_kind) :: & @@ -273,7 +283,11 @@ subroutine init_restart call icepack_query_parameters(skl_bgc_out=skl_bgc, & z_tracers_out=z_tracers, solve_zsal_out=solve_zsal) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & +#ifdef UNDEPRECATE_CESMPONDS tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & +#else + tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & +#endif tr_pond_topo_out=tr_pond_topo, tr_aero_out=tr_aero, tr_brine_out=tr_brine, & tr_fsd_out=tr_fsd) call icepack_query_tracer_indices(nt_alvl_out=nt_alvl, nt_vlvl_out=nt_vlvl, & @@ -332,6 +346,7 @@ subroutine init_restart enddo ! iblk endif endif +#ifdef UNDEPRECATE_CESMPONDS ! CESM melt ponds if (tr_pond_cesm) then if (trim(runtype) == 'continue') & @@ -345,6 +360,7 @@ subroutine init_restart enddo ! iblk endif endif +#endif ! level-ice melt ponds if (tr_pond_lvl) then if (trim(runtype) == 'continue') & diff --git a/cicecore/drivers/direct/hadgem3/CICE_RunMod.F90 b/cicecore/drivers/direct/hadgem3/CICE_RunMod.F90 index 61f261bb2..c269ab382 100644 --- a/cicecore/drivers/direct/hadgem3/CICE_RunMod.F90 +++ b/cicecore/drivers/direct/hadgem3/CICE_RunMod.F90 @@ -145,7 +145,11 @@ subroutine ice_step use ice_history_bgc, only: init_history_bgc use ice_restart, only: final_restart use ice_restart_column, only: write_restart_age, write_restart_FY, & +#ifdef UNDEPRECATE_CESMPONDS write_restart_lvl, write_restart_pond_cesm, write_restart_pond_lvl, & +#else + write_restart_lvl, write_restart_pond_lvl, & +#endif write_restart_pond_topo, write_restart_aero, write_restart_fsd, & write_restart_bgc, write_restart_hbrine use ice_restart_driver, only: dumpfile @@ -168,7 +172,11 @@ subroutine ice_step logical (kind=log_kind) :: & tr_iage, tr_FY, tr_lvl, tr_fsd, & +#ifdef UNDEPRECATE_CESMPONDS tr_pond_cesm, tr_pond_lvl, tr_pond_topo, tr_brine, tr_aero, & +#else + tr_pond_lvl, tr_pond_topo, tr_brine, tr_aero, & +#endif calc_Tsfc, skl_bgc, solve_zsal, z_tracers, wave_spec character(len=*), parameter :: subname = '(ice_step)' @@ -177,7 +185,11 @@ subroutine ice_step solve_zsal_out=solve_zsal, z_tracers_out=z_tracers, ktherm_out=ktherm, & wave_spec_out=wave_spec) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & +#ifdef UNDEPRECATE_CESMPONDS tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & +#else + tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & +#endif tr_pond_topo_out=tr_pond_topo, tr_brine_out=tr_brine, tr_aero_out=tr_aero, & tr_fsd_out=tr_fsd) call icepack_warnings_flush(nu_diag) @@ -321,7 +333,9 @@ subroutine ice_step if (tr_iage) call write_restart_age if (tr_FY) call write_restart_FY if (tr_lvl) call write_restart_lvl +#ifdef UNDEPRECATE_CESMPONDS if (tr_pond_cesm) call write_restart_pond_cesm +#endif if (tr_pond_lvl) call write_restart_pond_lvl if (tr_pond_topo) call write_restart_pond_topo if (tr_fsd) call write_restart_fsd diff --git a/cicecore/drivers/direct/nemo_concepts/CICE_InitMod.F90 b/cicecore/drivers/direct/nemo_concepts/CICE_InitMod.F90 index b2a0e3cd1..fb39375b4 100644 --- a/cicecore/drivers/direct/nemo_concepts/CICE_InitMod.F90 +++ b/cicecore/drivers/direct/nemo_concepts/CICE_InitMod.F90 @@ -235,11 +235,17 @@ subroutine init_restart use ice_grid, only: tmask use ice_init, 
only: ice_ic use ice_init_column, only: init_age, init_FY, init_lvl, & +#ifdef UNDEPRECATE_CESMPONDS init_meltponds_cesm, init_meltponds_lvl, init_meltponds_topo, & +#else + init_meltponds_lvl, init_meltponds_topo, & +#endif init_aerosol, init_hbrine, init_bgc, init_fsd use ice_restart_column, only: restart_age, read_restart_age, & restart_FY, read_restart_FY, restart_lvl, read_restart_lvl, & +#ifdef UNDEPRECATE_CESMPONDS restart_pond_cesm, read_restart_pond_cesm, & +#endif restart_pond_lvl, read_restart_pond_lvl, & restart_pond_topo, read_restart_pond_topo, & restart_fsd, read_restart_fsd, & @@ -254,7 +260,11 @@ subroutine init_restart i, j , & ! horizontal indices iblk ! block index logical(kind=log_kind) :: & +#ifdef UNDEPRECATE_CESMPONDS tr_iage, tr_FY, tr_lvl, tr_pond_cesm, tr_pond_lvl, & +#else + tr_iage, tr_FY, tr_lvl, tr_pond_lvl, & +#endif tr_pond_topo, tr_fsd, tr_aero, tr_brine, & skl_bgc, z_tracers, solve_zsal integer(kind=int_kind) :: & @@ -273,7 +283,11 @@ subroutine init_restart call icepack_query_parameters(skl_bgc_out=skl_bgc, & z_tracers_out=z_tracers, solve_zsal_out=solve_zsal) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & +#ifdef UNDEPRECATE_CESMPONDS tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & +#else + tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & +#endif tr_pond_topo_out=tr_pond_topo, tr_aero_out=tr_aero, tr_brine_out=tr_brine, & tr_fsd_out=tr_fsd) call icepack_query_tracer_indices(nt_alvl_out=nt_alvl, nt_vlvl_out=nt_vlvl, & @@ -332,6 +346,7 @@ subroutine init_restart enddo ! iblk endif endif +#ifdef UNDEPRECATE_CESMPONDS ! CESM melt ponds if (tr_pond_cesm) then if (trim(runtype) == 'continue') & @@ -345,6 +360,7 @@ subroutine init_restart enddo ! iblk endif endif +#endif ! 
level-ice melt ponds if (tr_pond_lvl) then if (trim(runtype) == 'continue') & diff --git a/cicecore/drivers/direct/nemo_concepts/CICE_RunMod.F90 b/cicecore/drivers/direct/nemo_concepts/CICE_RunMod.F90 index eb2bdcbf1..272174fe7 100644 --- a/cicecore/drivers/direct/nemo_concepts/CICE_RunMod.F90 +++ b/cicecore/drivers/direct/nemo_concepts/CICE_RunMod.F90 @@ -145,7 +145,11 @@ subroutine ice_step use ice_history_bgc, only: init_history_bgc use ice_restart, only: final_restart use ice_restart_column, only: write_restart_age, write_restart_FY, & +#ifdef UNDEPRECATE_CESMPONDS write_restart_lvl, write_restart_pond_cesm, write_restart_pond_lvl, & +#else + write_restart_lvl, write_restart_pond_lvl, & +#endif write_restart_pond_topo, write_restart_aero, write_restart_fsd, & write_restart_bgc, write_restart_hbrine use ice_restart_driver, only: dumpfile @@ -168,7 +172,11 @@ subroutine ice_step logical (kind=log_kind) :: & tr_iage, tr_FY, tr_lvl, tr_fsd, & +#ifdef UNDEPRECATE_CESMPONDS tr_pond_cesm, tr_pond_lvl, tr_pond_topo, tr_brine, tr_aero, & +#else + tr_pond_lvl, tr_pond_topo, tr_brine, tr_aero, & +#endif calc_Tsfc, skl_bgc, solve_zsal, z_tracers, wave_spec character(len=*), parameter :: subname = '(ice_step)' @@ -177,7 +185,11 @@ subroutine ice_step solve_zsal_out=solve_zsal, z_tracers_out=z_tracers, ktherm_out=ktherm, & wave_spec_out=wave_spec) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & +#ifdef UNDEPRECATE_CESMPONDS tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & +#else + tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & +#endif tr_pond_topo_out=tr_pond_topo, tr_brine_out=tr_brine, tr_aero_out=tr_aero, & tr_fsd_out=tr_fsd) call icepack_warnings_flush(nu_diag) @@ -321,7 +333,9 @@ subroutine ice_step if (tr_iage) call write_restart_age if (tr_FY) call write_restart_FY if (tr_lvl) call write_restart_lvl +#ifdef UNDEPRECATE_CESMPONDS if (tr_pond_cesm) call write_restart_pond_cesm +#endif if (tr_pond_lvl) call write_restart_pond_lvl if (tr_pond_topo) call write_restart_pond_topo if (tr_fsd) call write_restart_fsd diff --git a/cicecore/drivers/mct/cesm1/CICE_InitMod.F90 b/cicecore/drivers/mct/cesm1/CICE_InitMod.F90 index 4725b1d41..b33886954 100644 --- a/cicecore/drivers/mct/cesm1/CICE_InitMod.F90 +++ b/cicecore/drivers/mct/cesm1/CICE_InitMod.F90 @@ -264,11 +264,17 @@ subroutine init_restart use ice_grid, only: tmask use ice_init, only: ice_ic use ice_init_column, only: init_age, init_FY, init_lvl, init_snowtracers, & +#ifdef UNDEPRECATE_CESMPONDS init_meltponds_cesm, init_meltponds_lvl, init_meltponds_topo, & +#else + init_meltponds_lvl, init_meltponds_topo, & +#endif init_isotope, init_aerosol, init_hbrine, init_bgc, init_fsd use ice_restart_column, only: restart_age, read_restart_age, & restart_FY, read_restart_FY, restart_lvl, read_restart_lvl, & +#ifdef UNDEPRECATE_CESMPONDS restart_pond_cesm, read_restart_pond_cesm, & +#endif restart_pond_lvl, read_restart_pond_lvl, & restart_pond_topo, read_restart_pond_topo, & restart_snow, read_restart_snow, & @@ -285,7 +291,11 @@ subroutine init_restart i, j , & ! horizontal indices iblk ! 
block index logical(kind=log_kind) :: & +#ifdef UNDEPRECATE_CESMPONDS tr_iage, tr_FY, tr_lvl, tr_pond_cesm, tr_pond_lvl, & +#else + tr_iage, tr_FY, tr_lvl, tr_pond_lvl, & +#endif tr_pond_topo, tr_snow, tr_fsd, tr_iso, tr_aero, tr_brine, & skl_bgc, z_tracers, solve_zsal integer(kind=int_kind) :: & @@ -305,7 +315,11 @@ subroutine init_restart call icepack_query_parameters(skl_bgc_out=skl_bgc, & z_tracers_out=z_tracers, solve_zsal_out=solve_zsal) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & +#ifdef UNDEPRECATE_CESMPONDS tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & +#else + tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & +#endif tr_pond_topo_out=tr_pond_topo, tr_aero_out=tr_aero, tr_brine_out=tr_brine, & tr_snow_out=tr_snow, tr_fsd_out=tr_fsd, tr_iso_out=tr_iso) call icepack_query_tracer_indices(nt_alvl_out=nt_alvl, nt_vlvl_out=nt_vlvl, & @@ -367,6 +381,7 @@ subroutine init_restart enddo ! iblk endif endif +#ifdef UNDEPRECATE_CESMPONDS ! CESM melt ponds if (tr_pond_cesm) then if (trim(runtype) == 'continue') & @@ -380,6 +395,7 @@ subroutine init_restart enddo ! iblk endif endif +#endif ! level-ice melt ponds if (tr_pond_lvl) then if (trim(runtype) == 'continue') & diff --git a/cicecore/drivers/mct/cesm1/CICE_RunMod.F90 b/cicecore/drivers/mct/cesm1/CICE_RunMod.F90 index d2efaa8d4..80ff3bd46 100644 --- a/cicecore/drivers/mct/cesm1/CICE_RunMod.F90 +++ b/cicecore/drivers/mct/cesm1/CICE_RunMod.F90 @@ -137,7 +137,11 @@ subroutine ice_step use ice_history_bgc, only: init_history_bgc use ice_restart, only: final_restart use ice_restart_column, only: write_restart_age, write_restart_FY, & +#ifdef UNDEPRECATE_CESMPONDS write_restart_lvl, write_restart_pond_cesm, write_restart_pond_lvl, & +#else + write_restart_lvl, write_restart_pond_lvl, & +#endif write_restart_pond_topo, write_restart_aero, write_restart_fsd, & write_restart_iso, write_restart_bgc, write_restart_hbrine, & write_restart_snow @@ -162,7 +166,11 @@ subroutine ice_step logical (kind=log_kind) :: & tr_iage, tr_FY, tr_lvl, tr_fsd, tr_snow, & +#ifdef UNDEPRECATE_CESMPONDS tr_pond_cesm, tr_pond_lvl, tr_pond_topo, tr_brine, tr_iso, tr_aero, & +#else + tr_pond_lvl, tr_pond_topo, tr_brine, tr_iso, tr_aero, & +#endif calc_Tsfc, skl_bgc, solve_zsal, z_tracers, wave_spec character(len=*), parameter :: subname = '(ice_step)' @@ -180,7 +188,11 @@ subroutine ice_step solve_zsal_out=solve_zsal, z_tracers_out=z_tracers, ktherm_out=ktherm, & wave_spec_out=wave_spec) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & +#ifdef UNDEPRECATE_CESMPONDS tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & +#else + tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & +#endif tr_pond_topo_out=tr_pond_topo, tr_brine_out=tr_brine, tr_aero_out=tr_aero, & tr_iso_out=tr_iso, tr_fsd_out=tr_fsd, tr_snow_out=tr_snow) call icepack_warnings_flush(nu_diag) @@ -391,7 +403,9 @@ subroutine ice_step if (tr_iage) call write_restart_age if (tr_FY) call write_restart_FY if (tr_lvl) call write_restart_lvl +#ifdef UNDEPRECATE_CESMPONDS if (tr_pond_cesm) call write_restart_pond_cesm +#endif if (tr_pond_lvl) call write_restart_pond_lvl if (tr_pond_topo) call write_restart_pond_topo if (tr_snow) call write_restart_snow diff --git a/cicecore/drivers/nuopc/cmeps/CICE_InitMod.F90 b/cicecore/drivers/nuopc/cmeps/CICE_InitMod.F90 index 338b25050..f9b5116d0 100644 --- a/cicecore/drivers/nuopc/cmeps/CICE_InitMod.F90 +++ b/cicecore/drivers/nuopc/cmeps/CICE_InitMod.F90 @@ -215,11 +215,17 
@@ subroutine init_restart() use ice_grid, only: tmask use ice_init, only: ice_ic use ice_init_column, only: init_age, init_FY, init_lvl, init_snowtracers, & +#ifdef UNDEPRECATE_CESMPONDS init_meltponds_cesm, init_meltponds_lvl, init_meltponds_topo, & +#else + init_meltponds_lvl, init_meltponds_topo, & +#endif init_isotope, init_aerosol, init_hbrine, init_bgc, init_fsd use ice_restart_column, only: restart_age, read_restart_age, & restart_FY, read_restart_FY, restart_lvl, read_restart_lvl, & +#ifdef UNDEPRECATE_CESMPONDS restart_pond_cesm, read_restart_pond_cesm, & +#endif restart_pond_lvl, read_restart_pond_lvl, & restart_pond_topo, read_restart_pond_topo, & restart_snow, read_restart_snow, & @@ -236,7 +242,11 @@ subroutine init_restart() i, j , & ! horizontal indices iblk ! block index logical(kind=log_kind) :: & +#ifdef UNDEPRECATE_CESMPONDS tr_iage, tr_FY, tr_lvl, tr_pond_cesm, tr_pond_lvl, & +#else + tr_iage, tr_FY, tr_lvl, tr_pond_lvl, & +#endif tr_pond_topo, tr_fsd, tr_iso, tr_aero, tr_brine, tr_snow, & skl_bgc, z_tracers, solve_zsal integer(kind=int_kind) :: & @@ -257,7 +267,11 @@ subroutine init_restart() call icepack_query_parameters(skl_bgc_out=skl_bgc, & z_tracers_out=z_tracers, solve_zsal_out=solve_zsal) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & +#ifdef UNDEPRECATE_CESMPONDS tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & +#else + tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & +#endif tr_pond_topo_out=tr_pond_topo, tr_aero_out=tr_aero, tr_brine_out=tr_brine, & tr_snow_out=tr_snow, tr_fsd_out=tr_fsd, tr_iso_out=tr_iso) call icepack_query_tracer_indices(nt_alvl_out=nt_alvl, nt_vlvl_out=nt_vlvl, & @@ -319,6 +333,7 @@ subroutine init_restart() enddo ! iblk endif endif +#ifdef UNDEPRECATE_CESMPONDS ! CESM melt ponds if (tr_pond_cesm) then if (trim(runtype) == 'continue') & @@ -332,6 +347,7 @@ subroutine init_restart() enddo ! iblk endif endif +#endif ! 
level-ice melt ponds if (tr_pond_lvl) then if (trim(runtype) == 'continue') & diff --git a/cicecore/drivers/nuopc/cmeps/CICE_RunMod.F90 b/cicecore/drivers/nuopc/cmeps/CICE_RunMod.F90 index 79066e82a..22234d27f 100644 --- a/cicecore/drivers/nuopc/cmeps/CICE_RunMod.F90 +++ b/cicecore/drivers/nuopc/cmeps/CICE_RunMod.F90 @@ -121,7 +121,11 @@ subroutine ice_step use ice_history_bgc, only: init_history_bgc use ice_restart, only: final_restart use ice_restart_column, only: write_restart_age, write_restart_FY, & +#ifdef UNDEPRECATE_CESMPONDS write_restart_lvl, write_restart_pond_cesm, write_restart_pond_lvl, & +#else + write_restart_lvl, write_restart_pond_lvl, & +#endif write_restart_pond_topo, write_restart_aero, write_restart_fsd, & write_restart_iso, write_restart_bgc, write_restart_hbrine, & write_restart_snow @@ -146,7 +150,11 @@ subroutine ice_step logical (kind=log_kind) :: & tr_iage, tr_FY, tr_lvl, tr_fsd, tr_snow, & +#ifdef UNDEPRECATE_CESMPONDS tr_pond_cesm, tr_pond_lvl, tr_pond_topo, tr_brine, tr_iso, tr_aero, & +#else + tr_pond_lvl, tr_pond_topo, tr_brine, tr_iso, tr_aero, & +#endif calc_Tsfc, skl_bgc, solve_zsal, z_tracers, wave_spec character(len=*), parameter :: subname = '(ice_step)' @@ -164,7 +172,11 @@ subroutine ice_step solve_zsal_out=solve_zsal, z_tracers_out=z_tracers, ktherm_out=ktherm, & wave_spec_out=wave_spec) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & +#ifdef UNDEPRECATE_CESMPONDS tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & +#else + tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & +#endif tr_pond_topo_out=tr_pond_topo, tr_brine_out=tr_brine, tr_aero_out=tr_aero, & tr_iso_out=tr_iso, tr_fsd_out=tr_fsd, tr_snow_out=tr_snow) call icepack_warnings_flush(nu_diag) @@ -370,7 +382,9 @@ subroutine ice_step if (tr_iage) call write_restart_age if (tr_FY) call write_restart_FY if (tr_lvl) call write_restart_lvl +#ifdef UNDEPRECATE_CESMPONDS if (tr_pond_cesm) call write_restart_pond_cesm +#endif if (tr_pond_lvl) call write_restart_pond_lvl if (tr_pond_topo) call write_restart_pond_topo if (tr_snow) call write_restart_snow diff --git a/cicecore/drivers/nuopc/dmi/CICE_InitMod.F90 b/cicecore/drivers/nuopc/dmi/CICE_InitMod.F90 index 82f0ff0e8..2c90061af 100644 --- a/cicecore/drivers/nuopc/dmi/CICE_InitMod.F90 +++ b/cicecore/drivers/nuopc/dmi/CICE_InitMod.F90 @@ -271,11 +271,17 @@ subroutine init_restart use ice_grid, only: tmask use ice_init, only: ice_ic use ice_init_column, only: init_age, init_FY, init_lvl, init_snowtracers, & +#ifdef UNDEPRECATE_CESMPONDS init_meltponds_cesm, init_meltponds_lvl, init_meltponds_topo, & +#else + init_meltponds_lvl, init_meltponds_topo, & +#endif init_isotope, init_aerosol, init_hbrine, init_bgc, init_fsd use ice_restart_column, only: restart_age, read_restart_age, & restart_FY, read_restart_FY, restart_lvl, read_restart_lvl, & +#ifdef UNDEPRECATE_CESMPONDS restart_pond_cesm, read_restart_pond_cesm, & +#endif restart_pond_lvl, read_restart_pond_lvl, & restart_pond_topo, read_restart_pond_topo, & restart_snow, read_restart_snow, & @@ -292,7 +298,11 @@ subroutine init_restart i, j , & ! horizontal indices iblk ! 
block index logical(kind=log_kind) :: & +#ifdef UNDEPRECATE_CESMPONDS tr_iage, tr_FY, tr_lvl, tr_pond_cesm, tr_pond_lvl, & +#else + tr_iage, tr_FY, tr_lvl, tr_pond_lvl, & +#endif tr_pond_topo, tr_snow, tr_fsd, tr_iso, tr_aero, tr_brine, & skl_bgc, z_tracers, solve_zsal integer(kind=int_kind) :: & @@ -312,7 +322,11 @@ subroutine init_restart call icepack_query_parameters(skl_bgc_out=skl_bgc, & z_tracers_out=z_tracers, solve_zsal_out=solve_zsal) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & +#ifdef UNDEPRECATE_CESMPONDS tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & +#else + tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & +#endif tr_pond_topo_out=tr_pond_topo, tr_aero_out=tr_aero, tr_brine_out=tr_brine, & tr_snow_out=tr_snow, tr_fsd_out=tr_fsd, tr_iso_out=tr_iso) call icepack_query_tracer_indices(nt_alvl_out=nt_alvl, nt_vlvl_out=nt_vlvl, & @@ -374,6 +388,7 @@ subroutine init_restart enddo ! iblk endif endif +#ifdef UNDEPRECATE_CESMPONDS ! CESM melt ponds if (tr_pond_cesm) then if (trim(runtype) == 'continue') & @@ -387,6 +402,7 @@ subroutine init_restart enddo ! iblk endif endif +#endif ! level-ice melt ponds if (tr_pond_lvl) then if (trim(runtype) == 'continue') & diff --git a/cicecore/drivers/nuopc/dmi/CICE_RunMod.F90 b/cicecore/drivers/nuopc/dmi/CICE_RunMod.F90 index 1aaee77f4..00c527da0 100644 --- a/cicecore/drivers/nuopc/dmi/CICE_RunMod.F90 +++ b/cicecore/drivers/nuopc/dmi/CICE_RunMod.F90 @@ -157,7 +157,11 @@ subroutine ice_step use ice_history_bgc, only: init_history_bgc use ice_restart, only: final_restart use ice_restart_column, only: write_restart_age, write_restart_FY, & +#ifdef UNDEPRECATE_CESMPONDS write_restart_lvl, write_restart_pond_cesm, write_restart_pond_lvl, & +#else + write_restart_lvl, write_restart_pond_lvl, & +#endif write_restart_pond_topo, write_restart_aero, write_restart_fsd, & write_restart_iso, write_restart_bgc, write_restart_hbrine, & write_restart_snow @@ -180,7 +184,11 @@ subroutine ice_step logical (kind=log_kind) :: & tr_iage, tr_FY, tr_lvl, tr_fsd, tr_snow, & +#ifdef UNDEPRECATE_CESMPONDS tr_pond_cesm, tr_pond_lvl, tr_pond_topo, tr_brine, tr_iso, tr_aero, & +#else + tr_pond_lvl, tr_pond_topo, tr_brine, tr_iso, tr_aero, & +#endif calc_Tsfc, skl_bgc, solve_zsal, z_tracers, wave_spec character(len=*), parameter :: subname = '(ice_step)' @@ -198,7 +206,11 @@ subroutine ice_step solve_zsal_out=solve_zsal, z_tracers_out=z_tracers, ktherm_out=ktherm, & wave_spec_out=wave_spec) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & +#ifdef UNDEPRECATE_CESMPONDS tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & +#else + tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & +#endif tr_pond_topo_out=tr_pond_topo, tr_brine_out=tr_brine, tr_aero_out=tr_aero, & tr_iso_out=tr_iso, tr_fsd_out=tr_fsd, tr_snow_out=tr_snow) call icepack_warnings_flush(nu_diag) @@ -400,7 +412,9 @@ subroutine ice_step if (tr_iage) call write_restart_age if (tr_FY) call write_restart_FY if (tr_lvl) call write_restart_lvl +#ifdef UNDEPRECATE_CESMPONDS if (tr_pond_cesm) call write_restart_pond_cesm +#endif if (tr_pond_lvl) call write_restart_pond_lvl if (tr_pond_topo) call write_restart_pond_topo if (tr_snow) call write_restart_snow diff --git a/cicecore/drivers/standalone/cice/CICE_InitMod.F90 b/cicecore/drivers/standalone/cice/CICE_InitMod.F90 index 0130d2588..7208da481 100644 --- a/cicecore/drivers/standalone/cice/CICE_InitMod.F90 +++ b/cicecore/drivers/standalone/cice/CICE_InitMod.F90 @@ 
-266,11 +266,17 @@ subroutine init_restart use ice_grid, only: tmask use ice_init, only: ice_ic use ice_init_column, only: init_age, init_FY, init_lvl, init_snowtracers, & +#ifdef UNDEPRECATE_CESMPONDS init_meltponds_cesm, init_meltponds_lvl, init_meltponds_topo, & +#else + init_meltponds_lvl, init_meltponds_topo, & +#endif init_isotope, init_aerosol, init_hbrine, init_bgc, init_fsd use ice_restart_column, only: restart_age, read_restart_age, & restart_FY, read_restart_FY, restart_lvl, read_restart_lvl, & +#ifdef UNDEPRECATE_CESMPONDS restart_pond_cesm, read_restart_pond_cesm, & +#endif restart_pond_lvl, read_restart_pond_lvl, & restart_pond_topo, read_restart_pond_topo, & restart_snow, read_restart_snow, & @@ -287,7 +293,11 @@ subroutine init_restart i, j , & ! horizontal indices iblk ! block index logical(kind=log_kind) :: & +#ifdef UNDEPRECATE_CESMPONDS tr_iage, tr_FY, tr_lvl, tr_pond_cesm, tr_pond_lvl, & +#else + tr_iage, tr_FY, tr_lvl, tr_pond_lvl, & +#endif tr_pond_topo, tr_snow, tr_fsd, tr_iso, tr_aero, tr_brine, & skl_bgc, z_tracers, solve_zsal integer(kind=int_kind) :: & @@ -307,7 +317,11 @@ subroutine init_restart call icepack_query_parameters(skl_bgc_out=skl_bgc, & z_tracers_out=z_tracers, solve_zsal_out=solve_zsal) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & +#ifdef UNDEPRECATE_CESMPONDS tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & +#else + tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & +#endif tr_pond_topo_out=tr_pond_topo, tr_aero_out=tr_aero, tr_brine_out=tr_brine, & tr_snow_out=tr_snow, tr_fsd_out=tr_fsd, tr_iso_out=tr_iso) call icepack_query_tracer_indices(nt_alvl_out=nt_alvl, nt_vlvl_out=nt_vlvl, & @@ -369,6 +383,7 @@ subroutine init_restart enddo ! iblk endif endif +#ifdef UNDEPRECATE_CESMPONDS ! CESM melt ponds if (tr_pond_cesm) then if (trim(runtype) == 'continue') & @@ -382,6 +397,7 @@ subroutine init_restart enddo ! iblk endif endif +#endif ! 
level-ice melt ponds if (tr_pond_lvl) then if (trim(runtype) == 'continue') & diff --git a/cicecore/drivers/standalone/cice/CICE_RunMod.F90 b/cicecore/drivers/standalone/cice/CICE_RunMod.F90 index 78e3b5259..5547ba765 100644 --- a/cicecore/drivers/standalone/cice/CICE_RunMod.F90 +++ b/cicecore/drivers/standalone/cice/CICE_RunMod.F90 @@ -151,7 +151,11 @@ subroutine ice_step use ice_history_bgc, only: init_history_bgc use ice_restart, only: final_restart use ice_restart_column, only: write_restart_age, write_restart_FY, & +#ifdef UNDEPRECATE_CESMPONDS write_restart_lvl, write_restart_pond_cesm, write_restart_pond_lvl, & +#else + write_restart_lvl, write_restart_pond_lvl, & +#endif write_restart_pond_topo, write_restart_aero, write_restart_fsd, & write_restart_iso, write_restart_bgc, write_restart_hbrine, & write_restart_snow @@ -174,7 +178,11 @@ subroutine ice_step logical (kind=log_kind) :: & tr_iage, tr_FY, tr_lvl, tr_fsd, tr_snow, & +#ifdef UNDEPRECATE_CESMPONDS tr_pond_cesm, tr_pond_lvl, tr_pond_topo, tr_brine, tr_iso, tr_aero, & +#else + tr_pond_lvl, tr_pond_topo, tr_brine, tr_iso, tr_aero, & +#endif calc_Tsfc, skl_bgc, solve_zsal, z_tracers, wave_spec character(len=*), parameter :: subname = '(ice_step)' @@ -192,7 +200,11 @@ subroutine ice_step solve_zsal_out=solve_zsal, z_tracers_out=z_tracers, ktherm_out=ktherm, & wave_spec_out=wave_spec) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & +#ifdef UNDEPRECATE_CESMPONDS tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & +#else + tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & +#endif tr_pond_topo_out=tr_pond_topo, tr_brine_out=tr_brine, tr_aero_out=tr_aero, & tr_iso_out=tr_iso, tr_fsd_out=tr_fsd, tr_snow_out=tr_snow) call icepack_warnings_flush(nu_diag) @@ -396,7 +408,9 @@ subroutine ice_step if (tr_iage) call write_restart_age if (tr_FY) call write_restart_FY if (tr_lvl) call write_restart_lvl +#ifdef UNDEPRECATE_CESMPONDS if (tr_pond_cesm) call write_restart_pond_cesm +#endif if (tr_pond_lvl) call write_restart_pond_lvl if (tr_pond_topo) call write_restart_pond_topo if (tr_snow) call write_restart_snow diff --git a/cicecore/drivers/unittest/gridavgchk/CICE_InitMod.F90 b/cicecore/drivers/unittest/gridavgchk/CICE_InitMod.F90 index 60f71fa8a..a252bc1b7 100644 --- a/cicecore/drivers/unittest/gridavgchk/CICE_InitMod.F90 +++ b/cicecore/drivers/unittest/gridavgchk/CICE_InitMod.F90 @@ -241,11 +241,17 @@ subroutine init_restart use ice_grid, only: tmask use ice_init, only: ice_ic use ice_init_column, only: init_age, init_FY, init_lvl, & +#ifdef UNDEPRECATE_CESMPONDS init_meltponds_cesm, init_meltponds_lvl, init_meltponds_topo, & +#else + init_meltponds_lvl, init_meltponds_topo, & +#endif init_isotope, init_aerosol, init_hbrine, init_bgc, init_fsd use ice_restart_column, only: restart_age, read_restart_age, & restart_FY, read_restart_FY, restart_lvl, read_restart_lvl, & +#ifdef UNDEPRECATE_CESMPONDS restart_pond_cesm, read_restart_pond_cesm, & +#endif restart_pond_lvl, read_restart_pond_lvl, & restart_pond_topo, read_restart_pond_topo, & restart_fsd, read_restart_fsd, & @@ -261,7 +267,11 @@ subroutine init_restart i, j , & ! horizontal indices iblk ! 
block index logical(kind=log_kind) :: & +#ifdef UNDEPRECATE_CESMPONDS tr_iage, tr_FY, tr_lvl, tr_pond_cesm, tr_pond_lvl, & +#else + tr_iage, tr_FY, tr_lvl, tr_pond_lvl, & +#endif tr_pond_topo, tr_fsd, tr_iso, tr_aero, tr_brine, & skl_bgc, z_tracers, solve_zsal integer(kind=int_kind) :: & @@ -280,7 +290,11 @@ subroutine init_restart call icepack_query_parameters(skl_bgc_out=skl_bgc, & z_tracers_out=z_tracers, solve_zsal_out=solve_zsal) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & +#ifdef UNDEPRECATE_CESMPONDS tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & +#else + tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & +#endif tr_pond_topo_out=tr_pond_topo, tr_aero_out=tr_aero, tr_brine_out=tr_brine, & tr_fsd_out=tr_fsd, tr_iso_out=tr_iso) call icepack_query_tracer_indices(nt_alvl_out=nt_alvl, nt_vlvl_out=nt_vlvl, & @@ -340,6 +354,7 @@ subroutine init_restart enddo ! iblk endif endif +#ifdef UNDEPRECATE_CESMPONDS ! CESM melt ponds if (tr_pond_cesm) then if (trim(runtype) == 'continue') & @@ -353,6 +368,7 @@ subroutine init_restart enddo ! iblk endif endif +#endif ! level-ice melt ponds if (tr_pond_lvl) then if (trim(runtype) == 'continue') & diff --git a/cicecore/drivers/unittest/sumchk/CICE_InitMod.F90 b/cicecore/drivers/unittest/sumchk/CICE_InitMod.F90 index 60f71fa8a..a252bc1b7 100644 --- a/cicecore/drivers/unittest/sumchk/CICE_InitMod.F90 +++ b/cicecore/drivers/unittest/sumchk/CICE_InitMod.F90 @@ -241,11 +241,17 @@ subroutine init_restart use ice_grid, only: tmask use ice_init, only: ice_ic use ice_init_column, only: init_age, init_FY, init_lvl, & +#ifdef UNDEPRECATE_CESMPONDS init_meltponds_cesm, init_meltponds_lvl, init_meltponds_topo, & +#else + init_meltponds_lvl, init_meltponds_topo, & +#endif init_isotope, init_aerosol, init_hbrine, init_bgc, init_fsd use ice_restart_column, only: restart_age, read_restart_age, & restart_FY, read_restart_FY, restart_lvl, read_restart_lvl, & +#ifdef UNDEPRECATE_CESMPONDS restart_pond_cesm, read_restart_pond_cesm, & +#endif restart_pond_lvl, read_restart_pond_lvl, & restart_pond_topo, read_restart_pond_topo, & restart_fsd, read_restart_fsd, & @@ -261,7 +267,11 @@ subroutine init_restart i, j , & ! horizontal indices iblk ! block index logical(kind=log_kind) :: & +#ifdef UNDEPRECATE_CESMPONDS tr_iage, tr_FY, tr_lvl, tr_pond_cesm, tr_pond_lvl, & +#else + tr_iage, tr_FY, tr_lvl, tr_pond_lvl, & +#endif tr_pond_topo, tr_fsd, tr_iso, tr_aero, tr_brine, & skl_bgc, z_tracers, solve_zsal integer(kind=int_kind) :: & @@ -280,7 +290,11 @@ subroutine init_restart call icepack_query_parameters(skl_bgc_out=skl_bgc, & z_tracers_out=z_tracers, solve_zsal_out=solve_zsal) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & +#ifdef UNDEPRECATE_CESMPONDS tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & +#else + tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & +#endif tr_pond_topo_out=tr_pond_topo, tr_aero_out=tr_aero, tr_brine_out=tr_brine, & tr_fsd_out=tr_fsd, tr_iso_out=tr_iso) call icepack_query_tracer_indices(nt_alvl_out=nt_alvl, nt_vlvl_out=nt_vlvl, & @@ -340,6 +354,7 @@ subroutine init_restart enddo ! iblk endif endif +#ifdef UNDEPRECATE_CESMPONDS ! CESM melt ponds if (tr_pond_cesm) then if (trim(runtype) == 'continue') & @@ -353,6 +368,7 @@ subroutine init_restart enddo ! iblk endif endif +#endif ! 
level-ice melt ponds if (tr_pond_lvl) then if (trim(runtype) == 'continue') & diff --git a/cicecore/shared/ice_init_column.F90 b/cicecore/shared/ice_init_column.F90 index 5643b4277..60adcfdfa 100644 --- a/cicecore/shared/ice_init_column.F90 +++ b/cicecore/shared/ice_init_column.F90 @@ -44,7 +44,11 @@ module ice_init_column private public :: init_thermo_vertical, init_shortwave, & init_age, init_FY, init_lvl, init_fsd, & +#ifdef UNDEPRECATE_CESMPONDS init_meltponds_cesm, init_meltponds_lvl, init_meltponds_topo, & +#else + init_meltponds_lvl, init_meltponds_topo, & +#endif init_aerosol, init_bgc, init_hbrine, init_zbgc, input_zbgc, & count_tracers, init_isotope, init_snowtracers @@ -543,6 +547,7 @@ subroutine init_lvl(iblk, alvl, vlvl) end subroutine init_lvl +#ifdef UNDEPRECATE_CESMPONDS !======================================================================= ! Initialize melt ponds. @@ -558,7 +563,7 @@ subroutine init_meltponds_cesm(apnd, hpnd) hpnd(:,:,:) = c0 end subroutine init_meltponds_cesm - +#endif !======================================================================= ! Initialize melt ponds. @@ -1813,7 +1818,11 @@ subroutine count_tracers integer (kind=int_kind) :: ntrcr logical (kind=log_kind) :: tr_iage, tr_FY, tr_lvl, tr_pond, tr_aero, tr_fsd logical (kind=log_kind) :: tr_snow +#ifdef UNDEPRECATE_CESMPONDS logical (kind=log_kind) :: tr_iso, tr_pond_cesm, tr_pond_lvl, tr_pond_topo +#else + logical (kind=log_kind) :: tr_iso, tr_pond_lvl, tr_pond_topo +#endif integer (kind=int_kind) :: nt_Tsfc, nt_sice, nt_qice, nt_qsno, nt_iage, nt_FY integer (kind=int_kind) :: nt_alvl, nt_vlvl, nt_apnd, nt_hpnd, nt_ipnd, nt_aero integer (kind=int_kind) :: nt_fsd, nt_isosno, nt_isoice @@ -1898,7 +1907,11 @@ subroutine count_tracers call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & tr_lvl_out=tr_lvl, tr_aero_out=tr_aero, tr_pond_out=tr_pond, & +#ifdef UNDEPRECATE_CESMPONDS tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & +#else + tr_pond_lvl_out=tr_pond_lvl, & +#endif tr_pond_topo_out=tr_pond_topo, tr_brine_out=tr_brine, tr_fsd_out=tr_fsd, & tr_snow_out=tr_snow, tr_iso_out=tr_iso, & tr_bgc_Nit_out=tr_bgc_Nit, tr_bgc_Am_out =tr_bgc_Am, tr_bgc_Sil_out=tr_bgc_Sil, & diff --git a/cicecore/shared/ice_restart_column.F90 b/cicecore/shared/ice_restart_column.F90 index 6ce393190..b28ae2f60 100644 --- a/cicecore/shared/ice_restart_column.F90 +++ b/cicecore/shared/ice_restart_column.F90 @@ -29,7 +29,9 @@ module ice_restart_column public :: write_restart_age, read_restart_age, & write_restart_FY, read_restart_FY, & write_restart_lvl, read_restart_lvl, & +#ifdef UNDEPRECATE_CESMPONDS write_restart_pond_cesm, read_restart_pond_cesm, & +#endif write_restart_pond_lvl, read_restart_pond_lvl, & write_restart_pond_topo, read_restart_pond_topo, & write_restart_snow, read_restart_snow, & @@ -43,7 +45,9 @@ module ice_restart_column restart_age , & ! if .true., read age tracer restart file restart_FY , & ! if .true., read FY tracer restart file restart_lvl , & ! if .true., read lvl tracer restart file +#ifdef UNDEPRECATE_CESMPONDS restart_pond_cesm, & ! if .true., read meltponds restart file +#endif restart_pond_lvl , & ! if .true., read meltponds restart file restart_pond_topo, & ! if .true., read meltponds restart file restart_snow , & ! if .true., read snow tracer restart file @@ -256,6 +260,7 @@ subroutine read_restart_lvl() end subroutine read_restart_lvl +#ifdef UNDEPRECATE_CESMPONDS !======================================================================= ! ! 
Dumps all values needed for restarting @@ -322,7 +327,7 @@ subroutine read_restart_pond_cesm() 'hpnd',ncat,diag,field_loc_center,field_type_scalar) end subroutine read_restart_pond_cesm - +#endif !======================================================================= ! ! Dumps all values needed for restarting diff --git a/configuration/scripts/ice_in b/configuration/scripts/ice_in index 27a333d86..81446105e 100644 --- a/configuration/scripts/ice_in +++ b/configuration/scripts/ice_in @@ -100,8 +100,6 @@ restart_FY = .false. tr_lvl = .true. restart_lvl = .false. - tr_pond_cesm = .false. - restart_pond_cesm = .false. tr_pond_topo = .false. restart_pond_topo = .false. tr_pond_lvl = .true. diff --git a/configuration/scripts/options/set_nml.alt01 b/configuration/scripts/options/set_nml.alt01 index 24947dcda..2f465e4d1 100644 --- a/configuration/scripts/options/set_nml.alt01 +++ b/configuration/scripts/options/set_nml.alt01 @@ -6,7 +6,6 @@ distribution_wght = 'block' tr_iage = .false. tr_FY = .false. tr_lvl = .true. -tr_pond_cesm = .false. tr_pond_topo = .false. tr_pond_lvl = .false. tr_aero = .false. diff --git a/configuration/scripts/options/set_nml.alt02 b/configuration/scripts/options/set_nml.alt02 index a478809ca..3c4d9c383 100644 --- a/configuration/scripts/options/set_nml.alt02 +++ b/configuration/scripts/options/set_nml.alt02 @@ -5,7 +5,6 @@ distribution_type = 'sectrobin' tr_iage = .true. tr_FY = .true. tr_lvl = .true. -tr_pond_cesm = .false. tr_pond_topo = .false. tr_pond_lvl = .false. tr_aero = .false. diff --git a/configuration/scripts/options/set_nml.alt03 b/configuration/scripts/options/set_nml.alt03 index c2ca38f32..22c3c28b0 100644 --- a/configuration/scripts/options/set_nml.alt03 +++ b/configuration/scripts/options/set_nml.alt03 @@ -6,7 +6,6 @@ conserv_check = .true. tr_iage = .false. tr_FY = .false. tr_lvl = .false. -tr_pond_cesm = .false. tr_pond_topo = .true. tr_pond_lvl = .false. tr_aero = .true. diff --git a/configuration/scripts/options/set_nml.alt04 b/configuration/scripts/options/set_nml.alt04 index d1bc6ad02..a07f70e66 100644 --- a/configuration/scripts/options/set_nml.alt04 +++ b/configuration/scripts/options/set_nml.alt04 @@ -6,7 +6,6 @@ distribution_wght = 'block' tr_iage = .true. tr_FY = .true. tr_lvl = .true. -tr_pond_cesm = .false. tr_pond_topo = .false. tr_pond_lvl = .true. tr_aero = .true. diff --git a/configuration/scripts/options/set_nml.alt05 b/configuration/scripts/options/set_nml.alt05 index 6793b5954..d97207dfa 100644 --- a/configuration/scripts/options/set_nml.alt05 +++ b/configuration/scripts/options/set_nml.alt05 @@ -2,7 +2,6 @@ ice_ic = 'internal' tr_iage = .false. tr_FY = .false. tr_lvl = .false. -tr_pond_cesm = .true. tr_pond_topo = .false. tr_pond_lvl = .false. tr_aero = .false. diff --git a/configuration/scripts/options/set_nml.boxadv b/configuration/scripts/options/set_nml.boxadv index 716884031..d0d2907c3 100644 --- a/configuration/scripts/options/set_nml.boxadv +++ b/configuration/scripts/options/set_nml.boxadv @@ -13,7 +13,6 @@ ice_data_dist = 'box2001' tr_iage = .true. tr_FY = .false. tr_lvl = .true. -tr_pond_cesm = .false. tr_pond_topo = .false. tr_pond_lvl = .false. tr_aero = .false. diff --git a/configuration/scripts/options/set_nml.boxnodyn b/configuration/scripts/options/set_nml.boxnodyn index 0b9a214f1..679125222 100644 --- a/configuration/scripts/options/set_nml.boxnodyn +++ b/configuration/scripts/options/set_nml.boxnodyn @@ -38,7 +38,6 @@ ns_boundary_type = 'open' tr_iage = .false. tr_FY = .false. tr_lvl = .false. 
-tr_pond_cesm = .false. tr_pond_topo = .false. tr_pond_lvl = .false. tr_aero = .false. diff --git a/configuration/scripts/options/set_nml.boxrestore b/configuration/scripts/options/set_nml.boxrestore index fd6a9e59e..48c5591de 100644 --- a/configuration/scripts/options/set_nml.boxrestore +++ b/configuration/scripts/options/set_nml.boxrestore @@ -18,7 +18,6 @@ f_aice = 'd' tr_iage = .true. tr_FY = .true. tr_lvl = .true. -tr_pond_cesm = .false. tr_pond_topo = .false. tr_pond_lvl = .false. tr_aero = .false. diff --git a/doc/source/science_guide/sg_tracers.rst b/doc/source/science_guide/sg_tracers.rst index 215c13d08..b75edfb00 100644 --- a/doc/source/science_guide/sg_tracers.rst +++ b/doc/source/science_guide/sg_tracers.rst @@ -78,8 +78,6 @@ is not in use. "tr_FY", "1", "aice", "nt_FY", " " "tr_lvl", "2", "aice", "nt_alvl", " " " ", " ", "vice", "nt_vlvl", " " - "tr_pond_cesm", "2", "aice", "nt_apnd", " " - " ", " ", "apnd", "nt_vpnd", " " "tr_pond_lvl", "3", "aice", "nt_apnd", " " " ", " ", "apnd", "nt_vpnd", " " " ", " ", "apnd", "nt_ipnd", " " @@ -113,7 +111,9 @@ is not in use. "tr_zaero", "n_zaero", "fbri or (a,v)ice", "nt_zaero", "nlt_zaero" " ", "1", "fbri", "nt_zbgc_frac", " " - +.. + "tr_pond_cesm", "2", "aice", "nt_apnd", " " + " ", " ", "apnd", "nt_vpnd", " " Users may add any number of additional tracers that are transported conservatively, provided that the dependency ``trcr_depend`` is defined appropriately. diff --git a/doc/source/user_guide/ug_case_settings.rst b/doc/source/user_guide/ug_case_settings.rst index 6b10a2165..7c43d8389 100644 --- a/doc/source/user_guide/ug_case_settings.rst +++ b/doc/source/user_guide/ug_case_settings.rst @@ -353,8 +353,8 @@ tracer_nml "``tr_iage``", "logical", "ice age", "``.false.``" "``tr_iso``", "logical", "isotopes", "``.false.``" "``tr_lvl``", "logical", "level ice area and volume", "``.false.``" - "``tr_pond_cesm``", "logical", "CESM melt ponds", "``.false.``" "``tr_pond_lvl``", "logical", "level-ice melt ponds", "``.false.``" + "``tr_pond_cesm``", " ", "DEPRECATED", " " "``tr_pond_topo``", "logical", "topo melt ponds", "``.false.``" "``tr_snow``", "logical", "advanced snow physics", "``.false.``" "``restart_aero``", "logical", "restart tracer values from file", "``.false.``" @@ -363,12 +363,15 @@ tracer_nml "``restart_FY``", "logical", "restart tracer values from file", "``.false.``" "``restart_iso``", "logical", "restart tracer values from file", "``.false.``" "``restart_lvl``", "logical", "restart tracer values from file", "``.false.``" - "``restart_pond_cesm``", "logical", "restart tracer values from file", "``.false.``" "``restart_pond_lvl``", "logical", "restart tracer values from file", "``.false.``" "``restart_pond_topo``", "logical", "restart tracer values from file", "``.false.``" "``restart_snow``", "logical", "restart snow tracer values from file", "``.false.``" "", "", "", "" +.. + "``tr_pond_cesm``", "logical", "CESM melt ponds", "``.false.``" + "``restart_pond_cesm``", "logical", "restart tracer values from file", "``.false.``" + thermo_nml ~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/user_guide/ug_testing.rst b/doc/source/user_guide/ug_testing.rst index b8d42ad6d..9d103a21a 100644 --- a/doc/source/user_guide/ug_testing.rst +++ b/doc/source/user_guide/ug_testing.rst @@ -347,7 +347,6 @@ Lines that begin with # or are blank are ignored. 
For example, smoke col 1x1 debug,run1year restart col 1x1 debug restart col 1x1 diag1 - restart col 1x1 pondcesm restart col 1x1 pondlvl restart col 1x1 pondtopo diff --git a/icepack b/icepack index 76ecd418d..595c00cfe 160000 --- a/icepack +++ b/icepack @@ -1 +1 @@ -Subproject commit 76ecd418d2efad7e74fe35c4ec85f0830923bda6 +Subproject commit 595c00cfe8121d4e2405282fa08c0907b22e8718 From 3af3d1b0e7b2b144cf9f5a183e79e5e6982e1e99 Mon Sep 17 00:00:00 2001 From: Elizabeth Hunke Date: Sun, 31 Jul 2022 10:15:50 -0600 Subject: [PATCH 05/57] Deprecate 0-layer thermodynamics in the CICE driver (#732) * initial 0-layer thermo deprecation * capitalization matters for cpps * set_nml.boxadv needs thermo turned on * deprecate old ridging participation and redistribution functions * Revert "deprecate old ridging participation and redistribution functions" This reverts commit 95c289af51d92c0d598bb401cc8b0b93b201df6c. --- cicecore/cicedynB/general/ice_init.F90 | 13 ++++++++++++- cicecore/shared/ice_init_column.F90 | 2 +- configuration/scripts/machines/env.badger_intel | 4 ++-- configuration/scripts/options/set_nml.alt01 | 2 +- configuration/scripts/options/set_nml.boxadv | 2 +- configuration/scripts/options/set_nml.boxnodyn | 2 +- configuration/scripts/options/set_nml.boxrestore | 2 +- .../cice4_restart_conversion/convert_restarts.f90 | 5 +++++ doc/source/cice_index.rst | 10 +++++++--- doc/source/user_guide/ug_case_settings.rst | 5 ++++- 10 files changed, 35 insertions(+), 12 deletions(-) diff --git a/cicecore/cicedynB/general/ice_init.F90 b/cicecore/cicedynB/general/ice_init.F90 index 7a273148a..fdb435ffa 100644 --- a/cicecore/cicedynB/general/ice_init.F90 +++ b/cicecore/cicedynB/general/ice_init.F90 @@ -432,7 +432,11 @@ subroutine input_data conserv_check = .false. ! tracer conservation check shortwave = 'ccsm3' ! 'ccsm3' or 'dEdd' (delta-Eddington) albedo_type = 'ccsm3' ! 'ccsm3' or 'constant' +#ifdef UNDEPRECATE_0LAYER ktherm = 1 ! -1 = OFF, 0 = 0-layer, 1 = BL99, 2 = mushy thermo +#else + ktherm = 1 ! -1 = OFF, 1 = BL99, 2 = mushy thermo +#endif conduct = 'bubbly' ! 'MU71' or 'bubbly' (Pringle et al 2007) coriolis = 'latitude' ! latitude dependent, or 'constant' ssh_stress = 'geostrophic' ! 'geostrophic' or 'coupled' @@ -1897,8 +1901,10 @@ subroutine input_data tmpstr2 = ' : Bitz and Lipscomb 1999 thermo' elseif (ktherm == 2) then tmpstr2 = ' : mushy-layer thermo' +#ifdef UNDEPRECATE_0LAYER elseif (ktherm == 0) then tmpstr2 = ' : zero-layer thermo' +#endif elseif (ktherm < 0) then tmpstr2 = ' : Thermodynamics disabled' else @@ -2488,8 +2494,10 @@ subroutine init_state it , & ! tracer index iblk ! block index +#ifdef UNDEPRECATE_0LAYER logical (kind=log_kind) :: & heat_capacity ! from icepack +#endif integer (kind=int_kind) :: ntrcr logical (kind=log_kind) :: tr_iage, tr_FY, tr_lvl, tr_iso, tr_aero @@ -2511,7 +2519,9 @@ subroutine init_state !----------------------------------------------------------------- +#ifdef UNDEPRECATE_0LAYER call icepack_query_parameters(heat_capacity_out=heat_capacity) +#endif call icepack_query_tracer_sizes(ntrcr_out=ntrcr) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & tr_lvl_out=tr_lvl, tr_iso_out=tr_iso, tr_aero_out=tr_aero, & @@ -2553,6 +2563,7 @@ subroutine init_state file=__FILE__, line=__LINE__) endif +#ifdef UNDEPRECATE_0LAYER if (.not.heat_capacity) then if (nilyr > 1) then @@ -2570,7 +2581,7 @@ subroutine init_state endif endif ! heat_capacity = F - +#endif endif ! 
my_task !----------------------------------------------------------------- diff --git a/cicecore/shared/ice_init_column.F90 b/cicecore/shared/ice_init_column.F90 index 60adcfdfa..89a378948 100644 --- a/cicecore/shared/ice_init_column.F90 +++ b/cicecore/shared/ice_init_column.F90 @@ -149,7 +149,7 @@ subroutine init_thermo_vertical character(len=*), parameter :: subname='(init_thermo_vertical)' !----------------------------------------------------------------- - ! initialize heat_capacity, l_brine, and salinity profile + ! initialize !----------------------------------------------------------------- call icepack_query_parameters(depressT_out=depressT) diff --git a/configuration/scripts/machines/env.badger_intel b/configuration/scripts/machines/env.badger_intel index 6d53bf978..5532b26d6 100755 --- a/configuration/scripts/machines/env.badger_intel +++ b/configuration/scripts/machines/env.badger_intel @@ -35,9 +35,9 @@ setenv ICE_MACHINE_MACHINFO "Penguin Intel Xeon Broadwell" setenv ICE_MACHINE_ENVNAME intel setenv ICE_MACHINE_ENVINFO "(Note: can vary) ifort 19.0.4.243 20190416, openmpi/2.1.2, netcdf4.4.0" setenv ICE_MACHINE_MAKE gmake -setenv ICE_MACHINE_WKDIR /net/scratch3/$user/CICE_RUNS +setenv ICE_MACHINE_WKDIR /net/scratch4/$user/CICE_RUNS setenv ICE_MACHINE_INPUTDATA /usr/projects/climate/eclare/DATA/Consortium -setenv ICE_MACHINE_BASELINE /net/scratch3/$user/CICE_BASELINE +setenv ICE_MACHINE_BASELINE /net/scratch4/$user/CICE_BASELINE setenv ICE_MACHINE_SUBMIT "sbatch " #setenv ICE_MACHINE_ACCT e3sm setenv ICE_MACHINE_ACCT climatehilat diff --git a/configuration/scripts/options/set_nml.alt01 b/configuration/scripts/options/set_nml.alt01 index 2f465e4d1..6c2bf2159 100644 --- a/configuration/scripts/options/set_nml.alt01 +++ b/configuration/scripts/options/set_nml.alt01 @@ -11,7 +11,7 @@ tr_pond_lvl = .false. tr_aero = .false. kcatbound = 1 kitd = 0 -ktherm = 0 +ktherm = 1 conduct = 'bubbly' kdyn = 1 seabed_stress = .true. diff --git a/configuration/scripts/options/set_nml.boxadv b/configuration/scripts/options/set_nml.boxadv index d0d2907c3..ca05970e3 100644 --- a/configuration/scripts/options/set_nml.boxadv +++ b/configuration/scripts/options/set_nml.boxadv @@ -17,7 +17,7 @@ tr_pond_topo = .false. tr_pond_lvl = .false. tr_aero = .false. kitd = 1 -ktherm = 0 +ktherm = 1 kdyn = 2 kstrength = 0 krdg_partic = 0 diff --git a/configuration/scripts/options/set_nml.boxnodyn b/configuration/scripts/options/set_nml.boxnodyn index 679125222..71abfdaea 100644 --- a/configuration/scripts/options/set_nml.boxnodyn +++ b/configuration/scripts/options/set_nml.boxnodyn @@ -42,7 +42,7 @@ tr_pond_topo = .false. tr_pond_lvl = .false. tr_aero = .false. kitd = 0 -ktherm = 0 +ktherm = 1 kdyn = 1 revised_evp = .false. kstrength = 1 diff --git a/configuration/scripts/options/set_nml.boxrestore b/configuration/scripts/options/set_nml.boxrestore index 48c5591de..7bc4efa26 100644 --- a/configuration/scripts/options/set_nml.boxrestore +++ b/configuration/scripts/options/set_nml.boxrestore @@ -22,7 +22,7 @@ tr_pond_topo = .false. tr_pond_lvl = .false. tr_aero = .false. kitd = 1 -ktherm = 0 +ktherm = 1 kdyn = 1 revised_evp = .true. 
kstrength = 0 diff --git a/configuration/tools/cice4_restart_conversion/convert_restarts.f90 b/configuration/tools/cice4_restart_conversion/convert_restarts.f90 index 51f8027b2..30c952510 100644 --- a/configuration/tools/cice4_restart_conversion/convert_restarts.f90 +++ b/configuration/tools/cice4_restart_conversion/convert_restarts.f90 @@ -39,7 +39,12 @@ program convert_restarts logical (kind=log_kind), parameter :: & oceanmixed_ice = .true., & ! if true, read/write ocean mixed layer fields heat_capacity = .true., & ! if true, ice has nonzero heat capacity +#ifdef UNDEPRECATE_0LAYER ! if false, use zero-layer thermodynamics +#else + ! heat_capacity = .false. (zero-layer thermodynamics) + ! has been deprecated in CICE and Icepack +#endif diag = .true. ! write min/max diagnostics for fields ! file names diff --git a/doc/source/cice_index.rst b/doc/source/cice_index.rst index a0e8df9ba..69e98225e 100644 --- a/doc/source/cice_index.rst +++ b/doc/source/cice_index.rst @@ -307,7 +307,6 @@ either Celsius or Kelvin units). "Gstar", "piecewise-linear ridging participation function parameter", "0.15" "**H**", "", "" "halo_info", "information for updating ghost cells", "" - "heat_capacity", "if true, use salinity-dependent thermodynamics", "T" "hfrazilmin", "minimum thickness of new frazil ice", "0.05 m" "hi_min", "minimum ice thickness for thinnest ice category", "0.01 m" "hi_ssl", "ice surface scattering layer thickness", "0.05 m" @@ -388,10 +387,9 @@ either Celsius or Kelvin units). "krdg_partic", "ridging participation function", "1" "krdg_redist", "ridging redistribution function", "1" "krgdn", "mean ridge thickness per thickness of ridging ice", "" - "kseaice", "thermal conductivity of ice for zero-layer thermodynamics", "2.0 W/m/deg" "ksno", "thermal conductivity of snow", "0.30 W/m/deg" "kstrength", "ice stength formulation (1= :cite:`Rothrock75`, 0 = :cite:`Hibler79`)", "1" - "ktherm", "thermodynamic formulation (0 = zero-layer, 1 = :cite:`Bitz99`, 2 = mushy)", "" + "ktherm", "thermodynamic formulation (-1 = off, 1 = :cite:`Bitz99`, 2 = mushy)", "" "**L**", "", "" "l_brine", "flag for brine pocket effects", "" "l_fixed_area", "flag for prescribing remapping fluxes", "" @@ -774,3 +772,9 @@ either Celsius or Kelvin units). "zref", "reference height for stability", "10. m" "zTrf", "reference height for :math:`T_{ref}`, :math:`Q_{ref}`, :math:`U_{ref}`", "2. m" "zvir", "gas constant (water vapor)/gas constant (air) - 1", "0.606" + +.. + ktherm=0 has been deprecated + "heat_capacity", "if true, use salinity-dependent thermodynamics", "T" + "kseaice", "thermal conductivity of ice for zero-layer thermodynamics", "2.0 W/m/deg" + "ktherm", "thermodynamic formulation (0 = zero-layer, 1 = :cite:`Bitz99`, 2 = mushy)", "" diff --git a/doc/source/user_guide/ug_case_settings.rst b/doc/source/user_guide/ug_case_settings.rst index 7c43d8389..50871bfc5 100644 --- a/doc/source/user_guide/ug_case_settings.rst +++ b/doc/source/user_guide/ug_case_settings.rst @@ -391,7 +391,6 @@ thermo_nml "", "``1``", "linear remapping ITD approximation", "" "``ksno``", "real", "snow thermal conductivity", "0.3" "``ktherm``", "``-1``", "thermodynamic model disabled", "1" - "", "``0``", "zero-layer thermodynamic model", "" "", "``1``", "Bitz and Lipscomb thermodynamic model", "" "", "``2``", "mushy-layer thermodynamic model", "" "``phi_c_slow_mode``", ":math:`0<\phi_c < 1`", "critical liquid fraction", "0.05" @@ -399,6 +398,10 @@ thermo_nml "``Rac_rapid_mode``", "real", "critical Rayleigh number", "10.0" "", "", "", "" +.. 
+ ktherm=0 has been deprecated + "", "``0``", "zero-layer thermodynamic model", "" + .. _dynamics_nml: dynamics_nml From c6470cf67acb90e13fe26e7bfc3d001dacd3fe91 Mon Sep 17 00:00:00 2001 From: Tony Craig Date: Sun, 31 Jul 2022 14:55:42 -0700 Subject: [PATCH 06/57] Update icepack to 3cb1746a202615044e (#743) Bring in deprecated 0-layer and cesmponds changes in Icepack. --- icepack | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/icepack b/icepack index 595c00cfe..3cb1746a2 160000 --- a/icepack +++ b/icepack @@ -1 +1 @@ -Subproject commit 595c00cfe8121d4e2405282fa08c0907b22e8718 +Subproject commit 3cb1746a202615044e38e2928f85294f9c0e72f8 From 063a7f2151ec20571d641af0552f4ea182acb9b6 Mon Sep 17 00:00:00 2001 From: Tony Craig Date: Sun, 31 Jul 2022 14:56:21 -0700 Subject: [PATCH 07/57] Update cice.t-test.py to use cartopy instead of basemap. (#742) * Update cice.t-test.py to use cartopy instead of basemap. * Bug fix to add gridlines to SH plots * commented out contour section of plots. Default is pcolor. If contour is selected it will instead make a pcolor plot * cice.t-test.py: added individual colorbar to each plot. environment.yml: removed basemap, added cartopy * Remove shrink option from difference plots * Add blockall distribution_wght to set_nml.qc to address plotting issues in qc test Co-authored-by: daveh150 --- .../scripts/machines/environment.yml | 2 +- configuration/scripts/options/set_nml.qc | 1 + configuration/scripts/tests/QC/cice.t-test.py | 303 +++++++++++++----- doc/source/user_guide/ug_testing.rst | 1 + 4 files changed, 218 insertions(+), 89 deletions(-) diff --git a/configuration/scripts/machines/environment.yml b/configuration/scripts/machines/environment.yml index 57bdacfec..e76ff692f 100644 --- a/configuration/scripts/machines/environment.yml +++ b/configuration/scripts/machines/environment.yml @@ -12,7 +12,7 @@ dependencies: # Python dependencies for plotting scripts - numpy - matplotlib-base - - basemap + - cartopy - netcdf4 # Python dependencies for building the HTML documentation - sphinx diff --git a/configuration/scripts/options/set_nml.qc b/configuration/scripts/options/set_nml.qc index 70ba1b429..feefb376d 100644 --- a/configuration/scripts/options/set_nml.qc +++ b/configuration/scripts/options/set_nml.qc @@ -13,3 +13,4 @@ diagfreq = 24 histfreq = 'd','x','x','x','x' f_hi = 'd' hist_avg = .false.
+distribution_wght = 'blockall' diff --git a/configuration/scripts/tests/QC/cice.t-test.py b/configuration/scripts/tests/QC/cice.t-test.py index c84583baa..b941c4912 100755 --- a/configuration/scripts/tests/QC/cice.t-test.py +++ b/configuration/scripts/tests/QC/cice.t-test.py @@ -379,8 +379,8 @@ def plot_data(data, lat, lon, units, case, plot_type): try: # Load the necessary plotting libraries import matplotlib.pyplot as plt - from mpl_toolkits.basemap import Basemap - from mpl_toolkits.axes_grid1 import make_axes_locatable + import cartopy.crs as ccrs + import cartopy.feature as cfeature except ImportError: logger.warning('Error loading necessary Python modules in plot_data function') return @@ -389,87 +389,200 @@ def plot_data(data, lat, lon, units, case, plot_type): import warnings warnings.filterwarnings("ignore", category=UserWarning) - # Create the figure and axis - fig, axes = plt.subplots(nrows=1, ncols=2,figsize=(14, 8)) - - # Plot the northern hemisphere data as a scatter plot - # Create the basemap, and draw boundaries - plt.sca(axes[0]) - m = Basemap(projection='npstere', boundinglat=35,lon_0=270, resolution='l') - m.drawcoastlines() - m.fillcontinents() - m.drawcountries() + # define north and south polar stereographic coord ref system + npstereo = ccrs.NorthPolarStereo(central_longitude=-90.0) # define projection + spstereo = ccrs.SouthPolarStereo(central_longitude= 90.0) # define projection + + # define figure + fig = plt.figure(figsize=[14,7]) + + # add axis for each hemisphere + ax1 = fig.add_subplot(121,projection=npstereo) + ax2 = fig.add_subplot(122,projection=spstereo) + + # set plot extents + ax1.set_extent([-180.,180.,35.,90.],ccrs.PlateCarree()) + ax2.set_extent([-180.,180.,-90.,-35.],ccrs.PlateCarree()) + + # add land features NH plot + ax1.add_feature(cfeature.LAND, color='lightgray') + ax1.add_feature(cfeature.BORDERS) + ax1.add_feature(cfeature.COASTLINE) + + # add land features SH plot + ax2.add_feature(cfeature.LAND, color='lightgray') + ax2.add_feature(cfeature.BORDERS) + ax2.add_feature(cfeature.COASTLINE) + + # add grid lines + dlon = 30.0 + dlat = 15.0 + mpLons = np.arange(-180. ,180.0+dlon,dlon) + mpLats = np.arange(-90.,90.0+dlat ,dlat) + + g1 = ax1.gridlines(xlocs=mpLons,ylocs=mpLats, + draw_labels=True, + x_inline=False,y_inline=False) + + g2 = ax2.gridlines(xlocs=mpLons,ylocs=mpLats, + draw_labels=True, + x_inline=False,y_inline=False) + + + # Specify Min/max colors for each hemisphere + # check for minus to see if it is a difference plot + if '\n- ' in case: # this is a difference plot + # specify colormap + mycmap = 'seismic' # blue,white,red with white centered colormap + + # determine max absolute value to use for color range + # intent is use same min/max with center zero + dmin = np.abs(data.min()) + dmax = np.abs(data.max()) + clim = np.max([dmin,dmax]) + + # this specifies both hemispheres the same range. + cminNH = -clim + cmaxNH = clim + cminSH = -clim + cmaxSH = clim + + else: # not a difference plot + # specify colormap + mycmap = 'jet' + + # arbitrary limits for each Hemisphere + cminNH = 0.0 + cmaxNH = 5.0 + cminSH = 0.0 + cmaxSH = 2.0 if plot_type == 'scatter': - x, y = m(lon,lat) - sc = m.scatter(x, y, c=data, cmap='jet', lw=0, s=4) - else: - # Create new arrays to add 1 additional longitude value to prevent a - # small amount of whitespace around longitude of 0/360 degrees.
- lon_cyc = np.zeros((lon.shape[0],lon.shape[1]+1)) - mask = np.zeros((data.shape[0],data.shape[1]+1)) - lat_cyc = np.zeros((lat.shape[0],lat.shape[1]+1)) - - mask[:,0:-1] = data.mask[:,:] - mask[:,-1] = data.mask[:,0] - lon_cyc[:,0:-1] = lon[:,:]; lon_cyc[:,-1] = lon[:,0] - lat_cyc[:,0:-1] = lat[:,:]; lat_cyc[:,-1] = lat[:,0] - - lon1 = np.ma.masked_array(lon_cyc, mask=mask) - lat1 = np.ma.masked_array(lat_cyc, mask=mask) - - d = np.zeros((data.shape[0],data.shape[1]+1)) - d[:,0:-1] = data[:,:] - d[:,-1] = data[:,0] - d1 = np.ma.masked_array(d,mask=mask) - - x, y = m(lon1.data, lat1.data) + # plot NH + scNH = ax1.scatter(lon,lat,c=data,cmap=mycmap,s=4,edgecolors='none', + vmin=cminNH, vmax=cmaxNH, + transform=ccrs.PlateCarree()) + + # plot SH + scSH = ax2.scatter(lon,lat,c=data,cmap=mycmap,s=4,edgecolors='none', + vmin=cminSH, vmax=cmaxSH, + transform=ccrs.PlateCarree()) + else: if plot_type == 'contour': - sc = m.contourf(x, y, d1, cmap='jet') - else: # pcolor - sc = m.pcolor(x, y, d1, cmap='jet') + print("contour plot deprecated. using pcolor.") + + scNH = ax1.pcolormesh(lon,lat,data,cmap=mycmap, + vmin=cminNH, vmax=cmaxNH, + transform=ccrs.PlateCarree()) + + scSH = ax2.pcolormesh(lon,lat,data,cmap=mycmap, + vmin=cminSH, vmax=cmaxSH, + transform=ccrs.PlateCarree()) + + #else: + # # Create new arrays to add 1 additional longitude value to prevent a + # # small amount of whitespace around seam + # lon_cyc = np.zeros((lon.shape[0],lon.shape[1]+1)) + # lat_cyc = np.zeros((lat.shape[0],lat.shape[1]+1)) + # data1 = np.zeros((data.shape[0],data.shape[1]+1)) + # mask = np.zeros((data.shape[0],data.shape[1]+1)) + + # mask[:,0:-1] = data.mask[:,:] + # mask[:,-1] = data.mask[:,0] + # lon_cyc[:,0:-1] = lon[:,:] + # lon_cyc[:,-1] = lon[:,0] + # lat_cyc[:,0:-1] = lat[:,:] + # lat_cyc[:,-1] = lat[:,0] + # data1[:,0:-1] = data[:,:] + # data1[:,-1] = data[:,0] + + # lon1 = np.ma.masked_array(lon_cyc, mask=mask) + # lat1 = np.ma.masked_array(lat_cyc, mask=mask) + # data1 = np.ma.masked_array(data1, mask=mask) + + # if plot_type == 'contour': + # # plotting around -180/180 and 0/360 is a challenge.
+ # # need to use lons in both 0-360 and +- 180 + # # make lons +/- 180 + # lon1_pm180 = np.where(lon1 < 180.0, lon1, lon1-360.0) + # lon1_pm180 = np.ma.masked_where(lon1.mask,lon1_pm180) + + # # get 90-270 lons from the lon 0-360 array (lon1) + # # note: use 91, 269 to prevent small amount of white space in contour plots + # lonmask = np.logical_or(lon1 <= 91.0,lon1 >= 269.0) + # lons_90_270 = np.ma.masked_where(lonmask,lon1) + # lats_90_270 = np.ma.MaskedArray(lat1,mask=lons_90_270.mask) + # data_90_270 = np.ma.MaskedArray(data1,mask=lons_90_270.mask) + # data_90_270.mask = np.logical_or(data1.mask,data_90_270.mask) + + # # get -92-92 lons from +/- 180 (lon1_pm180) + # # note: use 92 to prevent small amount of white space in contour plots + # lonmask = np.logical_or(lon1_pm180 <= -92.0, lon1_pm180 >= 92.0) + # lons_m90_90 = np.ma.masked_where(lonmask,lon1_pm180) + # lats_m90_90 = np.ma.MaskedArray(lat1,mask=lons_m90_90.mask) + # data_m90_90 = np.ma.MaskedArray(data1,mask=lons_m90_90.mask) + # data_m90_90.mask = np.logical_or(data1.mask,data_m90_90.mask) + + # # plot NH 90-270 + # sc = ax1.contourf(lons_90_270, lats_90_270, data_90_270, cmap=mycmap, + # transform=ccrs.PlateCarree(), + # extend='both') + # # plot NH -90-90 + # sc = ax1.contourf(lons_m90_90, lats_m90_90, data_m90_90, cmap=mycmap, + # transform=ccrs.PlateCarree(), + # extend='both') + + # # plot SH 90-270 + # sc = ax2.contourf(lons_90_270, lats_90_270, data_90_270, cmap=mycmap, + # transform=ccrs.PlateCarree(), + # extend='both') + # # plot SH -90-90 + # sc = ax2.contourf(lons_m90_90, lats_m90_90, data_m90_90, cmap=mycmap, + # transform=ccrs.PlateCarree(), + # extend='both') + + + #plt.suptitle('CICE Mean Ice Thickness\n{}'.format(case), y=0.95) + plt.suptitle(f'CICE Mean Ice Thickness\n{case:s}') + + # add more whitespace between plots for colorbar. 
+ plt.subplots_adjust(wspace=0.4) + + # add separate axes for colorbars + # first get position/size of current axes + pos1 = ax1.get_position() + pos2 = ax2.get_position() + + # now add new colormap axes using the position ax1, ax2 as reference + cax1 = fig.add_axes([pos1.x0+pos1.width+0.03, + pos1.y0, + 0.02, + pos1.height]) + + cax2 = fig.add_axes([pos2.x0+pos2.width+0.03, + pos2.y0, + 0.02, + pos2.height]) - m.drawparallels(np.arange(-90.,120.,15.),labels=[1,0,0,0]) # draw parallels - m.drawmeridians(np.arange(0.,420.,30.),labels=[1,1,1,1]) # draw meridians - # Plot the southern hemisphere data as a scatter plot - plt.sca(axes[1]) - m = Basemap(projection='spstere', boundinglat=-45,lon_0=270, resolution='l') - m.drawcoastlines() - m.fillcontinents() - m.drawcountries() + if '\n- ' in case: + # If making a difference plot, use scientific notation for colorbar + cbNH = plt.colorbar(scNH, cax=cax1, orientation="vertical", + pad=0.1, format="%.1e") + cbSH = plt.colorbar(scSH, cax=cax2, orientation="vertical", + pad=0.1, format="%.1e") - if plot_type == 'scatter': - x, y = m(lon,lat) - sc = m.scatter(x, y, c=data, cmap='jet', lw=0, s=4) else: - x, y = m(lon1.data, lat1.data) + #pass + # If plotting non-difference data, do not use scientific notation for colorbar + cbNH = plt.colorbar(scNH, cax=cax1, orientation="vertical", + pad=0.1, format="%.2f") + cbSH = plt.colorbar(scSH, cax=cax2, orientation="vertical", + pad=0.1, format="%.2f") - # Bandaid for a bug in the version of Basemap used during development - outside = (x <= m.xmin) | (x >= m.xmax) | (y <= m.ymin) | (y >= m.ymax) - tmp = np.ma.masked_where(outside,d1) - - if plot_type == 'contour': - sc = m.contourf(x, y, tmp, cmap='jet') - else: # pcolor - sc = m.pcolor(x, y, tmp, cmap='jet') - - m.drawparallels(np.arange(-90.,120.,15.),labels=[1,0,0,0]) # draw parallels - m.drawmeridians(np.arange(0.,420.,30.),labels=[1,1,1,1]) # draw meridians - - plt.suptitle('CICE Mean Ice Thickness\n{}'.format(case), y=0.95) - - # Make some room at the bottom of the figure, and create a colorbar - fig.subplots_adjust(bottom=0.2) - cbar_ax = fig.add_axes([0.11,0.1,0.8,0.05]) - if '\n- ' in case: - # If making a difference plot, use scientific notation for colorbar - cb = plt.colorbar(sc, cax=cbar_ax, orientation="horizontal", format="%.2e") - else: - # If plotting non-difference data, do not use scientific notation for colorbar - cb = plt.colorbar(sc, cax=cbar_ax, orientation="horizontal", format="%.2f") - cb.set_label(units, x=1.0) + cbNH.set_label(units, loc='center') + cbSH.set_label(units, loc='center') outfile = 'ice_thickness_{}.png'.format(case.replace('\n- ','_minus_')) logger.info('Creating map of the data ({})'.format(outfile)) @@ -489,7 +602,8 @@ def plot_two_stage_failures(data, lat, lon): logger.info('Creating map of the failures (two_stage_test_failure_map.png)') # Load the necessary plotting libraries import matplotlib.pyplot as plt - from mpl_toolkits.basemap import Basemap + import cartopy.crs as ccrs + import cartopy.feature as cfeature from mpl_toolkits.axes_grid1 import make_axes_locatable from matplotlib.colors import LinearSegmentedColormap @@ -497,15 +611,19 @@ def plot_two_stage_failures(data, lat, lon): import warnings warnings.filterwarnings("ignore", category=UserWarning) - # Create the figure and axis + # Create the figure fig = plt.figure(figsize=(12, 8)) - ax = fig.add_axes([0.05, 0.08, 0.9, 0.9]) - - # Create the basemap, and draw boundaries - m = Basemap(projection='moll', lon_0=0., resolution='l') - 
m.drawmapboundary(fill_color='white') - m.drawcoastlines() - m.drawcountries() + + # define plot projection and create axis + pltprj = ccrs.Mollweide(central_longitude=0.0) + ax = fig.add_subplot(111,projection=pltprj) + + # add land + ax.add_feature(cfeature.LAND, color='lightgray') + ax.add_feature(cfeature.BORDERS) + ax.add_feature(cfeature.COASTLINE) + #gshhs = cfeature.GSHHSFeature(scale='auto',facecolor='lightgray',edgecolor='none') + #ax.add_feature(gshhs) # Create the custom colormap colors = [(0, 0, 1), (1, 0, 0)] # Blue, Red @@ -513,11 +631,20 @@ def plot_two_stage_failures(data, lat, lon): cm = LinearSegmentedColormap.from_list(cmap_name, colors, N=2) # Plot the data as a scatter plot - x, y = m(lon, lat) - sc = m.scatter(x, y, c=int_data, cmap=cm, lw=0, vmin=0, vmax=1, s=4) - - m.drawmeridians(np.arange(0, 360, 60), labels=[0, 0, 0, 1], fontsize=10) - m.drawparallels(np.arange(-90, 90, 30), labels=[1, 0, 0, 0], fontsize=10) + sc = ax.scatter(lon,lat,c=int_data,cmap=cm,s=4,lw=0, + vmin=0.,vmax=1., + transform=ccrs.PlateCarree()) + + # add grid lines + dlon = 60.0 + dlat = 30.0 + mpLons = np.arange(-180. ,180.0+dlon,dlon) + mpLats = np.arange(-90.,90.0+dlat ,dlat) + mpLabels = {"left": "y", + "bottom": "x"} + + ax.gridlines(xlocs=mpLons,ylocs=mpLats, + draw_labels=mpLabels) plt.title('CICE Two-Stage Test Failures') diff --git a/doc/source/user_guide/ug_testing.rst b/doc/source/user_guide/ug_testing.rst index 9d103a21a..e27dcb8d8 100644 --- a/doc/source/user_guide/ug_testing.rst +++ b/doc/source/user_guide/ug_testing.rst @@ -1050,6 +1050,7 @@ To install the necessary Python packages, the ``pip`` Python utility can be used pip install --user netCDF4 pip install --user numpy pip install --user matplotlib + pip install --user cartopy To run the validation test, setup a baseline run with the original baseline model and then a perturbation run based on recent model changes. Use ``--set qc`` in both runs in addition From 731c61dba9f94e7e43f5ce357fcd36c91bc4c3ca Mon Sep 17 00:00:00 2001 From: Tony Craig Date: Tue, 2 Aug 2022 08:25:29 -0700 Subject: [PATCH 08/57] change visc_method default to avg_zeta, change alt07 to test avg_strength (#744) --- cice.setup | 17 +++++++++++------ cicecore/cicedynB/general/ice_init.F90 | 4 ++-- configuration/scripts/ice_in | 2 +- configuration/scripts/options/set_nml.alt07 | 2 +- doc/source/cice_index.rst | 2 +- doc/source/user_guide/ug_case_settings.rst | 2 +- 6 files changed, 17 insertions(+), 12 deletions(-) diff --git a/cice.setup b/cice.setup index 4994c7ee1..1bf6a8d56 100755 --- a/cice.setup +++ b/cice.setup @@ -1180,21 +1180,26 @@ echo "-------test--------------" echo "${testname_base}" cd ${testname_base} source ./cice.settings +set bldstat = 0 if (\${dobuild} == true) then if (\${doreuse} == true) then set ciceexe = "../ciceexe.\${ICE_TARGET}.\${ICE_ENVNAME}.\${ICE_COMMDIR}.\${ICE_BLDDEBUG}.\${ICE_THREADED}.\${ICE_IOTYPE}" ./cice.build --exe \${ciceexe} + set bldstat = \${status} if !(-e \${ciceexe}) cp -p \${ICE_RUNDIR}/cice \${ciceexe} else ./cice.build + set bldstat = \${status} endif endif -if (\${dosubmit} == true) then - set jobid = \`./cice.submit\` - echo "\$jobid" - echo "\$jobid \${ICE_TESTNAME} " >> ../suite.jobs -else if (\${dorun} == true) then - ./cice.test +if (\$bldstat == 0) then + if (\${dosubmit} == true) then + set jobid = \`./cice.submit\` + echo "\$jobid" + echo "\$jobid \${ICE_TESTNAME} " >> ../suite.jobs + else if (\${dorun} == true) then + ./cice.test + endif endif cd .. 
EOF diff --git a/cicecore/cicedynB/general/ice_init.F90 b/cicecore/cicedynB/general/ice_init.F90 index fdb435ffa..0c368a413 100644 --- a/cicecore/cicedynB/general/ice_init.F90 +++ b/cicecore/cicedynB/general/ice_init.F90 @@ -404,7 +404,7 @@ subroutine input_data Ktens = 0.0_dbl_kind ! T=Ktens*P (tensile strength: see Konig and Holland, 2010) e_yieldcurve = 2.0_dbl_kind ! VP aspect ratio of elliptical yield curve e_plasticpot = 2.0_dbl_kind ! VP aspect ratio of elliptical plastic potential - visc_method = 'avg_strength' ! calc viscosities at U point: avg_strength, avg_zeta + visc_method = 'avg_zeta' ! calc viscosities at U point: avg_strength, avg_zeta deltaminEVP = 1e-11_dbl_kind ! minimum delta for viscosities (EVP, Hunke 2001) deltaminVP = 2e-9_dbl_kind ! minimum delta for viscosities (VP, Hibler 1979) capping_method = 'max' ! method for capping of viscosities (max=Hibler 1979,sum=Kreyscher2000) @@ -889,7 +889,7 @@ subroutine input_data call broadcast_scalar(Ktens, master_task) call broadcast_scalar(e_yieldcurve, master_task) call broadcast_scalar(e_plasticpot, master_task) - call broadcast_scalar(visc_method, master_task) + call broadcast_scalar(visc_method, master_task) call broadcast_scalar(deltaminEVP, master_task) call broadcast_scalar(deltaminVP, master_task) call broadcast_scalar(capping_method, master_task) diff --git a/configuration/scripts/ice_in b/configuration/scripts/ice_in index 81446105e..ec582873a 100644 --- a/configuration/scripts/ice_in +++ b/configuration/scripts/ice_in @@ -147,7 +147,7 @@ Ktens = 0. e_yieldcurve = 2. e_plasticpot = 2. - visc_method = 'avg_strength' + visc_method = 'avg_zeta' elasticDamp = 0.36d0 deltaminEVP = 1e-11 deltaminVP = 2e-9 diff --git a/configuration/scripts/options/set_nml.alt07 b/configuration/scripts/options/set_nml.alt07 index 3355b6019..cb48dab1d 100644 --- a/configuration/scripts/options/set_nml.alt07 +++ b/configuration/scripts/options/set_nml.alt07 @@ -2,5 +2,5 @@ kdyn = 1 evp_algorithm = 'standard_2d' ndte = 300 capping_method = 'sum' -visc_method = 'avg_zeta' +visc_method = 'avg_strength' diff --git a/doc/source/cice_index.rst b/doc/source/cice_index.rst index 69e98225e..99679e791 100644 --- a/doc/source/cice_index.rst +++ b/doc/source/cice_index.rst @@ -738,7 +738,7 @@ either Celsius or Kelvin units). "vice(n)", "volume per unit area of ice (in category n)", "m" "vicen_init", "ice volume at beginning of timestep", "m" "viscosity_dyn", "dynamic viscosity of brine", ":math:`1.79\times 10^{-3}` kg/m/s" - "visc_method", "method for calculating viscosities (‘avg_strength’ or ‘avg_zeta’)", "avg_strength" + "visc_method", "method for calculating viscosities (‘avg_strength’ or ‘avg_zeta’)", "avg_zeta" "vocn", "ocean current in the y-direction", "m/s" "vonkar", "von Karman constant", "0.4" "vraftn", "volume of rafted ice", "m" diff --git a/doc/source/user_guide/ug_case_settings.rst b/doc/source/user_guide/ug_case_settings.rst index 50871bfc5..64264613c 100644 --- a/doc/source/user_guide/ug_case_settings.rst +++ b/doc/source/user_guide/ug_case_settings.rst @@ -488,7 +488,7 @@ dynamics_nml "", "``geostropic``", "computed from ocean velocity", "" "``threshold_hw``", "real", "Max water depth for grounding (see :cite:`Amundrud04`)", "30." 
"``use_mean_vrel``", "logical", "Use mean of two previous iterations for vrel in VP", "``.true.``" - "``visc_method``", "``avg_strength``", "average strength for viscosities on U grid", "``avg_strength``" + "``visc_method``", "``avg_strength``", "average strength for viscosities on U grid", "``avg_zeta``" "", "``avg_zeta``", "average zeta for viscosities on U grid", "" "``yield_curve``", "``ellipse``", "elliptical yield curve", "``ellipse``" "", "", "", "" From 26db2c3564cab0098e8536922a37a38beeb9ad66 Mon Sep 17 00:00:00 2001 From: Philippe Blain Date: Tue, 2 Aug 2022 13:13:20 -0400 Subject: [PATCH 09/57] cice.setup: allow command line to override suite options (#745) * cice.setup: allow command line to override suite options Options chosen on the 'cice.setup' command line (using the '-s' flag) are added to the options defined for each test in the test suite definition file, when running a test suite. This is implemented by appending the options from the test suite (in variable 'sets_tmp') to the options from the command line ('sets_base') in the variable 'sets', which is ultimately (via the variable 'setsx') looped through to apply each option. Since 'sets_tmp' is appended to 'sets_base', options on the command line can't override options from the test suite file, which means one can't e.g. run a test suite with 'kdyn=3' and expect all tests to use this option if any option specified in the test suite set 'kdyn' to some other value. To allow options from the command line to override options from the test suite, reverse the order in which 'sets_tmp' and 'sets_base' are used to define 'sets'. This is in line with the common behaviour of the command line taking precedence. Adjust the documentation accordingly, fixing a typo along the way. * cice.setup: name test suite cases less ambiguously In the previous commit, we allowed options from the command line to override options from the test suite definition file. However, test case directories are still named using a sorted list of all active options, both from command line and the suite definition file (variable 'setsarray'). This is nonoptimal since it is not clear from looking at the test directory name which options have precedence in case of conflict. Change the naming logic so that options from the command line are last in the test directory name, in a "last-one-wins" fashion. To do that, let 'setsarray' be defined from the test suite options ('sets_tmp') and add a second loop for the command line options ('sets_base'). Note that we do not check if the same option is mentioned both in the test suite and the command line, in order not to complicate the code further. This also allows greatly simplifying the logic used to adjust 'bfbcomp' test names to include command line options. This part of the code is checking if the options for the test aginst which to compare contain duplicates and 'none', which is unnecessary since these options come directly from the test suite definition file. 
--- cice.setup | 33 ++++++++++------------------ doc/source/user_guide/ug_testing.rst | 4 ++-- 2 files changed, 14 insertions(+), 23 deletions(-) diff --git a/cice.setup b/cice.setup index 1bf6a8d56..586fe3464 100755 --- a/cice.setup +++ b/cice.setup @@ -638,7 +638,7 @@ EOF set bfbcomp_tmp = `echo $line | cut -d' ' -f5` # Append sets from .ts file to the $sets variable - set sets = "$sets_base,$sets_tmp" + set sets = "$sets_tmp,$sets_base" # Create a new bfbcomp_base variable to store bfbcomp passed to cice.setup # Use bfbcomp_base or bfbcomp_tmp @@ -761,7 +761,7 @@ EOF if (${docase} == 0) then set soptions = "" # Create sorted array and remove duplicates and "none" - set setsarray = `echo ${sets} | sed 's/,/ /g' | fmt -1 | sort -u` + set setsarray = `echo ${sets_tmp} | sed 's/,/ /g' | fmt -1 | sort -u` if ("${setsarray}" != "") then foreach field (${setsarray}) if (${field} != "none") then @@ -769,6 +769,15 @@ EOF endif end endif + # Add options from command line, sort and remove duplicates + set soptions_base = "" + set setsarray_base = `echo ${sets_base} | sed 's/,/ /g' | fmt -1 | sort -u` + if ("${setsarray_base}" != "") then + foreach field (${setsarray_base}) + set soptions = ${soptions}"_"${field} + set soptions_base = ${soptions_base}"_"${field} + end + endif # soptions starts with _ set testname_noid = "${machcomp}_${test}_${grid}_${pesx}${soptions}" set testname_base = "${machcomp}_${test}_${grid}_${pesx}${soptions}.${testid}" @@ -777,26 +786,8 @@ EOF if (${dosuite} == 1) then # Add -s flags in cice.setup to bfbcomp name - # Parse bfbcomp test_grid_pes and sets - # Add sets_base and sort unique - # Create fbfbcomp string that should be consistent with base casename - set bfbcomp_regex="\(.*_[0-9x]*\)_\(.*\)" - set bfbcomp_test_grid_pes=`echo ${bfbcomp} | sed "s/${bfbcomp_regex}/\1/"` - set bfbcomp_sets=`echo ${bfbcomp} | sed "s/${bfbcomp_regex}/\2/" | sed 's/_/,/g' ` - set bfbcomp_sets="${bfbcomp_sets},${sets_base}" - set bfbcomp_soptions = "" - # Create sorted array and remove duplicates and "none" - set bfbcomp_setsarray = `echo ${bfbcomp_sets} | sed 's/,/ /g' | fmt -1 | sort -u` - if ("${bfbcomp_setsarray}" != "") then - foreach field (${bfbcomp_setsarray}) - if (${field} != "none") then - set bfbcomp_soptions = ${bfbcomp_soptions}"_"${field} - endif - end - endif - set fbfbcomp = ${spval} if ($bfbcomp != ${spval}) then - set fbfbcomp = ${machcomp}_${bfbcomp_test_grid_pes}${bfbcomp_soptions} + set fbfbcomp = ${machcomp}_${bfbcomp}${soptions_base} endif endif endif diff --git a/doc/source/user_guide/ug_testing.rst b/doc/source/user_guide/ug_testing.rst index e27dcb8d8..05a16a6fb 100644 --- a/doc/source/user_guide/ug_testing.rst +++ b/doc/source/user_guide/ug_testing.rst @@ -325,7 +325,7 @@ If a user adds ``--set`` to the suite, all tests in that suite will add that opt ./cice.setup --suite base_suite,decomp_suite --mach wolf --env gnu --testid myid -s debug -The option settings defined in the suite have precendence over the command line +The option settings defined at the command line have precedence over the test suite values if there are conflicts. The predefined test suites are defined under **configuration/scripts/tests** and @@ -473,7 +473,7 @@ Test Suite Examples ./results.csh If there are conflicts between the ``--set`` options in the suite and on the command line, - the suite will take precedence. + the command line options will take precedence. 
5) **Multiple test suites from a single command line** From 5a1701c005a71e1537eafc16e0aeef9e0caebc72 Mon Sep 17 00:00:00 2001 From: Tony Craig Date: Thu, 4 Aug 2022 13:38:35 -0700 Subject: [PATCH 10/57] Update Icepack and version number (#748) * Update Icepack Update Version * Set visc_method='avg_strength' for gridCD to avoid some aborts Fix a few bugs in the test suite lists --- cicecore/version.txt | 2 +- configuration/scripts/options/set_nml.gridcd | 3 +++ configuration/scripts/tests/omp_suite.ts | 14 +++++++------- doc/source/conf.py | 4 ++-- icepack | 2 +- 5 files changed, 14 insertions(+), 11 deletions(-) diff --git a/cicecore/version.txt b/cicecore/version.txt index 9e5f9f3e1..154cda3d7 100644 --- a/cicecore/version.txt +++ b/cicecore/version.txt @@ -1 +1 @@ -CICE 6.3.1 +CICE 6.4.0 diff --git a/configuration/scripts/options/set_nml.gridcd b/configuration/scripts/options/set_nml.gridcd index 104801879..7889e64f4 100644 --- a/configuration/scripts/options/set_nml.gridcd +++ b/configuration/scripts/options/set_nml.gridcd @@ -1,2 +1,5 @@ grid_ice = 'C_override_D' +# visc_method=avg_zeta causes some gridcd tests to abort, use avg_strength for now +visc_method = 'avg_strength' + diff --git a/configuration/scripts/tests/omp_suite.ts b/configuration/scripts/tests/omp_suite.ts index 937a3ec90..5d5e18376 100644 --- a/configuration/scripts/tests/omp_suite.ts +++ b/configuration/scripts/tests/omp_suite.ts @@ -39,7 +39,7 @@ smoke gx1 18x1 reprosum,run10day,cmplogrest,thread smoke gx1 18x1 seabedprob,reprosum,run10day,cmplogrest,thread smoke_gx1_15x2_reprosum_run10day_seabedprob smoke gx3 8x1 fsd12,reprosum,run10day,cmplogrest,thread smoke_gx3_14x2_fsd12_reprosum_run10day smoke gx3 8x1 isotope,reprosum,run10day,cmplogrest,thread smoke_gx3_11x2_isotope_reprosum_run10day -smoke gx3 8x1 snwitdrdg,snwgrain,icdefault,reprosum,run10day,cmplogrest,thread smoke_gx3_8x4_icdefault_reprosum_run10day_snwitdrdg_snwgrain +smoke gx3 8x1 snwitdrdg,snwgrain,icdefault,reprosum,run10day,cmplogrest,thread smoke_gx3_8x4_icdefault_reprosum_run10day_snwgrain_snwitdrdg smoke gx3 8x1 dynpicard,reprosum,run10day,cmplogrest,thread smoke_gx3_6x4_dynpicard_reprosum_run10day smoke gx3 8x1 zsal,reprosum,run10day,cmplogrest,thread smoke_gx3_8x3_reprosum_run10day_zsal smoke gx3 4x2x25x29x4 reprosum,run10day smoke_gx3_1x1x100x116x1_reprosum_run10day_thread @@ -79,8 +79,8 @@ smoke gbox128 14x2 boxrestore,reprosum,run10day,gridc smoke gbox80 4x5 box2001,reprosum,run10day,gridc smoke gbox80 11x3 boxslotcyl,reprosum,run10day,gridc -smoke gx3 4x2 diag1,reprosum,run10day,cmplogrest,gridc smoke_gx3_8x4_gridc_diag1_reprosum_run10day -smoke gx3 4x1 diag1,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_8x4_gridc_diag1_reprosum_run10day +smoke gx3 4x2 diag1,reprosum,run10day,cmplogrest,gridc smoke_gx3_8x4_diag1_gridc_reprosum_run10day +smoke gx3 4x1 diag1,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_8x4_diag1_gridc_reprosum_run10day smoke gx3 8x1 alt01,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_6x2_alt01_gridc_reprosum_run10day smoke gx3 8x1 alt02,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_8x2_alt02_gridc_reprosum_run10day #smoke gx3 8x1 alt03,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_12x2_alt03_droundrobin_gridc_reprosum_run10day @@ -93,7 +93,7 @@ smoke gx1 18x1 reprosum,run10day,cmplogrest,thread,gridc smoke gx1 18x1 seabedprob,reprosum,run10day,cmplogrest,thread,gridc smoke_gx1_15x2_gridc_reprosum_run10day_seabedprob smoke gx3 8x1 fsd12,reprosum,run10day,cmplogrest,thread,gridc 
smoke_gx3_14x2_fsd12_gridc_reprosum_run10day smoke gx3 8x1 isotope,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_11x2_gridc_isotope_reprosum_run10day -smoke gx3 8x1 snwitdrdg,snwgrain,icdefault,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_8x4_gridc_icdefault_reprosum_run10day_snwitdrdg_snwgrain +smoke gx3 8x1 snwitdrdg,snwgrain,icdefault,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_8x4_gridc_icdefault_reprosum_run10day_snwgrain_snwitdrdg #smoke gx3 8x1 dynpicard,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_6x4_dynpicard_gridc_reprosum_run10day smoke gx3 8x1 zsal,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_8x3_gridc_reprosum_run10day_zsal smoke gx3 4x2x25x29x4 reprosum,run10day,gridc smoke_gx3_1x1x100x116x1_gridc_reprosum_run10day_thread @@ -133,8 +133,8 @@ smoke gbox128 14x2 boxrestore,reprosum,run10day,gridcd smoke gbox80 4x5 box2001,reprosum,run10day,gridcd smoke gbox80 11x3 boxslotcyl,reprosum,run10day,gridcd -smoke gx3 4x2 diag1,reprosum,run10day,cmplogrest,gridcd smoke_gx3_8x4_gridcd_diag1_reprosum_run10day -smoke gx3 4x1 diag1,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_8x4_gridcd_diag1_reprosum_run10day +smoke gx3 4x2 diag1,reprosum,run10day,cmplogrest,gridcd smoke_gx3_8x4_diag1_gridcd_reprosum_run10day +smoke gx3 4x1 diag1,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_8x4_diag1_gridcd_reprosum_run10day smoke gx3 8x1 alt01,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_6x2_alt01_gridcd_reprosum_run10day smoke gx3 8x1 alt02,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_8x2_alt02_gridcd_reprosum_run10day #smoke gx3 8x1 alt03,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_12x2_alt03_droundrobin_gridcd_reprosum_run10day @@ -147,7 +147,7 @@ smoke gx1 18x1 reprosum,run10day,cmplogrest,thread,gridcd smoke gx1 18x1 seabedprob,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx1_15x2_gridcd_reprosum_run10day_seabedprob smoke gx3 8x1 fsd12,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_14x2_fsd12_gridcd_reprosum_run10day smoke gx3 8x1 isotope,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_11x2_gridcd_isotope_reprosum_run10day -smoke gx3 8x1 snwitdrdg,snwgrain,icdefault,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_8x4_gridcd_icdefault_reprosum_run10day_snwitdrdg_snwgrain +smoke gx3 8x1 snwitdrdg,snwgrain,icdefault,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_8x4_gridcd_icdefault_reprosum_run10day_snwgrain_snwitdrdg #smoke gx3 8x1 dynpicard,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_6x4_dynpicard_gridcd_reprosum_run10day smoke gx3 8x1 zsal,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_8x3_gridcd_reprosum_run10day_zsal smoke gx3 4x2x25x29x4 reprosum,run10day,gridcd smoke_gx3_1x1x100x116x1_gridcd_reprosum_run10day_thread diff --git a/doc/source/conf.py b/doc/source/conf.py index 8b9aecaa6..a1b2871ae 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -62,9 +62,9 @@ # built documents. # # The short X.Y version. -version = u'6.3.1' +version = u'6.4.0' # The full version, including alpha/beta/rc tags. -version = u'6.3.1' +version = u'6.4.0' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
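The omp_suite.ts renames above (for example gridc_diag1 becoming diag1_gridc, and snwitdrdg_snwgrain becoming snwgrain_snwitdrdg) follow from how cice.setup constructs case names: the option sets are de-duplicated and sorted alphabetically before being joined, so the bfbcomp names listed in a suite file must use the same sorted order. A rough sketch of that naming rule (Python rather than the csh of cice.setup; the function name and argument layout are hypothetical)::

    # Sketch of the sorted case-name construction assumed above: duplicates
    # and "none" are dropped, remaining sets are sorted, then joined by "_".
    def bfbcomp_name(test, grid, pes, sets):
        soptions = sorted({s for s in sets.split(",") if s and s != "none"})
        return "_".join([test, grid, pes] + soptions)

    print(bfbcomp_name("smoke", "gx3", "8x4", "gridc,diag1,reprosum,run10day"))
    # -> smoke_gx3_8x4_diag1_gridc_reprosum_run10day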
diff --git a/icepack b/icepack index 3cb1746a2..4fea17c15 160000 --- a/icepack +++ b/icepack @@ -1 +1 @@ -Subproject commit 3cb1746a202615044e38e2928f85294f9c0e72f8 +Subproject commit 4fea17c15fb63e1424cd71c0ef4365e2135d32db From 08c6b33a7a68ccca7752d0ab97516665b504c43d Mon Sep 17 00:00:00 2001 From: Philippe Blain Date: Fri, 12 Aug 2022 13:21:54 -0400 Subject: [PATCH 11/57] ice_grid: do call 'gridbox_verts' for rectangular grids (#749) At the end of subroutine ice_grid::gridbox_corners, the arrays 'lont_bounds' and 'lonu_bounds', which contain the longitude of the corners of each grid cell on the T and U grids, are converted to the [0, 360] range. In the case of rectangular grids ('grid_type = rectangular'), at the point where 'gridbox_corners' is called in 'init_grid2', 'lont_bounds' is not initialized, causing the code to abort if compiling with NaN initialization. This is due to the fact that 'gridbox_verts', which initializes 'lont_bounds' and 'latt_bounds', is not called in 'rectgrid', whereas it is called in 'popgrid[_nc]'. Do call 'gridbox_verts' in 'rectgrid', so that 'lont_bounds' and 'latt_bounds' are correctly initialized in that case also. Note that these calls are also missing in 'latlongrid' and 'cpomgrid', but since these two subroutines are not used in standalone configuration, let's not bother for now. --- cicecore/cicedynB/infrastructure/ice_grid.F90 | 2 ++ 1 file changed, 2 insertions(+) diff --git a/cicecore/cicedynB/infrastructure/ice_grid.F90 b/cicecore/cicedynB/infrastructure/ice_grid.F90 index 0ea779399..84f9f6547 100644 --- a/cicecore/cicedynB/infrastructure/ice_grid.F90 +++ b/cicecore/cicedynB/infrastructure/ice_grid.F90 @@ -1404,6 +1404,7 @@ subroutine rectgrid enddo work_g1(:,:) = work_g1(:,:) / rad_to_deg endif + call gridbox_verts(work_g1,lont_bounds) call scatter_global(ULON, work_g1, master_task, distrb_info, & field_loc_NEcorner, field_type_scalar) call ice_HaloExtrapolate(ULON, distrb_info, & @@ -1423,6 +1424,7 @@ subroutine rectgrid enddo work_g1(:,:) = work_g1(:,:) / rad_to_deg endif + call gridbox_verts(work_g1,latt_bounds) call scatter_global(ULAT, work_g1, master_task, distrb_info, & field_loc_NEcorner, field_type_scalar) call ice_HaloExtrapolate(ULAT, distrb_info, & From d673e44b363d94f0ff605472b2d70d5522c68ed6 Mon Sep 17 00:00:00 2001 From: Tony Craig Date: Mon, 15 Aug 2022 14:24:32 -0700 Subject: [PATCH 12/57] Clean up code and add several minor features (#750) * Update/improve debug_blocks output, see #718. * Add ICE_MEMUSE cice.settings flag for batch memory use Add set_env.memsmall, memmed, memlarge options To use, will require changes to the env machine files. Most machines will probably not use it. See #674. * Add setup_machparams.csh to compute batch/launch machine parameters Update cice.batch.csh and cice.launch.csh to use setup_machparams.csh See #650 * Update subroutine diagnostic_abort which calls print_state Update ice_transport_remap and ice_transport_driver to call diagnostic_abort during some errors. See also #622 * Update miniconda install information See #547 * Code cleanup based on compile with -Wall Code cleanup based on -std f2003 and f2008 checks Add -stand f08 to cheyenne_intel debug flags Add -std f2008 to cheyenne_gnu debug flags Code consistent with Fortran 2003 except for use of contiguous in 1d evp code.
* Remove all trailing blank space with script * Update the cheyenne env so qc testing works Add configuration/scripts/tests/qctest.yml file Update documentation * Update Icepack * Clean up some output * fix comments * update print_state output --- .../cicedynB/analysis/ice_diagnostics.F90 | 119 ++- .../cicedynB/analysis/ice_diagnostics_bgc.F90 | 328 ++++---- cicecore/cicedynB/analysis/ice_history.F90 | 428 +++++------ .../cicedynB/analysis/ice_history_bgc.F90 | 720 +++++++++--------- .../cicedynB/analysis/ice_history_drag.F90 | 50 +- .../cicedynB/analysis/ice_history_fsd.F90 | 13 +- .../cicedynB/analysis/ice_history_mechred.F90 | 6 +- .../cicedynB/analysis/ice_history_pond.F90 | 24 +- .../cicedynB/analysis/ice_history_shared.F90 | 38 +- .../cicedynB/analysis/ice_history_snow.F90 | 13 +- cicecore/cicedynB/dynamics/ice_dyn_eap.F90 | 2 +- cicecore/cicedynB/dynamics/ice_dyn_evp.F90 | 8 +- cicecore/cicedynB/dynamics/ice_dyn_evp_1d.F90 | 3 +- cicecore/cicedynB/dynamics/ice_dyn_shared.F90 | 30 +- cicecore/cicedynB/dynamics/ice_dyn_vp.F90 | 9 +- .../dynamics/ice_transport_driver.F90 | 3 +- .../cicedynB/dynamics/ice_transport_remap.F90 | 29 +- cicecore/cicedynB/general/ice_flux.F90 | 19 +- cicecore/cicedynB/general/ice_flux_bgc.F90 | 70 +- cicecore/cicedynB/general/ice_forcing.F90 | 591 +++++++------- cicecore/cicedynB/general/ice_forcing_bgc.F90 | 144 ++-- cicecore/cicedynB/general/ice_init.F90 | 104 +-- cicecore/cicedynB/general/ice_state.F90 | 12 +- cicecore/cicedynB/general/ice_step_mod.F90 | 116 +-- .../infrastructure/comm/mpi/ice_boundary.F90 | 594 +++++++-------- .../comm/mpi/ice_communicate.F90 | 2 +- .../infrastructure/comm/mpi/ice_exit.F90 | 2 +- .../comm/mpi/ice_gather_scatter.F90 | 57 +- .../comm/mpi/ice_global_reductions.F90 | 47 +- .../infrastructure/comm/mpi/ice_reprosum.F90 | 322 ++++---- .../infrastructure/comm/mpi/ice_timers.F90 | 30 +- .../comm/serial/ice_boundary.F90 | 556 +++++++------- .../comm/serial/ice_gather_scatter.F90 | 12 +- .../comm/serial/ice_global_reductions.F90 | 47 +- .../comm/serial/ice_reprosum.F90 | 320 ++++---- .../infrastructure/comm/serial/ice_timers.F90 | 36 +- .../cicedynB/infrastructure/ice_blocks.F90 | 26 +- .../cicedynB/infrastructure/ice_domain.F90 | 55 +- cicecore/cicedynB/infrastructure/ice_grid.F90 | 133 ++-- .../cicedynB/infrastructure/ice_memusage.F90 | 16 +- .../infrastructure/ice_memusage_gptl.c | 12 +- .../infrastructure/ice_read_write.F90 | 198 +++-- .../infrastructure/ice_restart_driver.F90 | 20 +- .../cicedynB/infrastructure/ice_restoring.F90 | 18 +- .../io/io_binary/ice_history_write.F90 | 4 +- .../io/io_binary/ice_restart.F90 | 10 +- .../io/io_netcdf/ice_history_write.F90 | 17 +- .../io/io_netcdf/ice_restart.F90 | 30 +- .../io/io_pio2/ice_history_write.F90 | 32 +- .../infrastructure/io/io_pio2/ice_pio.F90 | 58 +- .../infrastructure/io/io_pio2/ice_restart.F90 | 20 +- cicecore/drivers/direct/hadgem3/CICE.F90 | 18 +- .../drivers/direct/hadgem3/CICE_InitMod.F90 | 30 +- .../drivers/direct/hadgem3/CICE_RunMod.F90 | 50 +- .../direct/nemo_concepts/CICE_InitMod.F90 | 30 +- .../direct/nemo_concepts/CICE_RunMod.F90 | 50 +- cicecore/drivers/mct/cesm1/CICE_InitMod.F90 | 30 +- cicecore/drivers/mct/cesm1/CICE_RunMod.F90 | 38 +- cicecore/drivers/mct/cesm1/CICE_copyright.txt | 16 +- cicecore/drivers/mct/cesm1/ice_comp_esmf.F90 | 88 +-- cicecore/drivers/mct/cesm1/ice_comp_mct.F90 | 148 ++-- .../drivers/mct/cesm1/ice_cpl_indices.F90 | 36 +- .../drivers/mct/cesm1/ice_import_export.F90 | 90 +-- .../drivers/mct/cesm1/ice_prescribed_mod.F90 | 54 +- 
cicecore/drivers/nuopc/cmeps/CICE_RunMod.F90 | 8 +- .../drivers/nuopc/cmeps/CICE_copyright.txt | 16 +- cicecore/drivers/nuopc/cmeps/ice_mesh_mod.F90 | 4 +- .../nuopc/cmeps/ice_prescribed_mod.F90 | 2 +- cicecore/drivers/nuopc/dmi/CICE.F90 | 18 +- cicecore/drivers/nuopc/dmi/CICE_InitMod.F90 | 30 +- cicecore/drivers/nuopc/dmi/CICE_RunMod.F90 | 40 +- cicecore/drivers/nuopc/dmi/cice_cap.info | 76 +- cicecore/drivers/standalone/cice/CICE.F90 | 18 +- .../drivers/standalone/cice/CICE_FinalMod.F90 | 1 - .../drivers/standalone/cice/CICE_InitMod.F90 | 32 +- .../drivers/standalone/cice/CICE_RunMod.F90 | 40 +- .../unittest/gridavgchk/CICE_InitMod.F90 | 32 +- .../unittest/gridavgchk/gridavgchk.F90 | 4 +- cicecore/drivers/unittest/optargs/optargs.F90 | 4 +- .../drivers/unittest/sumchk/CICE_InitMod.F90 | 32 +- cicecore/drivers/unittest/sumchk/sumchk.F90 | 4 +- cicecore/shared/ice_arrays_column.F90 | 84 +- cicecore/shared/ice_calendar.F90 | 9 +- cicecore/shared/ice_constants.F90 | 29 +- cicecore/shared/ice_distribution.F90 | 46 +- cicecore/shared/ice_domain_size.F90 | 4 +- cicecore/shared/ice_fileunits.F90 | 8 +- cicecore/shared/ice_init_column.F90 | 282 +++---- cicecore/shared/ice_restart_column.F90 | 106 +-- cicecore/shared/ice_restart_shared.F90 | 2 +- cicecore/shared/ice_spacecurve.F90 | 81 +- configuration/scripts/cice.batch.csh | 40 +- configuration/scripts/cice.launch.csh | 13 +- configuration/scripts/cice.settings | 1 + .../scripts/machines/Macros.cheyenne_gnu | 2 +- .../scripts/machines/Macros.cheyenne_intel | 2 +- .../scripts/machines/env.cheyenne_gnu | 5 +- .../scripts/machines/env.cheyenne_intel | 5 +- .../scripts/machines/env.cheyenne_pgi | 5 +- .../scripts/options/set_env.memlarge | 2 + configuration/scripts/options/set_env.memmed | 2 + .../scripts/options/set_env.memsmall | 2 + configuration/scripts/setup_machparams.csh | 64 ++ configuration/scripts/tests/qctest.yml | 11 + doc/source/user_guide/ug_implementation.rst | 4 +- doc/source/user_guide/ug_running.rst | 6 +- doc/source/user_guide/ug_testing.rst | 7 + icepack | 2 +- 108 files changed, 3697 insertions(+), 3719 deletions(-) create mode 100644 configuration/scripts/options/set_env.memlarge create mode 100644 configuration/scripts/options/set_env.memmed create mode 100644 configuration/scripts/options/set_env.memsmall create mode 100755 configuration/scripts/setup_machparams.csh create mode 100644 configuration/scripts/tests/qctest.yml diff --git a/cicecore/cicedynB/analysis/ice_diagnostics.F90 b/cicecore/cicedynB/analysis/ice_diagnostics.F90 index ec5ad05fa..83eb840d6 100644 --- a/cicecore/cicedynB/analysis/ice_diagnostics.F90 +++ b/cicecore/cicedynB/analysis/ice_diagnostics.F90 @@ -11,6 +11,7 @@ module ice_diagnostics use ice_kinds_mod + use ice_blocks, only: nx_block, ny_block use ice_communicate, only: my_task, master_task use ice_constants, only: c0, c1 use ice_calendar, only: istep1 @@ -52,7 +53,7 @@ module ice_diagnostics real (kind=dbl_kind), parameter :: & umax_stab = 1.0_dbl_kind , & ! ice speed threshold for instability (m/s) aice_extmin = 0.15_dbl_kind ! min aice value for ice extent calc - + real (kind=dbl_kind), dimension(npnt), public :: & latpnt , & ! latitude of diagnostic points lonpnt ! 
longitude of diagnostic points @@ -112,7 +113,6 @@ module ice_diagnostics subroutine runtime_diags (dt) use ice_arrays_column, only: floe_rad_c - use ice_blocks, only: nx_block, ny_block use ice_broadcast, only: broadcast_scalar use ice_constants, only: c1, c1000, c2, p001, p5, & field_loc_center, m2_to_km2 @@ -163,7 +163,7 @@ subroutine runtime_diags (dt) etotn, mtotn, micen, msnwn, pmaxn, ketotn, & etots, mtots, mices, msnws, pmaxs, ketots, & urmsn, albtotn, arean_alb, mpndn, ptotn, spondn, & - urmss, albtots, areas_alb, mpnds, ptots, sponds + urmss, albtots, areas_alb, mpnds, ptots, sponds ! hemispheric flux quantities real (kind=dbl_kind) :: & @@ -191,7 +191,7 @@ subroutine runtime_diags (dt) ! fields at diagnostic points real (kind=dbl_kind), dimension(npnt) :: & - paice, pTair, pQa, pfsnow, pfrain, pfsw, pflw, & + paice, pTair, pQa, pfsnow, pfrain, pfsw, pflw, & pTsfc, pevap, pfswabs, pflwout, pflat, pfsens, & pfsurf, pfcondtop, psst, psss, pTf, hiavg, hsavg, hbravg, & pfhocn, psalt, fsdavg, & @@ -202,7 +202,7 @@ subroutine runtime_diags (dt) work1, work2 real (kind=dbl_kind), parameter :: & - maxval_spval = -0.9_dbl_kind*HUGE(0.0_dbl_kind) ! spval to detect + maxval_spval = -0.9_dbl_kind*HUGE(0.0_dbl_kind) ! spval to detect ! undefined values returned from global_maxval. if global_maxval ! is applied to a region that does not exist (for instance ! southern hemisphere in box cases), global_maxval @@ -290,7 +290,7 @@ subroutine runtime_diags (dt) do n = 1, ncat work1(i,j,iblk) = work1(i,j,iblk) & + aicen(i,j,n,iblk) & - * trcrn(i,j,nt_apnd,n,iblk) & + * trcrn(i,j,nt_apnd,n,iblk) & * trcrn(i,j,nt_hpnd,n,iblk) enddo enddo @@ -369,8 +369,8 @@ subroutine runtime_diags (dt) enddo enddo !$OMP END PARALLEL DO - - arean_alb = global_sum(aice, distrb_info, field_loc_center, work2) + + arean_alb = global_sum(aice, distrb_info, field_loc_center, work2) albtotn = global_sum_prod(aice, work1, distrb_info, & field_loc_center, work2) @@ -395,7 +395,7 @@ subroutine runtime_diags (dt) enddo !$OMP END PARALLEL DO - areas_alb = global_sum(aice, distrb_info, field_loc_center, work2) + areas_alb = global_sum(aice, distrb_info, field_loc_center, work2) albtots = global_sum_prod(aice, work1, distrb_info, & field_loc_center, work2) @@ -506,7 +506,7 @@ subroutine runtime_diags (dt) if (pmaxs < maxval_spval) pmaxs = c0 pmaxn = pmaxn / c1000 ! convert to kN/m - pmaxs = pmaxs / c1000 + pmaxs = pmaxs / c1000 if (print_global) then @@ -617,14 +617,14 @@ subroutine runtime_diags (dt) enddo !$OMP END PARALLEL DO - else ! fsurf is computed by atmosphere model + else ! fsurf is computed by atmosphere model !$OMP PARALLEL DO PRIVATE(iblk,i,j) do iblk = 1, nblocks do j = 1, ny_block do i = 1, nx_block work1(i,j,iblk) = & - (fsurf(i,j,iblk) - flat(i,j,iblk)) & + (fsurf(i,j,iblk) - flat(i,j,iblk)) & * aice(i,j,iblk) enddo enddo @@ -639,7 +639,7 @@ subroutine runtime_diags (dt) field_loc_center, tarean) fhatms = global_sum(work1, distrb_info, & field_loc_center, tareas) - + !$OMP PARALLEL DO PRIVATE(iblk,i,j) do iblk = 1, nblocks do j = 1, ny_block @@ -741,7 +741,7 @@ subroutine runtime_diags (dt) ! total ice, snow and pond mass mtotn = micen + msnwn + mpndn mtots = mices + msnws + mpnds - + ! mass change since beginning of time step delmin = mtotn - totmn delmis = mtots - totms @@ -760,14 +760,14 @@ subroutine runtime_diags (dt) fluxs = c0 if( arean > c0) then ! water associated with frazil ice included in fresh - fluxn = rnn + snn + evpn - sfreshn + fluxn = rnn + snn + evpn - sfreshn if (.not. 
update_ocn_f) then fluxn = fluxn + frzn endif endif if( areas > c0) then ! water associated with frazil ice included in fresh - fluxs = rns + sns + evps - sfreshs + fluxs = rns + sns + evps - sfreshs if (.not. update_ocn_f) then fluxs = fluxs + frzs endif @@ -933,7 +933,7 @@ subroutine runtime_diags (dt) pfsw(n) = fsw(i,j,iblk) ! shortwave radiation pflw(n) = flw(i,j,iblk) ! longwave radiation paice(n) = aice(i,j,iblk) ! ice area - + fsdavg(n) = c0 ! avg floe effective radius hiavg(n) = c0 ! avg snow/ice thickness hsavg(n) = c0 @@ -998,7 +998,7 @@ subroutine runtime_diags (dt) pcongel(n) = congel(i,j,iblk) ! congelation ice pdhi(n) = vice(i,j,iblk) - pdhi(n) ! ice thickness change pdhs(n) = vsno(i,j,iblk) - pdhs(n) ! snow thickness change - pde(n) =-(work1(i,j,iblk)- pde(n))/dt ! ice/snow energy change + pde(n) =-(work1(i,j,iblk)- pde(n))/dt ! ice/snow energy change psst(n) = sst(i,j,iblk) ! sea surface temperature psss(n) = sss(i,j,iblk) ! sea surface salinity pTf(n) = Tf(i,j,iblk) ! freezing temperature @@ -1045,7 +1045,7 @@ subroutine runtime_diags (dt) call broadcast_scalar(psss (n), pmloc(n)) call broadcast_scalar(pTf (n), pmloc(n)) call broadcast_scalar(pfhocn (n), pmloc(n)) - + enddo ! npnt endif ! print_points @@ -1093,7 +1093,7 @@ subroutine runtime_diags (dt) write(nu_diag,901) 'arwt snw mass (kg) = ',msnwn,msnws if (tr_pond_topo) & write(nu_diag,901) 'arwt pnd mass (kg) = ',mpndn,mpnds - + write(nu_diag,901) 'arwt tot mass (kg) = ',mtotn,mtots write(nu_diag,901) 'arwt tot mass chng(kg) = ',delmin,delmis write(nu_diag,901) 'arwt water flux = ',fluxn,fluxs @@ -1249,9 +1249,6 @@ subroutine runtime_diags (dt) endif ! print_points endif ! my_task = master_task - 799 format (27x,a24) - 800 format (a25,2x,f24.17) - 801 format (a25,2x,1pe24.17) 899 format (27x,a24,2x,a24) 900 format (a25,2x,f24.17,2x,f24.17) 901 format (a25,2x,1pe24.17,2x,1pe24.17) @@ -1268,7 +1265,6 @@ end subroutine runtime_diags subroutine init_mass_diags - use ice_blocks, only: nx_block, ny_block use ice_constants, only: field_loc_center use ice_domain, only: distrb_info, nblocks use ice_domain_size, only: n_iso, n_aero, ncat, max_blocks @@ -1387,7 +1383,7 @@ subroutine init_mass_diags do n = 1, ncat work1(i,j,iblk) = work1(i,j,iblk) & + aicen(i,j,n,iblk) & - * trcrn(i,j,nt_apnd,n,iblk) & + * trcrn(i,j,nt_apnd,n,iblk) & * trcrn(i,j,nt_hpnd,n,iblk) enddo enddo @@ -1412,7 +1408,6 @@ end subroutine init_mass_diags subroutine total_energy (work) - use ice_blocks, only: nx_block, ny_block use ice_domain, only: nblocks use ice_domain_size, only: ncat, nilyr, nslyr, max_blocks use ice_grid, only: tmask @@ -1499,7 +1494,6 @@ end subroutine total_energy subroutine total_salt (work) - use ice_blocks, only: nx_block, ny_block use ice_domain, only: nblocks use ice_domain_size, only: ncat, nilyr, max_blocks use ice_grid, only: tmask @@ -1623,7 +1617,7 @@ subroutine init_diags plat(:) = -999._dbl_kind plon(:) = -999._dbl_kind - ! find minimum distance to diagnostic points on this processor + ! find minimum distance to diagnostic points on this processor do n = 1, npnt if (lonpnt(n) > c180) lonpnt(n) = lonpnt(n) - c360 @@ -1638,7 +1632,7 @@ subroutine init_diags ! 
This is computing closest point, Could add a CRITICAL but it's just initialization !!$XXXOMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,latdis,londis,totdis) do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo @@ -1665,7 +1659,7 @@ subroutine init_diags endif - ! find global minimum distance to diagnostic points + ! find global minimum distance to diagnostic points mindis_g = global_minval(mindis, distrb_info) ! save indices of minimum-distance grid cell @@ -1708,16 +1702,10 @@ end subroutine init_diags subroutine debug_ice(iblk, plabeld) - use ice_kinds_mod - use ice_calendar, only: istep1 - use ice_communicate, only: my_task - use ice_blocks, only: nx_block, ny_block - character (char_len), intent(in) :: plabeld integer (kind=int_kind), intent(in) :: iblk - ! local - integer (kind=int_kind) :: i, j, m + ! local character(len=*), parameter :: subname='(debug_ice)' if (istep1 >= debug_model_step) then @@ -1757,7 +1745,8 @@ subroutine print_state(plabel,i,j,iblk) use ice_blocks, only: block, get_block use ice_domain, only: blocks_ice use ice_domain_size, only: ncat, nilyr, nslyr, nfsd - use ice_state, only: aice0, aicen, vicen, vsnon, uvel, vvel, & + use ice_grid, only: TLAT, TLON + use ice_state, only: aice, aice0, aicen, vicen, vsnon, uvel, vvel, & uvelE, vvelE, uvelN, vvelN, trcrn use ice_flux, only: uatm, vatm, potT, Tair, Qa, flw, frain, fsnow, & fsens, flat, evap, flwout, swvdr, swvdf, swidr, swidf, rhoa, & @@ -1765,7 +1754,7 @@ subroutine print_state(plabel,i,j,iblk) character (len=20), intent(in) :: plabel - integer (kind=int_kind), intent(in) :: & + integer (kind=int_kind), intent(in) :: & i, j , & ! horizontal indices iblk ! block index @@ -1799,15 +1788,20 @@ subroutine print_state(plabel,i,j,iblk) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) - write(nu_diag,*) subname,plabel - write(nu_diag,*) 'istep1, my_task, i, j, iblk:', & + write(nu_diag,*) subname,' ',trim(plabel) + write(nu_diag,*) subname,' istep1, my_task, i, j, iblk:', & istep1, my_task, i, j, iblk - write(nu_diag,*) 'Global i and j:', & + write(nu_diag,*) subname,' Global block:', this_block%block_id + write(nu_diag,*) subname,' Global i and j:', & this_block%i_glob(i), & - this_block%j_glob(j) + this_block%j_glob(j) + write (nu_diag,*) subname,' Lat, Lon (degrees):', & + TLAT(i,j,iblk)*rad_to_deg, & + TLON(i,j,iblk)*rad_to_deg write(nu_diag,*) ' ' + write(nu_diag,*) 'aice ', aice(i,j,iblk) write(nu_diag,*) 'aice0', aice0(i,j,iblk) do n = 1, ncat write(nu_diag,*) ' ' @@ -1977,7 +1971,7 @@ subroutine print_points_state(plabel,ilabel) i = piloc(m) j = pjloc(m) iblk = pbloc(m) - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) if (present(ilabel)) then write(llabel,'(i6,a1,i3,a1)') ilabel,':',m,':' @@ -1995,7 +1989,7 @@ subroutine print_points_state(plabel,ilabel) istep1, my_task, i, j, iblk write(nu_diag,*) trim(llabel),'Global i and j=', & this_block%i_glob(i), & - this_block%j_glob(j) + this_block%j_glob(j) write(nu_diag,*) trim(llabel),'aice0=', aice0(i,j,iblk) do n = 1, ncat @@ -2089,20 +2083,18 @@ end subroutine print_points_state ! 
prints error information prior to aborting - subroutine diagnostic_abort(istop, jstop, iblk, istep1, stop_label) + subroutine diagnostic_abort(istop, jstop, iblk, stop_label) use ice_blocks, only: block, get_block - use ice_communicate, only: my_task use ice_domain, only: blocks_ice use ice_grid, only: TLAT, TLON use ice_state, only: aice integer (kind=int_kind), intent(in) :: & istop, jstop, & ! indices of grid cell where model aborts - iblk , & ! block index - istep1 ! time step number + iblk ! block index - character (char_len), intent(in) :: stop_label + character (len=*), intent(in) :: stop_label ! local variables @@ -2118,20 +2110,17 @@ subroutine diagnostic_abort(istop, jstop, iblk, istep1, stop_label) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) - this_block = get_block(blocks_ice(iblk),iblk) - - write (nu_diag,*) 'istep1, my_task, iblk =', & - istep1, my_task, iblk - write (nu_diag,*) 'Global block:', this_block%block_id - if (istop > 0 .and. jstop > 0) & - write (nu_diag,*) 'Global i and j:', & - this_block%i_glob(istop), & - this_block%j_glob(jstop) - write (nu_diag,*) 'Lat, Lon:', & - TLAT(istop,jstop,iblk)*rad_to_deg, & - TLON(istop,jstop,iblk)*rad_to_deg - write (nu_diag,*) 'aice:', & - aice(istop,jstop,iblk) + this_block = get_block(blocks_ice(iblk),iblk) + + call flush_fileunit(nu_diag) + if (istop > 0 .and. jstop > 0) then + call print_state(trim(stop_label),istop,jstop,iblk) + else + write (nu_diag,*) subname,' istep1, my_task, iblk =', & + istep1, my_task, iblk + write (nu_diag,*) subname,' Global block:', this_block%block_id + endif + call flush_fileunit(nu_diag) call abort_ice (subname//'ERROR: '//trim(stop_label)) end subroutine diagnostic_abort diff --git a/cicecore/cicedynB/analysis/ice_diagnostics_bgc.F90 b/cicecore/cicedynB/analysis/ice_diagnostics_bgc.F90 index 74485a5e2..f4528dd5d 100644 --- a/cicecore/cicedynB/analysis/ice_diagnostics_bgc.F90 +++ b/cicecore/cicedynB/analysis/ice_diagnostics_bgc.F90 @@ -41,7 +41,7 @@ module ice_diagnostics_bgc ! Nicole Jeffery, LANL subroutine hbrine_diags - + use ice_arrays_column, only: darcy_V use ice_broadcast, only: broadcast_scalar, broadcast_array use ice_diagnostics, only: npnt, print_points, pmloc, piloc, pjloc, pbloc @@ -84,27 +84,27 @@ subroutine hbrine_diags if (my_task == pmloc(n)) then i = piloc(n) j = pjloc(n) - iblk = pbloc(n) - phinS1(n) = c0 - phinS(n) = c0 - pfbri(n) = trcrn(i,j,nt_fbri,1,iblk) + iblk = pbloc(n) + phinS1(n) = c0 + phinS(n) = c0 + pfbri(n) = trcrn(i,j,nt_fbri,1,iblk) pdarcy_V(n) = darcy_V(i,j,1,iblk) if (aice(i,j,iblk) > c0) & phinS(n) = trcr(i,j,nt_fbri,iblk)*vice(i,j,iblk)/aice(i,j,iblk) if (aicen(i,j,1,iblk)> c0) & phinS1(n) = trcrn(i,j,nt_fbri,1,iblk)*vicen(i,j,1,iblk)/& - aicen(i,j,1,iblk) + aicen(i,j,1,iblk) do k = 1,nilyr pSin1(n,k) = trcrn(i,j,nt_sice+k-1,1,iblk) pSin(n,k) = trcr(i,j,nt_sice+k-1,iblk) enddo endif ! my_task = pmloc - - call broadcast_array (pSin (n,:), pmloc(n)) - call broadcast_array (pSin1 (n,:), pmloc(n)) - call broadcast_scalar(pfbri (n), pmloc(n)) - call broadcast_scalar(phinS1 (n), pmloc(n)) - call broadcast_scalar(phinS (n), pmloc(n)) + + call broadcast_array (pSin (n,:), pmloc(n)) + call broadcast_array (pSin1 (n,:), pmloc(n)) + call broadcast_scalar(pfbri (n), pmloc(n)) + call broadcast_scalar(phinS1 (n), pmloc(n)) + call broadcast_scalar(phinS (n), pmloc(n)) call broadcast_scalar(pdarcy_V(n), pmloc(n)) enddo ! npnt endif ! 
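! A minimal sketch of the gather-and-print pattern used by these point
! diagnostics, assuming npnt, my_task, master_task, nu_diag and the
! piloc/pjloc/pbloc/pmloc index arrays as above; pfoo and foo are
! hypothetical names. Each value is defined only on the task that owns
! diagnostic point n, so it is broadcast from pmloc(n) before the master
! task prints it:
!
!      do n = 1, npnt
!         if (my_task == pmloc(n)) then   ! only the owning task has the data
!            pfoo(n) = foo(piloc(n),pjloc(n),pbloc(n))
!         endif
!         call broadcast_scalar(pfoo(n), pmloc(n))   ! now valid on every task
!      enddo
!      if (my_task == master_task) write(nu_diag,*) 'foo = ', pfoo(:)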
print_points @@ -125,22 +125,22 @@ subroutine hbrine_diags write(nu_diag,*) '------ hbrine ------' write(nu_diag,900) 'hbrine, (m) = ',phinS(1),phinS(2) write(nu_diag,900) 'fbri, cat1 (m) = ',pfbri(1),pfbri(2) - write(nu_diag,900) 'hbrine cat1, (m) = ',phinS1(1),phinS1(2) - write(nu_diag,900) 'darcy_V cat1, (m/s)= ',pdarcy_V(1),pdarcy_V(2) - if (ktherm == 2) then + write(nu_diag,900) 'hbrine cat1, (m) = ',phinS1(1),phinS1(2) + write(nu_diag,900) 'darcy_V cat1, (m/s)= ',pdarcy_V(1),pdarcy_V(2) + if (ktherm == 2) then write(nu_diag,*) ' ' write(nu_diag,*) '------ Thermosaline Salinity ------' write(nu_diag,803) 'Sice1(1) cat1 S (ppt)','Sice1(2) cat1 S' write(nu_diag,*) '---------------------------------------------------' - write(nu_diag,802) ((pSin1(n,k),n=1,2), k = 1,nilyr) + write(nu_diag,802) ((pSin1(n,k),n=1,2), k = 1,nilyr) write(nu_diag,*) ' ' write(nu_diag,803) 'Sice(1) bulk S (ppt) ','Sice(2) bulk S' write(nu_diag,*) '---------------------------------------------------' - write(nu_diag,802) ((pSin(n,k),n=1,2), k = 1,nilyr) + write(nu_diag,802) ((pSin(n,k),n=1,2), k = 1,nilyr) write(nu_diag,*) ' ' endif endif ! print_points - endif ! my_task = master_task + endif ! my_task = master_task 802 format (f24.17,2x,f24.17) 803 format (a25,2x,a25) @@ -198,7 +198,7 @@ subroutine bgc_diags integer (kind=int_kind), dimension(icepack_max_doc) :: & nt_bgc_doc, nlt_bgc_DOC integer (kind=int_kind), dimension(icepack_max_don) :: & - nt_bgc_don, nlt_bgc_DON + nt_bgc_don, nlt_bgc_DON integer (kind=int_kind), dimension(icepack_max_aero) :: & nt_zaero, nlt_zaero, nlt_zaero_sw integer (kind=int_kind), dimension(icepack_max_fe) :: & @@ -211,7 +211,7 @@ subroutine bgc_diags real (kind=dbl_kind), dimension(npnt,icepack_max_don) :: & pDON_ac, pDON_sk real (kind=dbl_kind), dimension(npnt,icepack_max_fe ) :: & - pFed_ac, pFed_sk, pFep_ac, pFep_sk + pFed_ac, pFed_sk, pFep_ac, pFep_sk real (kind=dbl_kind), dimension(npnt,icepack_max_aero) :: & pflux_zaero, pflux_snow_zaero, pflux_atm_zaero, & pflux_atm_zaero_s @@ -226,7 +226,7 @@ subroutine bgc_diags real (kind=dbl_kind), dimension(npnt,2,icepack_max_don) :: & pDONs real (kind=dbl_kind), dimension(npnt,2,icepack_max_fe ) :: & - pFeds, pFeps + pFeds, pFeps real (kind=dbl_kind), dimension(npnt,2,icepack_max_aero) :: & pzaeros real (kind=dbl_kind), dimension(npnt,nblyr+1) :: & @@ -240,10 +240,10 @@ subroutine bgc_diags real (kind=dbl_kind), dimension(npnt,nblyr+1,icepack_max_don) :: & pDON real (kind=dbl_kind), dimension(npnt,nblyr+1,icepack_max_fe ) :: & - pFed, pFep - real (kind=dbl_kind), dimension (nblyr+1) :: & + pFed, pFep + real (kind=dbl_kind), dimension (nblyr+1) :: & zspace - real (kind=dbl_kind), dimension (npnt,nslyr+nilyr+2) :: & + real (kind=dbl_kind), dimension (npnt,nslyr+nilyr+2) :: & pchlsw real (kind=dbl_kind), dimension(npnt,nslyr+nilyr+2,icepack_max_aero) :: & pzaerosw @@ -275,7 +275,7 @@ subroutine bgc_diags zspace(:) = c1/real(nblyr,kind=dbl_kind) zspace(1) = zspace(1)*p5 - zspace(nblyr+1) = zspace(nblyr+1)*p5 + zspace(nblyr+1) = zspace(nblyr+1)*p5 klev = 1+nilyr+nslyr !----------------------------------------------------------------- @@ -307,26 +307,26 @@ subroutine bgc_diags pNit_ac(n) = c0 if (tr_bgc_N) then do k = 1,n_algae - pN_ac(n,k) = ocean_bio(i,j,nlt_bgc_N(k),iblk) + pN_ac(n,k) = ocean_bio(i,j,nlt_bgc_N(k),iblk) enddo !n_algae endif !tr_bgc_N if (tr_bgc_C) then do k = 1,n_doc - pDOC_ac(n,k) = ocean_bio(i,j,nlt_bgc_DOC(k),iblk) + pDOC_ac(n,k) = ocean_bio(i,j,nlt_bgc_DOC(k),iblk) enddo !n_algae endif !tr_bgc_N if (tr_bgc_DON) then do 
k = 1,n_don - pDON_ac(n,k) = ocean_bio(i,j,nlt_bgc_DON(k),iblk) - enddo + pDON_ac(n,k) = ocean_bio(i,j,nlt_bgc_DON(k),iblk) + enddo endif if (tr_bgc_Fe ) then - do k = 1,n_fed - pFed_ac (n,k) = ocean_bio(i,j,nlt_bgc_Fed (k),iblk) - enddo - do k = 1,n_fep - pFep_ac (n,k) = ocean_bio(i,j,nlt_bgc_Fep (k),iblk) - enddo + do k = 1,n_fed + pFed_ac (n,k) = ocean_bio(i,j,nlt_bgc_Fed (k),iblk) + enddo + do k = 1,n_fep + pFep_ac (n,k) = ocean_bio(i,j,nlt_bgc_Fep (k),iblk) + enddo endif if (tr_bgc_Nit) & pNit_ac(n) = ocean_bio(i,j,nlt_bgc_Nit,iblk) ! nit(i,j,iblk) @@ -359,10 +359,10 @@ subroutine bgc_diags pDON_sk(n,:) = c0 pFed_sk(n,:) = c0 pFep_sk(n,:) = c0 - - do k = 1,n_algae + + do k = 1,n_algae pN_sk(n,k) = trcr (i,j,nt_bgc_N(k), iblk) - pflux_N(n,k) = flux_bio(i,j,nlt_bgc_N(k), iblk)*mps_to_cmpdy/c100 + pflux_N(n,k) = flux_bio(i,j,nlt_bgc_N(k), iblk)*mps_to_cmpdy/c100 enddo if (tr_bgc_C) then do k = 1,n_doc @@ -375,27 +375,27 @@ subroutine bgc_diags enddo endif if (tr_bgc_Fe ) then - do k = 1,n_fed + do k = 1,n_fed pFed_sk (n,k)= trcr (i,j,nt_bgc_Fed(k), iblk) enddo - do k = 1,n_fep + do k = 1,n_fep pFep_sk (n,k)= trcr (i,j,nt_bgc_Fep(k), iblk) enddo endif if (tr_bgc_Nit) then - pNit_sk(n) = trcr (i,j, nt_bgc_Nit, iblk) - pflux_NO(n) = flux_bio(i,j,nlt_bgc_Nit, iblk)*mps_to_cmpdy/c100 + pNit_sk(n) = trcr (i,j, nt_bgc_Nit, iblk) + pflux_NO(n) = flux_bio(i,j,nlt_bgc_Nit, iblk)*mps_to_cmpdy/c100 endif if (tr_bgc_Am) then pAm_sk(n) = trcr (i,j, nt_bgc_Am, iblk) - pflux_Am(n) = flux_bio(i,j,nlt_bgc_Am, iblk)*mps_to_cmpdy/c100 + pflux_Am(n) = flux_bio(i,j,nlt_bgc_Am, iblk)*mps_to_cmpdy/c100 endif if (tr_bgc_Sil) then - pSil_sk(n) = trcr (i,j, nt_bgc_Sil, iblk) + pSil_sk(n) = trcr (i,j, nt_bgc_Sil, iblk) endif if (tr_bgc_hum) then - phum_sk(n) = trcr (i,j, nt_bgc_hum, iblk) - pflux_hum(n)= flux_bio(i,j,nlt_bgc_hum, iblk)*mps_to_cmpdy/c100 + phum_sk(n) = trcr (i,j, nt_bgc_hum, iblk) + pflux_hum(n)= flux_bio(i,j,nlt_bgc_hum, iblk)*mps_to_cmpdy/c100 endif if (tr_bgc_DMS) then pDMSPp_sk(n) = trcr (i,j,nt_bgc_DMSPp,iblk) @@ -419,26 +419,26 @@ subroutine bgc_diags pflux_atm_zaero(n,:) = c0 pflux_snow_zaero(n,:) = c0 if (tr_bgc_Nit) then - pflux_NO(n) = flux_bio(i,j,nlt_bgc_Nit,iblk)*mps_to_cmpdy/c100 - pflux_atm_NO(n) = fbio_atmice(i,j,nlt_bgc_Nit,iblk)*mps_to_cmpdy/c100 + pflux_NO(n) = flux_bio(i,j,nlt_bgc_Nit,iblk)*mps_to_cmpdy/c100 + pflux_atm_NO(n) = fbio_atmice(i,j,nlt_bgc_Nit,iblk)*mps_to_cmpdy/c100 pflux_snow_NO(n) = fbio_snoice(i,j,nlt_bgc_Nit,iblk)*mps_to_cmpdy/c100 endif if (tr_bgc_Am) then - pflux_Am(n) = flux_bio(i,j,nlt_bgc_Am,iblk)*mps_to_cmpdy/c100 - pflux_atm_Am(n) = fbio_atmice(i,j,nlt_bgc_Am,iblk)*mps_to_cmpdy/c100 + pflux_Am(n) = flux_bio(i,j,nlt_bgc_Am,iblk)*mps_to_cmpdy/c100 + pflux_atm_Am(n) = fbio_atmice(i,j,nlt_bgc_Am,iblk)*mps_to_cmpdy/c100 pflux_snow_Am(n) = fbio_snoice(i,j,nlt_bgc_Am,iblk)*mps_to_cmpdy/c100 - endif + endif if (tr_bgc_hum) then - pflux_hum(n) = flux_bio(i,j,nlt_bgc_hum,iblk)*mps_to_cmpdy/c100 + pflux_hum(n) = flux_bio(i,j,nlt_bgc_hum,iblk)*mps_to_cmpdy/c100 endif if (tr_bgc_N) then do k = 1,n_algae - pflux_N(n,k) = flux_bio(i,j,nlt_bgc_N(k),iblk)*mps_to_cmpdy/c100 + pflux_N(n,k) = flux_bio(i,j,nlt_bgc_N(k),iblk)*mps_to_cmpdy/c100 enddo endif if (tr_zaero) then do k = 1,n_zaero - pflux_zaero(n,k) = flux_bio(i,j,nlt_zaero(k),iblk)*mps_to_cmpdy/c100 + pflux_zaero(n,k) = flux_bio(i,j,nlt_zaero(k),iblk)*mps_to_cmpdy/c100 pflux_atm_zaero_s(n,k)= flux_bio_atm(i,j,nlt_zaero(k),iblk)*mps_to_cmpdy/c100 !*aice pflux_atm_zaero(n,k) = 
fbio_atmice(i,j,nlt_zaero(k),iblk)*mps_to_cmpdy/c100 pflux_snow_zaero(n,k) = fbio_snoice(i,j,nlt_zaero(k),iblk)*mps_to_cmpdy/c100 @@ -465,35 +465,35 @@ subroutine bgc_diags pPON(n,k) = c0 phum(n,k) = c0 pNO(n,k) = c0 - if (tr_bgc_Nit) pNO(n,k) = trcr(i,j,nt_bgc_Nit+k-1,iblk) - if (tr_bgc_Am) pAm(n,k) = trcr(i,j,nt_bgc_Am+k-1,iblk) + if (tr_bgc_Nit) pNO(n,k) = trcr(i,j,nt_bgc_Nit+k-1,iblk) + if (tr_bgc_Am) pAm(n,k) = trcr(i,j,nt_bgc_Am+k-1,iblk) if (tr_bgc_N) then do nn = 1, n_algae pN(n,k,nn) = trcr(i,j,nt_bgc_N(nn)+k-1,iblk) - enddo - endif + enddo + endif if (tr_bgc_C) then do nn = 1, n_doc pDOC(n,k,nn) = trcr(i,j,nt_bgc_DOC(nn)+k-1,iblk) - enddo - endif + enddo + endif if (tr_bgc_DON) then do nn = 1, n_don pDON(n,k,nn) = trcr(i,j,nt_bgc_DON(nn)+k-1,iblk) - enddo - endif + enddo + endif if (tr_bgc_Fe) then do nn = 1, n_fed pFed(n,k,nn) = trcr(i,j,nt_bgc_Fed(nn)+k-1,iblk) - enddo + enddo do nn = 1, n_fep pFep(n,k,nn) = trcr(i,j,nt_bgc_Fep(nn)+k-1,iblk) - enddo - endif + enddo + endif if (tr_zaero) then do nn = 1, n_zaero pzaero(n,k,nn) = trcr(i,j,nt_zaero(nn)+k-1,iblk) - enddo + enddo endif if (tr_bgc_PON) pPON(n,k) = trcr(i,j,nt_bgc_PON+k-1,iblk) if (tr_bgc_hum) phum(n,k) = trcr(i,j,nt_bgc_hum+k-1,iblk) @@ -515,7 +515,7 @@ subroutine bgc_diags pPONs(n,k) = c0 phums(n,k) = c0 pNOs(n,k) = c0 - if (tr_bgc_Nit) pNOs(n,k) = trcr(i,j,nt_bgc_Nit+nblyr+k,iblk) + if (tr_bgc_Nit) pNOs(n,k) = trcr(i,j,nt_bgc_Nit+nblyr+k,iblk) if (tr_bgc_Am) pAms(n,k) = trcr(i,j,nt_bgc_Am+nblyr+k,iblk) if (tr_bgc_N) then do nn = 1, n_algae @@ -533,10 +533,10 @@ subroutine bgc_diags enddo endif if (tr_bgc_Fe ) then - do nn = 1, n_fed + do nn = 1, n_fed pFeds(n,k,nn) = trcr(i,j,nt_bgc_Fed(nn)+nblyr+k,iblk) enddo - do nn = 1, n_fep + do nn = 1, n_fep pFeps(n,k,nn) = trcr(i,j,nt_bgc_Fep(nn)+nblyr+k,iblk) enddo endif @@ -547,7 +547,7 @@ subroutine bgc_diags endif if (tr_bgc_PON)pPONs(n,k) =trcr(i,j,nt_bgc_PON+nblyr+k,iblk) if (tr_bgc_hum)phums(n,k) =trcr(i,j,nt_bgc_hum+nblyr+k,iblk) - enddo !k + enddo !k endif pchlsw(n,:) = c0 pzaerosw(n,:,:) = c0 @@ -560,17 +560,17 @@ subroutine bgc_diags enddo endif enddo - endif ! dEdd_algae + endif ! dEdd_algae endif ! my_task = pmloc - - call broadcast_scalar (pNit_ac (n), pmloc(n)) - call broadcast_scalar (pAm_ac (n), pmloc(n)) - call broadcast_scalar (pSil_ac (n), pmloc(n)) - call broadcast_scalar (phum_ac (n), pmloc(n)) - call broadcast_scalar (pDMSP_ac (n), pmloc(n)) - call broadcast_scalar (pDMS_ac (n), pmloc(n)) - call broadcast_scalar (pflux_NO (n), pmloc(n)) - call broadcast_scalar (pflux_Am (n), pmloc(n)) + + call broadcast_scalar (pNit_ac (n), pmloc(n)) + call broadcast_scalar (pAm_ac (n), pmloc(n)) + call broadcast_scalar (pSil_ac (n), pmloc(n)) + call broadcast_scalar (phum_ac (n), pmloc(n)) + call broadcast_scalar (pDMSP_ac (n), pmloc(n)) + call broadcast_scalar (pDMS_ac (n), pmloc(n)) + call broadcast_scalar (pflux_NO (n), pmloc(n)) + call broadcast_scalar (pflux_Am (n), pmloc(n)) call broadcast_scalar (pflux_hum (n), pmloc(n)) call broadcast_array (pN_ac (n,:), pmloc(n)) call broadcast_array (pflux_N (n,:), pmloc(n)) @@ -578,8 +578,8 @@ subroutine bgc_diags call broadcast_array (pDON_ac (n,:), pmloc(n)) call broadcast_array (pFed_ac (n,:), pmloc(n)) call broadcast_array (pFep_ac (n,:), pmloc(n)) - call broadcast_array (pchlsw (n,:), pmloc(n)) - call broadcast_array (pzaerosw (n,:,:), pmloc(n)) + call broadcast_array (pchlsw (n,:), pmloc(n)) + call broadcast_array (pzaerosw (n,:,:), pmloc(n)) if (skl_bgc) then ! 
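! Note on the mps_to_cmpdy/c100 scaling used above: flux_bio and the
! fbio_* arrays carry mmol/m^2/s, and mps_to_cmpdy (assumed here to be
! the m/s-to-cm/day conversion factor, c100*secday = 8.64e6) divided by
! c100 leaves a net factor of secday = 86400, i.e. mmol/m^2/s becomes
! mmol/m^2/d, matching the units quoted in the diagnostic headers
! printed below.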
skl_bgc call broadcast_array (pN_sk (n,:), pmloc(n)) call broadcast_array (pDOC_sk (n,:), pmloc(n)) @@ -587,24 +587,24 @@ subroutine bgc_diags call broadcast_array (pFed_sk (n,:), pmloc(n)) call broadcast_array (pFep_sk (n,:), pmloc(n)) - call broadcast_scalar(pNit_sk (n), pmloc(n)) - call broadcast_scalar(pAm_sk (n), pmloc(n)) - call broadcast_scalar(pSil_sk (n), pmloc(n)) - call broadcast_scalar(phum_sk (n), pmloc(n)) - call broadcast_scalar(pDMSPp_sk (n), pmloc(n)) - call broadcast_scalar(pDMSPd_sk (n), pmloc(n)) - call broadcast_scalar(pDMS_sk (n), pmloc(n)) + call broadcast_scalar(pNit_sk (n), pmloc(n)) + call broadcast_scalar(pAm_sk (n), pmloc(n)) + call broadcast_scalar(pSil_sk (n), pmloc(n)) + call broadcast_scalar(phum_sk (n), pmloc(n)) + call broadcast_scalar(pDMSPp_sk (n), pmloc(n)) + call broadcast_scalar(pDMSPd_sk (n), pmloc(n)) + call broadcast_scalar(pDMS_sk (n), pmloc(n)) endif !tr_bgc_sk if (z_tracers) then ! z_bgc - call broadcast_array (pN_tot (n,:), pmloc(n)) - call broadcast_array (pflux_zaero (n,:), pmloc(n)) - call broadcast_array (pflux_atm_zaero_s(n,:), pmloc(n)) - call broadcast_array (pflux_atm_zaero (n,:), pmloc(n)) + call broadcast_array (pN_tot (n,:), pmloc(n)) + call broadcast_array (pflux_zaero (n,:), pmloc(n)) + call broadcast_array (pflux_atm_zaero_s(n,:), pmloc(n)) + call broadcast_array (pflux_atm_zaero (n,:), pmloc(n)) call broadcast_array (pflux_snow_zaero (n,:), pmloc(n)) - call broadcast_scalar(pflux_atm_NO (n), pmloc(n)) - call broadcast_scalar(pflux_atm_Am (n), pmloc(n)) - call broadcast_scalar(pflux_snow_NO (n), pmloc(n)) + call broadcast_scalar(pflux_atm_NO (n), pmloc(n)) + call broadcast_scalar(pflux_atm_Am (n), pmloc(n)) + call broadcast_scalar(pflux_snow_NO (n), pmloc(n)) call broadcast_scalar(pflux_snow_Am (n), pmloc(n)) call broadcast_scalar(pgrow_net (n), pmloc(n)) call broadcast_array (pzfswin (n,:), pmloc(n)) @@ -623,12 +623,12 @@ subroutine bgc_diags call broadcast_array (pAms (n,:), pmloc(n)) call broadcast_array (pPONs (n,:), pmloc(n)) call broadcast_array (phums (n,:), pmloc(n)) - call broadcast_array (pNs (n,:,:), pmloc(n)) - call broadcast_array (pDOCs (n,:,:), pmloc(n)) - call broadcast_array (pDONs (n,:,:), pmloc(n)) - call broadcast_array (pFeds (n,:,:), pmloc(n)) - call broadcast_array (pFeps (n,:,:), pmloc(n)) - call broadcast_array (pzaeros (n,:,:), pmloc(n)) + call broadcast_array (pNs (n,:,:), pmloc(n)) + call broadcast_array (pDOCs (n,:,:), pmloc(n)) + call broadcast_array (pDONs (n,:,:), pmloc(n)) + call broadcast_array (pFeds (n,:,:), pmloc(n)) + call broadcast_array (pFeps (n,:,:), pmloc(n)) + call broadcast_array (pzaeros (n,:,:), pmloc(n)) endif ! z_tracers enddo ! npnt endif ! print_points @@ -649,14 +649,14 @@ subroutine bgc_diags if (z_tracers) then write(nu_diag,803) 'zfswin(1) PAR ','zfswin(2) PAR ' write(nu_diag,*) '---------------------------------------------------' - write(nu_diag,802) ((pzfswin(n,k),n=1,2), k = 1,nblyr+1) - write(nu_diag,*) ' ' + write(nu_diag,802) ((pzfswin(n,k),n=1,2), k = 1,nblyr+1) + write(nu_diag,*) ' ' write(nu_diag,803) 'Losses: Zoo(1)(mmol/m^3) ','Zoo(2)' write(nu_diag,803) ' Brine Conc. ',' Brine Conc' write(nu_diag,*) '---------------------------------------------------' - write(nu_diag,802) ((pZoo(n,k),n=1,2), k = 1,nblyr+1) - write(nu_diag,*) ' ' - endif + write(nu_diag,802) ((pZoo(n,k),n=1,2), k = 1,nblyr+1) + write(nu_diag,*) ' ' + endif if (tr_bgc_Nit) then write(nu_diag,*) '---------------------------------------------------' write(nu_diag,*) ' nitrate conc. 
(mmol/m^3) or flux (mmol/m^2/d)' @@ -669,17 +669,17 @@ subroutine bgc_diags write(nu_diag,900) 'snow-ice flux = ',pflux_snow_NO(1),pflux_snow_NO(2) write(nu_diag,*) ' snow + ice conc' write(nu_diag,803) ' nitrate(1)',' nitrate(2)' - write(nu_diag,802) ((pNOs(n,k),n=1,2), k = 1,2) - write(nu_diag,802) ((pNO(n,k),n=1,2), k = 1,nblyr+1) - write(nu_diag,*) ' ' + write(nu_diag,802) ((pNOs(n,k),n=1,2), k = 1,2) + write(nu_diag,802) ((pNO(n,k),n=1,2), k = 1,nblyr+1) + write(nu_diag,*) ' ' endif endif if (tr_bgc_PON .and. z_tracers) then write(nu_diag,*) '---------------------------------------------------' write(nu_diag,*) ' PON snow + ice conc. (mmol/m^3)' write(nu_diag,803) ' PON(1)',' PON(2)' - write(nu_diag,802) ((pPONs(n,k),n=1,2), k = 1,2) - write(nu_diag,802) ((pPON(n,k),n=1,2), k = 1,nblyr+1) + write(nu_diag,802) ((pPONs(n,k),n=1,2), k = 1,2) + write(nu_diag,802) ((pPON(n,k),n=1,2), k = 1,nblyr+1) write(nu_diag,*) ' ' endif if (tr_bgc_hum) then @@ -691,8 +691,8 @@ subroutine bgc_diags write(nu_diag,900) 'Bulk ice conc. = ',phum_sk(1),phum_sk(2) elseif (z_tracers) then write(nu_diag,803) ' hum(1)',' hum(2)' - write(nu_diag,802) ((phums(n,k),n=1,2), k = 1,2) - write(nu_diag,802) ((phum(n,k),n=1,2), k = 1,nblyr+1) + write(nu_diag,802) ((phums(n,k),n=1,2), k = 1,2) + write(nu_diag,802) ((phum(n,k),n=1,2), k = 1,nblyr+1) write(nu_diag,*) ' ' endif endif @@ -708,9 +708,9 @@ subroutine bgc_diags write(nu_diag,900) 'snow-ice flux = ',pflux_snow_Am(1),pflux_snow_Am(2) write(nu_diag,*) ' snow + ice conc.' write(nu_diag,803) ' ammonium(1)',' ammonium (2)' - write(nu_diag,802) ((pAms(n,k),n=1,2), k = 1,2) - write(nu_diag,802) ((pAm(n,k),n=1,2), k = 1,nblyr+1) - write(nu_diag,*) ' ' + write(nu_diag,802) ((pAms(n,k),n=1,2), k = 1,2) + write(nu_diag,802) ((pAm(n,k),n=1,2), k = 1,nblyr+1) + write(nu_diag,*) ' ' endif endif if (tr_bgc_N) then @@ -727,9 +727,9 @@ subroutine bgc_diags write(nu_diag,900) 'Tot ice (mmolN/m^2) = ',pN_tot(1,kk),pN_tot(2,kk) write(nu_diag,*) ' snow + ice conc.' write(nu_diag,803) ' algal N(1)',' algal N(2) ' - write(nu_diag,802) ((pNs(n,k,kk),n=1,2), k = 1,2) - write(nu_diag,802) ((pN(n,k,kk),n=1,2), k = 1,nblyr+1) - write(nu_diag,*) ' ' + write(nu_diag,802) ((pNs(n,k,kk),n=1,2), k = 1,2) + write(nu_diag,802) ((pN(n,k,kk),n=1,2), k = 1,nblyr+1) + write(nu_diag,*) ' ' endif enddo endif @@ -744,9 +744,9 @@ subroutine bgc_diags elseif (z_tracers) then write(nu_diag,*) ' snow + ice conc.' write(nu_diag,803) ' DOC(1)',' DOC(2) ' - write(nu_diag,802) ((pDOCs(n,k,kk),n=1,2), k = 1,2) - write(nu_diag,802) ((pDOC(n,k,kk),n=1,2), k = 1,nblyr+1) - write(nu_diag,*) ' ' + write(nu_diag,802) ((pDOCs(n,k,kk),n=1,2), k = 1,2) + write(nu_diag,802) ((pDOC(n,k,kk),n=1,2), k = 1,nblyr+1) + write(nu_diag,*) ' ' endif enddo endif @@ -761,9 +761,9 @@ subroutine bgc_diags elseif (z_tracers) then write(nu_diag,*) ' snow + ice conc.' write(nu_diag,803) ' DON(1)',' DON(2) ' - write(nu_diag,802) ((pDONs(n,k,kk),n=1,2), k = 1,2) - write(nu_diag,802) ((pDON(n,k,kk),n=1,2), k = 1,nblyr+1) - write(nu_diag,*) ' ' + write(nu_diag,802) ((pDONs(n,k,kk),n=1,2), k = 1,2) + write(nu_diag,802) ((pDON(n,k,kk),n=1,2), k = 1,nblyr+1) + write(nu_diag,*) ' ' endif enddo endif @@ -778,9 +778,9 @@ subroutine bgc_diags elseif (z_tracers) then write(nu_diag,*) ' snow + ice conc.' 
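! The layer profiles in these blocks are written with format 802, i.e.
! (f24.17,2x,f24.17), and an implied do loop, so each record holds the
! two diagnostic-point values for one layer. For example,
!
!      write(nu_diag,802) ((pFed(n,k,kk), n=1,2), k = 1,nblyr+1)
!
! produces nblyr+1 records with point 1 in the first column and point 2
! in the second.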
write(nu_diag,803) ' Fed (1)',' Fed (2) ' - write(nu_diag,802) ((pFeds (n,k,kk),n=1,2), k = 1,2) - write(nu_diag,802) ((pFed (n,k,kk),n=1,2), k = 1,nblyr+1) - write(nu_diag,*) ' ' + write(nu_diag,802) ((pFeds (n,k,kk),n=1,2), k = 1,2) + write(nu_diag,802) ((pFed (n,k,kk),n=1,2), k = 1,nblyr+1) + write(nu_diag,*) ' ' endif enddo do kk = 1,n_fep @@ -793,9 +793,9 @@ subroutine bgc_diags elseif (z_tracers) then write(nu_diag,*) ' snow + ice conc.' write(nu_diag,803) ' Fep (1)',' Fep (2) ' - write(nu_diag,802) ((pFeps (n,k,kk),n=1,2), k = 1,2) - write(nu_diag,802) ((pFep (n,k,kk),n=1,2), k = 1,nblyr+1) - write(nu_diag,*) ' ' + write(nu_diag,802) ((pFeps (n,k,kk),n=1,2), k = 1,2) + write(nu_diag,802) ((pFep (n,k,kk),n=1,2), k = 1,nblyr+1) + write(nu_diag,*) ' ' endif enddo endif @@ -807,7 +807,7 @@ subroutine bgc_diags if (skl_bgc) then write(nu_diag,900) 'Ice DMSPp = ',pDMSPp_sk(1),pDMSPp_sk(2) write(nu_diag,900) 'Ice DMSPd = ',pDMSPd_sk(1),pDMSPd_sk(2) - write(nu_diag,900) 'Ice DMS = ',pDMS_sk(1),pDMS_sk(2) + write(nu_diag,900) 'Ice DMS = ',pDMS_sk(1),pDMS_sk(2) endif endif if (tr_zaero .and. z_tracers) then @@ -821,8 +821,8 @@ subroutine bgc_diags write(nu_diag,900) 'snow-ice flux*aice = ',pflux_snow_zaero(1,kk),pflux_snow_zaero(2,kk) write(nu_diag,*) ' snow + ice conc.' write(nu_diag,803) ' aerosol(1)',' aerosol(2) ' - write(nu_diag,802) ((pzaeros(n,k,kk),n=1,2), k = 1,2) - write(nu_diag,802) ((pzaero(n,k,kk),n=1,2), k = 1,nblyr+1) + write(nu_diag,802) ((pzaeros(n,k,kk),n=1,2), k = 1,2) + write(nu_diag,802) ((pzaero(n,k,kk),n=1,2), k = 1,nblyr+1) write(nu_diag,*) ' ' enddo endif @@ -830,23 +830,22 @@ subroutine bgc_diags if (tr_zaero) then do kk = 1,n_zaero write(nu_diag,*) '---------------------------------------------------' - write(nu_diag,*) ' Cat 1 aerosol conc. (kg/m^3) on delta-Eddington grid ' - write(nu_diag,802) ((pzaerosw(n,k,kk),n=1,2), k = 1,klev +1) + write(nu_diag,*) ' Cat 1 aerosol conc. (kg/m^3) on delta-Eddington grid ' + write(nu_diag,802) ((pzaerosw(n,k,kk),n=1,2), k = 1,klev +1) enddo endif if (tr_bgc_N) then write(nu_diag,*) '---------------------------------------------------' - write(nu_diag,*) ' Cat 1 chl (mg/m^3) on delta-Eddington grid ' - write(nu_diag,802) ((pchlsw(n,k),n=1,2), k = 1,klev +1) + write(nu_diag,*) ' Cat 1 chl (mg/m^3) on delta-Eddington grid ' + write(nu_diag,802) ((pchlsw(n,k),n=1,2), k = 1,klev +1) endif endif endif ! print_points - endif ! my_task = master_task + endif ! my_task = master_task 802 format (f24.17,2x,f24.17) 803 format (a25,2x,a25) 900 format (a25,2x,f24.17,2x,f24.17) - 902 format (a25,10x,f6.1,1x,f6.1,9x,f6.1,1x,f6.1) 1020 format (a30,2x,i6) ! integer end subroutine bgc_diags @@ -878,8 +877,8 @@ subroutine zsal_diags ! fields at diagnostic points real (kind=dbl_kind), dimension(npnt) :: & phinS, phinS1,& - phbrn,pdh_top1,pdh_bot1, psice_rho, pfzsal, & - pfzsal_g, pdarcy_V1 + phbrn,pdh_top1,pdh_bot1, psice_rho, pfzsal, & + pfzsal_g, pdarcy_V1 ! 
vertical fields of category 1 at diagnostic points for bgc layer model real (kind=dbl_kind), dimension(npnt,nblyr+2) :: & @@ -923,10 +922,10 @@ subroutine zsal_diags j = pjloc(n) iblk = pbloc(n) - pfzsal(n) = fzsal(i,j,iblk) - pfzsal_g(n) = fzsal_g(i,j,iblk) - phinS(n) = c0 - phinS1(n) = c0 + pfzsal(n) = fzsal(i,j,iblk) + pfzsal_g(n) = fzsal_g(i,j,iblk) + phinS(n) = c0 + phinS1(n) = c0 phbrn(n) = c0 psice_rho(n) = c0 pdh_top1(n) = c0 @@ -948,7 +947,7 @@ subroutine zsal_diags pdh_top1(n) = dhbr_top(i,j,1,iblk) pdh_bot1(n) = dhbr_bot(i,j,1,iblk) pdarcy_V1(n) = darcy_V(i,j,1,iblk) - endif + endif do k = 1, nblyr+1 pbTiz(n,k) = c0 piDin(n,k) = c0 @@ -958,7 +957,7 @@ subroutine zsal_diags enddo if (vice(i,j,iblk) > c0) then pbTiz(n,k) = pbTiz(n,k)/vice(i,j,iblk) - piDin(n,k) = piDin(n,k)/vice(i,j,iblk) + piDin(n,k) = piDin(n,k)/vice(i,j,iblk) endif enddo ! k do k = 1, nblyr+2 @@ -974,24 +973,24 @@ subroutine zsal_diags enddo do k = 1,nblyr pSin(n,k) = c0 - pSin1(n,k) = c0 - pSin(n,k)= trcr(i,j,nt_bgc_S+k-1,iblk) + pSin1(n,k) = c0 + pSin(n,k)= trcr(i,j,nt_bgc_S+k-1,iblk) if (aicen(i,j,1,iblk) > c0) pSin1(n,k) = trcrn(i,j,nt_bgc_S+k-1,1,iblk) - enddo + enddo do k = 1,nilyr pSice(n,k) = trcr(i,j,nt_sice+k-1,iblk) enddo endif ! my_task = pmloc - call broadcast_scalar(phinS (n), pmloc(n)) - call broadcast_scalar(phinS1 (n), pmloc(n)) - call broadcast_scalar(phbrn (n), pmloc(n)) - call broadcast_scalar(pdh_top1 (n), pmloc(n)) - call broadcast_scalar(pdh_bot1 (n), pmloc(n)) - call broadcast_scalar(psice_rho(n), pmloc(n)) - call broadcast_scalar(pfzsal_g (n), pmloc(n)) - call broadcast_scalar(pdarcy_V1(n), pmloc(n)) - call broadcast_scalar(pfzsal (n), pmloc(n)) + call broadcast_scalar(phinS (n), pmloc(n)) + call broadcast_scalar(phinS1 (n), pmloc(n)) + call broadcast_scalar(phbrn (n), pmloc(n)) + call broadcast_scalar(pdh_top1 (n), pmloc(n)) + call broadcast_scalar(pdh_bot1 (n), pmloc(n)) + call broadcast_scalar(psice_rho(n), pmloc(n)) + call broadcast_scalar(pfzsal_g (n), pmloc(n)) + call broadcast_scalar(pdarcy_V1(n), pmloc(n)) + call broadcast_scalar(pfzsal (n), pmloc(n)) call broadcast_array (pbTiz (n,:), pmloc(n)) call broadcast_array (piDin (n,:), pmloc(n)) call broadcast_array (pphin (n,:), pmloc(n)) @@ -1050,15 +1049,15 @@ subroutine zsal_diags write(nu_diag,*) ' ' write(nu_diag,803) 'zsal(1) cat 1 ','zsal(2) cat 1 ' write(nu_diag,*) '---------------------------------------------------' - write(nu_diag,802) ((pSin1(n,k),n=1,2), k = 1,nblyr) + write(nu_diag,802) ((pSin1(n,k),n=1,2), k = 1,nblyr) write(nu_diag,*) ' ' write(nu_diag,803) 'zsal(1) Avg S ','zsal(2) Avg S ' write(nu_diag,*) '---------------------------------------------------' - write(nu_diag,802) ((pSin(n,k),n=1,2), k = 1,nblyr) + write(nu_diag,802) ((pSin(n,k),n=1,2), k = 1,nblyr) write(nu_diag,*) ' ' write(nu_diag,803) 'Sice(1) Ice S ','Sice(2) Ice S ' write(nu_diag,*) '---------------------------------------------------' - write(nu_diag,802) ((pSice(n,k),n=1,2), k = 1,nilyr) + write(nu_diag,802) ((pSice(n,k),n=1,2), k = 1,nilyr) write(nu_diag,*) ' ' endif ! 
print_points @@ -1068,7 +1067,6 @@ subroutine zsal_diags 803 format (a25,2x,a25) 900 format (a25,2x,f24.17,2x,f24.17) 902 format (a25,10x,f6.1,1x,f6.1,9x,f6.1,1x,f6.1) - 903 format (a25,5x,i4,1x,i4,1x,i4,1x,i4,7x,i4,1x,i4,1x,i4,1x,i4) end subroutine zsal_diags diff --git a/cicecore/cicedynB/analysis/ice_history.F90 b/cicecore/cicedynB/analysis/ice_history.F90 index f3ca9b33e..caaa56295 100644 --- a/cicecore/cicedynB/analysis/ice_history.F90 +++ b/cicecore/cicedynB/analysis/ice_history.F90 @@ -1,24 +1,24 @@ !======================================================================= ! Driver for core history output ! -! The following variables are currently hard-wired as snapshots +! The following variables are currently hard-wired as snapshots ! (instantaneous rather than time-averages): ! divu, shear, sig1, sig2, sigP, trsig, mlt_onset, frz_onset, hisnap, aisnap ! ! Options for histfreq: '1','h','d','m','y','x', where x means that -! output stream will not be used (recommended for efficiency). -! histfreq_n can be any nonnegative integer, where 0 means that the +! output stream will not be used (recommended for efficiency). +! histfreq_n can be any nonnegative integer, where 0 means that the ! corresponding histfreq frequency will not be used. ! The flags (f_) can be set to '1','h','d','m','y' or 'x', where ! x means the field will not be written. To output the same field at -! more than one frequency, for instance monthy and daily, set +! more than one frequency, for instance monthly and daily, set ! f_ = 'md'. ! ! authors Tony Craig and Bruce Briegleb, NCAR ! Elizabeth C. Hunke and William H. Lipscomb, LANL ! C. M. Bitz, UW ! -! 2004 WHL: Block structure added +! 2004 WHL: Block structure added ! 2006 ECH: Accepted some CESM code into mainstream CICE ! Added ice_present, aicen, vicen; removed aice1...10, vice1...1. ! Added histfreq_n and histfreq='h' options, removed histfreq='w' @@ -44,7 +44,7 @@ module ice_history implicit none private public :: init_hist, accum_hist - + !======================================================================= contains @@ -294,7 +294,7 @@ subroutine init_hist (dt) f_yieldstress22 = 'x' endif - ! these must be output at the same frequency because of + ! these must be output at the same frequency because of ! cos(zenith angle) averaging if (f_albice(1:1) /= 'x' .and. f_albsni(1:1) /= 'x') f_albice = f_albsni if (f_albsno(1:1) /= 'x') f_albsno = f_albice @@ -686,7 +686,7 @@ subroutine init_hist (dt) call broadcast_scalar (f_e12, master_task) call broadcast_scalar (f_e22, master_task) call broadcast_scalar (f_s11, master_task) - call broadcast_scalar (f_s12, master_task) + call broadcast_scalar (f_s12, master_task) call broadcast_scalar (f_s22, master_task) call broadcast_scalar (f_yieldstress11, master_task) call broadcast_scalar (f_yieldstress12, master_task) @@ -697,13 +697,13 @@ subroutine init_hist (dt) if (histfreq(ns1) /= 'x') then !!!!! begin example -! call define_hist_field(n_example,"example","m",tstr2D, tcstr, & +! call define_hist_field(n_example,"example","m",tstr2D, tcstr, & ! "example: mean ice thickness", & ! "ice volume per unit grid cell area", c1, c0, & ! ns1, f_example) !!!!!
end example - call define_hist_field(n_hi,"hi","m",tstr2D, tcstr, & + call define_hist_field(n_hi,"hi","m",tstr2D, tcstr, & "grid cell mean ice thickness", & "ice volume per unit grid cell area", c1, c0, & ns1, f_hi) @@ -742,12 +742,12 @@ subroutine init_hist (dt) "sea ice speed", & "vector magnitude on E grid", c1, c0, & ns1, f_icespdE) - + call define_hist_field(n_icedirE,"icedirE","deg",estr2D, ecstr, & "sea ice direction", & "vector direction - coming from on E grid", c1, c0, & ns1, f_icedirE) - + call define_hist_field(n_uvelN,"uvelN","m/s",nstr2D, ncstr, & "ice velocity (x)", & "positive is x direction on N grid", c1, c0, & @@ -762,12 +762,12 @@ subroutine init_hist (dt) "sea ice speed", & "vector magnitude on N grid", c1, c0, & ns1, f_icespdN) - + call define_hist_field(n_icedirN,"icedirN","deg",nstr2D, ncstr, & "sea ice direction", & "vector direction - coming from on N grid", c1, c0, & ns1, f_icedirN) - + call define_hist_field(n_uvel,"uvel","m/s",ustr2D, ucstr, & "ice velocity (x)", & "positive is x direction on U grid", c1, c0, & @@ -777,22 +777,22 @@ subroutine init_hist (dt) "ice velocity (y)", & "positive is y direction on U grid", c1, c0, & ns1, f_vvel) - + call define_hist_field(n_icespd,"icespd","m/s",ustr2D, ucstr, & "sea ice speed", & "vector magnitude", c1, c0, & ns1, f_icespd) - + call define_hist_field(n_icedir,"icedir","deg",ustr2D, ucstr, & "sea ice direction", & "vector direction - coming from", c1, c0, & ns1, f_icedir) - + call define_hist_field(n_uatm,"uatm","m/s",str2D_gau, cstr_gau, & "atm velocity (x)", & "positive is x direction on U grid", c1, c0, & ns1, f_uatm) - + call define_hist_field(n_vatm,"vatm","m/s",str2D_gav, cstr_gav, & "atm velocity (y)", & "positive is y direction on U grid", c1, c0, & @@ -802,67 +802,67 @@ subroutine init_hist (dt) "atmosphere wind speed", & "vector magnitude", c1, c0, & ns1, f_atmspd) - + call define_hist_field(n_atmdir,"atmdir","deg",str2D_gau, cstr_gau, & "atmosphere wind direction", & "vector direction - coming from", c1, c0, & ns1, f_atmdir) - + call define_hist_field(n_sice,"sice","ppt",tstr2D, tcstr, & "bulk ice salinity", & "none", c1, c0, & ns1, f_sice) - + call define_hist_field(n_fswup,"fswup","W/m^2",tstr2D, tcstr, & "upward solar flux", & "positive upward", c1, c0, & ns1, f_fswup) - + call define_hist_field(n_fswdn,"fswdn","W/m^2",tstr2D, tcstr, & "down solar flux", & "positive downward", c1, c0, & ns1, f_fswdn) - + call define_hist_field(n_flwdn,"flwdn","W/m^2",tstr2D, tcstr, & "down longwave flux", & "positive downward", c1, c0, & ns1, f_flwdn) - + call define_hist_field(n_snow,"snow","cm/day",tstr2D, tcstr, & "snowfall rate (cpl)", & "none", mps_to_cmpdy/rhofresh, c0, & ns1, f_snow) - + call define_hist_field(n_snow_ai,"snow_ai","cm/day",tstr2D, tcstr, & "snowfall rate", & "weighted by ice area", mps_to_cmpdy/rhofresh, c0, & ns1, f_snow_ai) - + call define_hist_field(n_rain,"rain","cm/day",tstr2D, tcstr, & "rainfall rate (cpl)", & "none", mps_to_cmpdy/rhofresh, c0, & ns1, f_rain) - + call define_hist_field(n_rain_ai,"rain_ai","cm/day",tstr2D, tcstr, & "rainfall rate", & "weighted by ice area", mps_to_cmpdy/rhofresh, c0, & ns1, f_rain_ai) - + call define_hist_field(n_sst,"sst","C",tstr2D, tcstr, & "sea surface temperature", & "none", c1, c0, & ns1, f_sst) - + call define_hist_field(n_sss,"sss","ppt",tstr2D, tcstr, & "sea surface salinity", & "none", c1, c0, & ns1, f_sss) - + call define_hist_field(n_uocn,"uocn","m/s",str2D_gou, cstr_gou, & "ocean current (x)", & "positive is x direction on U grid", c1, c0, & ns1, 
f_uocn) - + call define_hist_field(n_vocn,"vocn","m/s",str2D_gov, cstr_gov, & "ocean current (y)", & "positive is y direction on U grid", c1, c0, & @@ -872,17 +872,17 @@ subroutine init_hist (dt) "ocean current speed", & "vector magnitude", c1, c0, & ns1, f_ocnspd) - + call define_hist_field(n_ocndir,"ocndir","deg",str2D_gou, cstr_gou, & "ocean current direction", & "vector direction - going to", c1, c0, & ns1, f_ocndir) - + call define_hist_field(n_frzmlt,"frzmlt","W/m^2",tstr2D, tcstr, & "freeze/melt potential", & "if >0, new ice forms; if <0, ice melts", c1, c0, & ns1, f_frzmlt) - + call define_hist_field(n_fswfac,"scale_factor","1",tstr2D, tcstr, & "shortwave scaling factor", & "ratio of netsw new:old", c1, c0, & @@ -897,22 +897,22 @@ subroutine init_hist (dt) "snow/ice/ocn absorbed solar flux (cpl)", & "positive downward", c1, c0, & ns1, f_fswabs) - + call define_hist_field(n_fswabs_ai,"fswabs_ai","W/m^2",tstr2D, tcstr, & "snow/ice/ocn absorbed solar flux", & "weighted by ice area", c1, c0, & ns1, f_fswabs_ai) - + call define_hist_field(n_albsni,"albsni","%",tstr2D, tcstr, & "snow/ice broad band albedo", & "averaged for coszen>0, weighted by aice", c100, c0, & ns1, f_albsni) - + call define_hist_field(n_alvdr,"alvdr","%",tstr2D, tcstr, & "visible direct albedo", & "scaled (divided) by aice", c100, c0, & ns1, f_alvdr) - + call define_hist_field(n_alidr,"alidr","%",tstr2D, tcstr, & "near IR direct albedo", & "scaled (divided) by aice", c100, c0, & @@ -922,7 +922,7 @@ subroutine init_hist (dt) "visible diffuse albedo", & "scaled (divided) by aice", c100, c0, & ns1, f_alvdf) - + call define_hist_field(n_alidf,"alidf","%",tstr2D, tcstr, & "near IR diffuse albedo", & "scaled (divided) by aice", c100, c0, & @@ -932,7 +932,7 @@ subroutine init_hist (dt) "visible direct albedo", & " ", c100, c0, & ns1, f_alvdr_ai) - + call define_hist_field(n_alidr_ai,"alidr_ai","%",tstr2D, tcstr, & "near IR direct albedo", & " ", c100, c0, & @@ -942,7 +942,7 @@ subroutine init_hist (dt) "visible diffuse albedo", & " ", c100, c0, & ns1, f_alvdf_ai) - + call define_hist_field(n_alidf_ai,"alidf_ai","%",tstr2D, tcstr, & "near IR diffuse albedo", & " ", c100, c0, & @@ -952,17 +952,17 @@ subroutine init_hist (dt) "bare ice albedo", & "averaged for coszen>0, weighted by aice", c100, c0, & ns1, f_albice) - + call define_hist_field(n_albsno,"albsno","%",tstr2D, tcstr, & "snow albedo", & "averaged for coszen>0, weighted by aice", c100, c0, & ns1, f_albsno) - + call define_hist_field(n_albpnd,"albpnd","%",tstr2D, tcstr, & "melt pond albedo", & "averaged for coszen>0, weighted by aice", c100, c0, & ns1, f_albpnd) - + call define_hist_field(n_coszen,"coszen","radian",tstr2D, tcstr, & "cosine of the zenith angle", & "negative below horizon", c1, c0, & @@ -972,188 +972,188 @@ subroutine init_hist (dt) "latent heat flux (cpl)", & "positive downward", c1, c0, & ns1, f_flat) - + call define_hist_field(n_flat_ai,"flat_ai","W/m^2",tstr2D, tcstr, & "latent heat flux", & "weighted by ice area", c1, c0, & ns1, f_flat_ai) - + call define_hist_field(n_fsens,"fsens","W/m^2",tstr2D, tcstr, & "sensible heat flux (cpl)", & "positive downward", c1, c0, & ns1, f_fsens) - + call define_hist_field(n_fsens_ai,"fsens_ai","W/m^2",tstr2D, tcstr, & "sensible heat flux", & "weighted by ice area", c1, c0, & ns1, f_fsens_ai) - + call define_hist_field(n_flwup,"flwup","W/m^2",tstr2D, tcstr, & "upward longwave flux (cpl)", & "positive downward", c1, c0, & ns1, f_flwup) - + call define_hist_field(n_flwup_ai,"flwup_ai","W/m^2",tstr2D, tcstr, & "upward 
longwave flux", & "weighted by ice area", c1, c0, & ns1, f_flwup_ai) - + call define_hist_field(n_evap,"evap","cm/day",tstr2D, tcstr, & "evaporative water flux (cpl)", & "none", mps_to_cmpdy/rhofresh, c0, & ns1, f_evap) - + call define_hist_field(n_evap_ai,"evap_ai","cm/day",tstr2D, tcstr, & "evaporative water flux", & "weighted by ice area", mps_to_cmpdy/rhofresh, c0, & ns1, f_evap_ai) - + call define_hist_field(n_Tair,"Tair","C",tstr2D, tcstr, & "air temperature", & "none", c1, -Tffresh, & ns1, f_Tair) - + call define_hist_field(n_Tref,"Tref","C",tstr2D, tcstr, & "2m reference temperature", & "none", c1, -Tffresh, & ns1, f_Tref) - + call define_hist_field(n_Qref,"Qref","g/kg",tstr2D, tcstr, & "2m reference specific humidity", & "none", kg_to_g, c0, & ns1, f_Qref) - + call define_hist_field(n_congel,"congel","cm/day",tstr2D, tcstr, & "congelation ice growth", & "none", mps_to_cmpdy/dt, c0, & ns1, f_congel) - + call define_hist_field(n_frazil,"frazil","cm/day",tstr2D, tcstr, & "frazil ice growth", & "none", mps_to_cmpdy/dt, c0, & ns1, f_frazil) - + call define_hist_field(n_snoice,"snoice","cm/day",tstr2D, tcstr, & "snow-ice formation", & "none", mps_to_cmpdy/dt, c0, & ns1, f_snoice) - + call define_hist_field(n_dsnow,"dsnow","cm/day",tstr2D, tcstr, & "snow formation", & "none", mps_to_cmpdy/dt, c0, & ns1, f_dsnow) - + call define_hist_field(n_meltt,"meltt","cm/day",tstr2D, tcstr, & "top ice melt", & "none", mps_to_cmpdy/dt, c0, & ns1, f_meltt) - + call define_hist_field(n_melts,"melts","cm/day",tstr2D, tcstr, & "top snow melt", & "none", mps_to_cmpdy/dt, c0, & ns1, f_melts) - + call define_hist_field(n_meltb,"meltb","cm/day",tstr2D, tcstr, & "basal ice melt", & "none", mps_to_cmpdy/dt, c0, & ns1, f_meltb) - + call define_hist_field(n_meltl,"meltl","cm/day",tstr2D, tcstr, & "lateral ice melt", & "none", mps_to_cmpdy/dt, c0, & ns1, f_meltl) - + call define_hist_field(n_fresh,"fresh","cm/day",tstr2D, tcstr, & "freshwtr flx ice to ocn (cpl)", & "if positive, ocean gains fresh water", & mps_to_cmpdy/rhofresh, c0, & ns1, f_fresh) - + call define_hist_field(n_fresh_ai,"fresh_ai","cm/day",tstr2D, tcstr, & "freshwtr flx ice to ocn", & "weighted by ice area", mps_to_cmpdy/rhofresh, c0, & ns1, f_fresh_ai) - + call define_hist_field(n_fsalt,"fsalt","kg/m^2/s",tstr2D, tcstr, & "salt flux ice to ocn (cpl)", & "if positive, ocean gains salt", c1, c0, & ns1, f_fsalt) - + call define_hist_field(n_fsalt_ai,"fsalt_ai","kg/m^2/s",tstr2D, tcstr, & "salt flux ice to ocean", & "weighted by ice area", c1, c0, & ns1, f_fsalt_ai) - + call define_hist_field(n_fbot,"fbot","W/m^2",tstr2D, tcstr, & "heat flux ice to ocean (fbot)", & "if positive, ocean gains heat", c1, c0, & ns1, f_fbot) - + call define_hist_field(n_fhocn,"fhocn","W/m^2",tstr2D, tcstr, & "heat flux ice to ocn (cpl)", & "if positive, ocean gains heat", c1, c0, & ns1, f_fhocn) - + call define_hist_field(n_fhocn_ai,"fhocn_ai","W/m^2",tstr2D, tcstr, & "heat flux ice to ocean (fhocn_ai)", & "weighted by ice area", c1, c0, & ns1, f_fhocn_ai) - + call define_hist_field(n_fswthru,"fswthru","W/m^2",tstr2D, tcstr, & "SW thru ice to ocean (cpl)", & "if positive, ocean gains heat", c1, c0, & ns1, f_fswthru) - + call define_hist_field(n_fswthru_ai,"fswthru_ai","W/m^2",tstr2D, tcstr,& "SW flux thru ice to ocean", & "weighted by ice area", c1, c0, & ns1, f_fswthru_ai) - + call define_hist_field(n_strairx,"strairx","N/m^2",ustr2D, ucstr, & "atm/ice stress (x)", & "positive is x direction on U grid", c1, c0, & ns1, f_strairx) - + call 
define_hist_field(n_strairy,"strairy","N/m^2",ustr2D, ucstr, & "atm/ice stress (y)", & "positive is y direction on U grid", c1, c0, & ns1, f_strairy) - + call define_hist_field(n_strtltx,"strtltx","N/m^2",ustr2D, ucstr, & "sea sfc tilt stress (x)", & "none", c1, c0, & ns1, f_strtltx) - + call define_hist_field(n_strtlty,"strtlty","N/m^2",ustr2D, ucstr, & "sea sfc tilt stress (y)", & "none", c1, c0, & ns1, f_strtlty) - + call define_hist_field(n_strcorx,"strcorx","N/m^2",ustr2D, ucstr, & "coriolis stress (x)", & "positive is x direction on U grid", c1, c0, & ns1, f_strcorx) - + call define_hist_field(n_strcory,"strcory","N/m^2",ustr2D, ucstr, & "coriolis stress (y)", & "positive is y direction on U grid", c1, c0, & ns1, f_strcory) - + call define_hist_field(n_strocnx,"strocnx","N/m^2",ustr2D, ucstr, & "ocean/ice stress (x)", & "positive is x direction on U grid", c1, c0, & ns1, f_strocnx) - + call define_hist_field(n_strocny,"strocny","N/m^2",ustr2D, ucstr, & "ocean/ice stress (y)", & "positive is y direction on U grid", c1, c0, & ns1, f_strocny) - + call define_hist_field(n_strintx,"strintx","N/m^2",ustr2D, ucstr, & "internal ice stress (x)", & "positive is x direction on U grid", c1, c0, & ns1, f_strintx) - + call define_hist_field(n_strinty,"strinty","N/m^2",ustr2D, ucstr, & "internal ice stress (y)", & "positive is y direction on U grid", c1, c0, & @@ -1168,92 +1168,92 @@ subroutine init_hist (dt) "seabed (basal) stress (y)", & "positive is y direction on U grid", c1, c0, & ns1, f_tauby) - + call define_hist_field(n_strairxN,"strairxN","N/m^2",nstr2D, ncstr, & "atm/ice stress (x)", & "positive is x direction on N grid", c1, c0, & ns1, f_strairxN) - + call define_hist_field(n_strairyN,"strairyN","N/m^2",nstr2D, ncstr, & "atm/ice stress (y)", & "positive is y direction on N grid", c1, c0, & ns1, f_strairyN) - + call define_hist_field(n_strairxE,"strairxE","N/m^2",estr2D, ecstr, & "atm/ice stress (x)", & "positive is x direction on E grid", c1, c0, & ns1, f_strairxE) - + call define_hist_field(n_strairyE,"strairyE","N/m^2",estr2D, ecstr, & "atm/ice stress (y)", & "positive is y direction on E grid", c1, c0, & ns1, f_strairyE) - + call define_hist_field(n_strtltxN,"strtltxN","N/m^2",nstr2D, ncstr, & "sea sfc tilt stress (x)", & "positive is x direction on N grid", c1, c0, & ns1, f_strtltxN) - + call define_hist_field(n_strtltyN,"strtltyN","N/m^2",nstr2D, ncstr, & "sea sfc tilt stress (y)", & "positive is y direction on N grid", c1, c0, & ns1, f_strtltyN) - + call define_hist_field(n_strtltxE,"strtltxE","N/m^2",estr2D, ecstr, & "sea sfc tilt stress (x)", & "positive is x direction on E grid", c1, c0, & ns1, f_strtltxE) - + call define_hist_field(n_strtltyE,"strtltyE","N/m^2",estr2D, ecstr, & "sea sfc tilt stress (y)", & "positive is y direction on E grid", c1, c0, & ns1, f_strtltyE) - + call define_hist_field(n_strcorxN,"strcorxN","N/m^2",nstr2D, ncstr, & "coriolis stress (x)", & "positive is x direction on N grid", c1, c0, & ns1, f_strcorxN) - + call define_hist_field(n_strcoryN,"strcoryN","N/m^2",nstr2D, ncstr, & "coriolis stress (y)", & "positive is y direction on N grid", c1, c0, & ns1, f_strcoryN) - + call define_hist_field(n_strcorxE,"strcorxE","N/m^2",estr2D, ecstr, & "coriolis stress (x)", & "positive is x direction on E grid", c1, c0, & ns1, f_strcorxE) - + call define_hist_field(n_strcoryE,"strcoryE","N/m^2",estr2D, ecstr, & "coriolis stress (y)", & "positive is y direction on E grid", c1, c0, & ns1, f_strcoryE) - + call define_hist_field(n_strocnxN,"strocnxN","N/m^2",nstr2D, 
ncstr, & "ocean/ice stress (x)", & "positive is x direction on N grid", c1, c0, & ns1, f_strocnxN) - + call define_hist_field(n_strocnyN,"strocnyN","N/m^2",nstr2D, ncstr, & "ocean/ice stress (y)", & "positive is y direction on N grid", c1, c0, & ns1, f_strocnyN) - + call define_hist_field(n_strocnxE,"strocnxE","N/m^2",estr2D, ecstr, & "ocean/ice stress (x)", & "positive is x direction on E grid", c1, c0, & ns1, f_strocnxE) - + call define_hist_field(n_strocnyE,"strocnyE","N/m^2",estr2D, ecstr, & "ocean/ice stress (y)", & "positive is y direction on E grid", c1, c0, & ns1, f_strocnyE) - + call define_hist_field(n_strintxN,"strintxN","N/m^2",nstr2D, ncstr, & "internal ice stress (x)", & "positive is x direction on N grid", c1, c0, & ns1, f_strintxN) - + call define_hist_field(n_strintyN,"strintyN","N/m^2",nstr2D, ncstr, & "internal ice stress (y)", & "positive is y direction on N grid", c1, c0, & @@ -1263,7 +1263,7 @@ subroutine init_hist (dt) "internal ice stress (x)", & "positive is x direction on E grid", c1, c0, & ns1, f_strintxE) - + call define_hist_field(n_strintyE,"strintyE","N/m^2",estr2D, ecstr, & "internal ice stress (y)", & "positive is y direction on E grid", c1, c0, & @@ -1278,7 +1278,7 @@ subroutine init_hist (dt) "seabed (basal) stress (y)", & "positive is y direction on N grid", c1, c0, & ns1, f_taubyN) - + call define_hist_field(n_taubxE,"taubxE","N/m^2",estr2D, ecstr, & "seabed (basal) stress (x)", & "positive is x direction on E grid", c1, c0, & @@ -1288,22 +1288,22 @@ subroutine init_hist (dt) "seabed (basal) stress (y)", & "positive is y direction on E grid", c1, c0, & ns1, f_taubyE) - + call define_hist_field(n_strength,"strength","N/m",tstr2D, tcstr, & "compressive ice strength", & "none", c1, c0, & ns1, f_strength) - + call define_hist_field(n_divu,"divu","%/day",tstr2D, tcstr, & "strain rate (divergence)", & "none", secday*c100, c0, & ns1, f_divu) - + call define_hist_field(n_shear,"shear","%/day",tstr2D, tcstr, & "strain rate (shear)", & "none", secday*c100, c0, & ns1, f_shear) - + select case (grid_ice) case('B') description = ", on U grid (NE corner values)" @@ -1315,42 +1315,42 @@ subroutine init_hist (dt) "norm. principal stress 1", & "sig1 is instantaneous" // trim(description), c1, c0, & ns1, f_sig1) - + call define_hist_field(n_sig2,"sig2","1",ustr2D, ucstr, & "norm. 
principal stress 2", & "sig2 is instantaneous" // trim(description), c1, c0, & ns1, f_sig2) - + call define_hist_field(n_sigP,"sigP","1",ustr2D, ucstr, & "ice pressure", & "sigP is instantaneous" // trim(description), c1, c0, & ns1, f_sigP) - + call define_hist_field(n_dvidtt,"dvidtt","cm/day",tstr2D, tcstr, & "volume tendency thermo", & "none", mps_to_cmpdy, c0, & ns1, f_dvidtt) - + call define_hist_field(n_dvidtd,"dvidtd","cm/day",tstr2D, tcstr, & "volume tendency dynamics", & "none", mps_to_cmpdy, c0, & ns1, f_dvidtd) - + call define_hist_field(n_daidtt,"daidtt","%/day",tstr2D, tcstr, & "area tendency thermo", & "none", secday*c100, c0, & ns1, f_daidtt) - + call define_hist_field(n_daidtd,"daidtd","%/day",tstr2D, tcstr, & "area tendency dynamics", & "none", secday*c100, c0, & ns1, f_daidtd) - + call define_hist_field(n_dagedtt,"dagedtt","day/day",tstr2D, tcstr, & "age tendency thermo", & "excludes time step increment", c1, c0, & ns1, f_dagedtt) - + call define_hist_field(n_dagedtd,"dagedtd","day/day",tstr2D, tcstr, & "age tendency dynamics", & "excludes time step increment", c1, c0, & @@ -1370,22 +1370,22 @@ subroutine init_hist (dt) "ice volume snapshot", & "none", c1, c0, & ns1, f_hisnap) - + call define_hist_field(n_aisnap,"aisnap","1",tstr2D, tcstr, & "ice area snapshot", & "none", c1, c0, & ns1, f_aisnap) - + call define_hist_field(n_trsig,"trsig","N/m",tstr2D, tcstr, & "internal stress tensor trace", & "ice strength approximation", c1, c0, & ns1, f_trsig) - + call define_hist_field(n_icepresent,"ice_present","1",tstr2D, tcstr, & "fraction of time-avg interval that ice is present", & "ice extent flag", c1, c0, & ns1, f_icepresent) - + call define_hist_field(n_fsurf_ai,"fsurf_ai","W/m^2",tstr2D, tcstr, & "net surface heat flux", & "positive downward, excludes conductive flux, weighted by ice area", & @@ -1477,27 +1477,27 @@ subroutine init_hist (dt) "sea ice thickness", & "volume divided by area", c1, c0, & ns1, f_sithick) - + call define_hist_field(n_siage,"siage","s",tstr2D, tcstr, & "sea ice age", & "none", c1, c0, & ns1, f_siage) - + call define_hist_field(n_sisnthick,"sisnthick","m",tstr2D, tcstr, & "sea ice snow thickness", & "snow volume divided by area", c1, c0, & ns1, f_sisnthick) - + call define_hist_field(n_sitemptop,"sitemptop","K",tstr2D, tcstr, & "sea ice surface temperature", & "none", c1, c0, & ns1, f_sitemptop) - + call define_hist_field(n_sitempsnic,"sitempsnic","K",tstr2D, tcstr, & "snow ice interface temperature", & "surface temperature when no snow present", c1, c0, & ns1, f_sitempsnic) - + call define_hist_field(n_sitempbot,"sitempbot","K",tstr2D, tcstr, & "sea ice bottom temperature", & "none", c1, c0, & @@ -1512,37 +1512,37 @@ subroutine init_hist (dt) "ice y velocity component", & "none", c1, c0, & ns1, f_siv) - + call define_hist_field(n_sidmasstranx,"sidmasstranx","kg/s",ustr2D, ucstr, & "x component of snow and sea ice mass transport", & "none", c1, c0, & ns1, f_sidmasstranx) - + call define_hist_field(n_sidmasstrany,"sidmasstrany","kg/s",ustr2D, ucstr, & "y component of snow and sea ice mass transport", & "none", c1, c0, & ns1, f_sidmasstrany) - + call define_hist_field(n_sistrxdtop,"sistrxdtop","N m-2",ustr2D, ucstr, & "x component of atmospheric stress on sea ice", & "none", c1, c0, & ns1, f_sistrxdtop) - + call define_hist_field(n_sistrydtop,"sistrydtop","N m-2",ustr2D, ucstr, & "y component of atmospheric stress on sea ice", & "none", c1, c0, & ns1, f_sistrydtop) - + call define_hist_field(n_sistrxubot,"sistrxubot","N m-2",ustr2D, ucstr, & "x 
component of ocean stress on sea ice", & "none", c1, c0, & ns1, f_sistrxubot) - + call define_hist_field(n_sistryubot,"sistryubot","N m-2",ustr2D, ucstr, & "y component of ocean stress on sea ice", & "none", c1, c0, & ns1, f_sistryubot) - + call define_hist_field(n_sicompstren,"sicompstren","N m-1",tstr2D, tcstr, & "compressive sea ice strength", & "none", c1, c0, & @@ -1557,37 +1557,37 @@ subroutine init_hist (dt) "ice direction", & "vector direction - going to", c1, c0, & ns1, f_sidir) - + call define_hist_field(n_sialb,"sialb","1",tstr2D, tcstr, & "sea ice albedo", & "none", c1, c0, & ns1, f_sialb) - + call define_hist_field(n_sihc,"sihc","J m-2",tstr2D, tcstr, & "sea ice heat content", & "none", c1, c0, & ns1, f_sihc) - + call define_hist_field(n_sisnhc,"sisnhc","J m-2",tstr2D, tcstr, & "snow heat content", & "none", c1, c0, & ns1, f_sisnhc) - + call define_hist_field(n_sidconcth,"sidconcth","1/s",tstr2D, tcstr, & "sea ice area change from thermodynamics", & "none", c1, c0, & ns1, f_sidconcth) - + call define_hist_field(n_sidconcdyn,"sidconcdyn","1/s",tstr2D, tcstr, & "sea ice area change from dynamics", & "none", c1, c0, & ns1, f_sidconcdyn) - + call define_hist_field(n_sidmassth,"sidmassth","kg m-2 s-1",tstr2D, tcstr, & "sea ice mass change from thermodynamics", & "none", c1, c0, & ns1, f_sidmassth) - + call define_hist_field(n_sidmassdyn,"sidmassdyn","kg m-2 s-1",tstr2D, tcstr, & "sea ice mass change from dynamics", & "none", c1, c0, & @@ -1597,37 +1597,37 @@ subroutine init_hist (dt) "sea ice mass change from frazil", & "none", c1, c0, & ns1, f_sidmassgrowthwat) - + call define_hist_field(n_sidmassgrowthbot,"sidmassgrowthbot","kg m-2 s-1",tstr2D, tcstr, & "sea ice mass change from basal growth", & "none", c1, c0, & ns1, f_sidmassgrowthbot) - + call define_hist_field(n_sidmasssi,"sidmasssi","kg m-2 s-1",tstr2D, tcstr, & "sea ice mass change from snow-ice formation", & "none", c1, c0, & ns1, f_sidmasssi) - + call define_hist_field(n_sidmassevapsubl,"sidmassevapsubl","kg m-2 s-1",tstr2D, tcstr, & "sea ice mass change from evaporation and sublimation", & "none", c1, c0, & ns1, f_sidmassevapsubl) - + call define_hist_field(n_sndmasssubl,"sndmassubl","kg m-2 s-1",tstr2D, tcstr, & "snow mass change from evaporation and sublimation", & "none", c1, c0, & ns1, f_sndmasssubl) - + call define_hist_field(n_sidmassmelttop,"sidmassmelttop","kg m-2 s-1",tstr2D, tcstr, & "sea ice mass change top melt", & "none", c1, c0, & ns1, f_sidmassmelttop) - + call define_hist_field(n_sidmassmeltbot,"sidmassmeltbot","kg m-2 s-1",tstr2D, tcstr, & "sea ice mass change bottom melt", & "none", c1, c0, & ns1, f_sidmassmeltbot) - + call define_hist_field(n_sidmasslat,"sidmasslat","kg m-2 s-1",tstr2D, tcstr, & "sea ice mass change lateral melt", & "none", c1, c0, & @@ -1637,37 +1637,37 @@ subroutine init_hist (dt) "snow mass change from snow fall", & "none", c1, c0, & ns1, f_sndmasssnf) - + call define_hist_field(n_sndmassmelt,"sndmassmelt","kg m-2 s-1",tstr2D, tcstr, & "snow mass change from snow melt", & "none", c1, c0, & ns1, f_sndmassmelt) - + call define_hist_field(n_siflswdtop,"siflswdtop","W/m2",tstr2D, tcstr, & "down shortwave flux over sea ice", & "positive downward", c1, c0, & ns1, f_siflswdtop) - + call define_hist_field(n_siflswutop,"siflswutop","W/m2",tstr2D, tcstr, & "upward shortwave flux over sea ice", & "positive downward", c1, c0, & ns1, f_siflswutop) - + call define_hist_field(n_siflswdbot,"siflswdbot","W/m2",tstr2D, tcstr, & "down shortwave flux at bottom of ice", & "positive downward", c1, c0, & 
           ns1, f_siflswdbot)
-
+
       call define_hist_field(n_sifllwdtop,"sifllwdtop","W/m2",tstr2D, tcstr, &
           "down longwave flux over sea ice", &
           "positive downward", c1, c0, &
           ns1, f_sifllwdtop)
-
+
       call define_hist_field(n_sifllwutop,"sifllwutop","W/m2",tstr2D, tcstr, &
           "upward longwave flux over sea ice", &
           "positive downward", c1, c0, &
           ns1, f_sifllwutop)
-
+
       call define_hist_field(n_siflsenstop,"siflsenstop","W/m2",tstr2D, tcstr, &
           "sensible heat flux over sea ice", &
           "positive downward", c1, c0, &
@@ -1677,37 +1677,37 @@ subroutine init_hist (dt)
           "sensible heat flux at bottom of sea ice", &
           "positive downward", c1, c0, &
           ns1, f_siflsensupbot)
-
+
       call define_hist_field(n_sifllatstop,"sifllatstop","W/m2",tstr2D, tcstr, &
           "latent heat flux over sea ice", &
           "positive downward", c1, c0, &
           ns1, f_sifllatstop)
-
+
       call define_hist_field(n_siflcondtop,"siflcondtop","W/m2",tstr2D, tcstr, &
           "conductive heat flux at top of sea ice", &
           "positive downward", c1, c0, &
           ns1, f_siflcondtop)
-
+
       call define_hist_field(n_siflcondbot,"siflcondbot","W/m2",tstr2D, tcstr, &
           "conductive heat flux at bottom of sea ice", &
           "positive downward", c1, c0, &
           ns1, f_siflcondbot)
-
+
       call define_hist_field(n_sipr,"sipr","kg m-2 s-1",tstr2D, tcstr, &
           "rainfall over sea ice", &
           "none", c1, c0, &
           ns1, f_sipr)
-
+
       call define_hist_field(n_sifb,"sifb","m",tstr2D, tcstr, &
           "sea ice freeboard above sea level", &
           "none", c1, c0, &
           ns1, f_sifb)
-
+
       call define_hist_field(n_siflsaltbot,"siflsaltbot","kg m-2 s-1",tstr2D, tcstr, &
           "salt flux from sea ice", &
           "positive downward", c1, c0, &
           ns1, f_siflsaltbot)
-
+
       call define_hist_field(n_siflfwbot,"siflfwbot","kg m-2 s-1",tstr2D, tcstr, &
           "fresh water flux from sea ice", &
           "positive downward", c1, c0, &
@@ -1717,37 +1717,37 @@ subroutine init_hist (dt)
           "fresh water drainage through sea ice", &
           "positive downward", c1, c0, &
           ns1, f_siflfwdrain)
-
+
       call define_hist_field(n_sidragtop,"sidragtop","1",tstr2D, tcstr, &
           "atmospheric drag over sea ice", &
           "none", c1, c0, &
           ns1, f_sidragtop)
-
+
       call define_hist_field(n_sirdgthick,"sirdgthick","m",tstr2D, tcstr, &
           "sea ice ridge thickness", &
           "vrdg divided by ardg", c1, c0, &
           ns1, f_sirdgthick)
-
+
       call define_hist_field(n_siforcetiltx,"siforcetiltx","N m-2",tstr2D, tcstr, &
          "sea surface tilt term", &
          "none", c1, c0, &
          ns1, f_siforcetiltx)
-
+
      call define_hist_field(n_siforcetilty,"siforcetilty","N m-2",tstr2D, tcstr, &
          "sea surface tilt term", &
          "none", c1, c0, &
          ns1, f_siforcetilty)
-
+
      call define_hist_field(n_siforcecoriolx,"siforcecoriolx","N m-2",tstr2D, tcstr, &
          "coriolis term", &
          "none", c1, c0, &
          ns1, f_siforcecoriolx)
-
+
      call define_hist_field(n_siforcecorioly,"siforcecorioly","N m-2",tstr2D, tcstr, &
          "coriolis term", &
          "none", c1, c0, &
          ns1, f_siforcecorioly)
-
+
      call define_hist_field(n_siforceintstrx,"siforceintstrx","N m-2",tstr2D, tcstr, &
          "internal stress term", &
          "none", c1, c0, &
@@ -1762,7 +1762,7 @@ subroutine init_hist (dt)
          "average normal stress", &
          "sistreave is instantaneous", c1, c0, &
          ns1, f_sistreave)
-
+
      call define_hist_field(n_sistremax,"sistremax","N m-1",ustr2D, ucstr, &
          "maximum shear stress", &
          "sistremax is instantaneous", c1, c0, &
@@ -1797,12 +1797,12 @@ subroutine init_hist (dt)
       do ns1 = 1, nstreams
       if (histfreq(ns1) /= 'x') then

-        call define_hist_field(n_aicen,"aicen","1",tstr3Dc, tcstr, &
-            "ice area, categories","none", c1, c0, &
+        call define_hist_field(n_aicen,"aicen","1",tstr3Dc, tcstr, &
+            "ice area, categories","none", c1, c0, &
             ns1, f_aicen)

-        call define_hist_field(n_vicen,"vicen","m",tstr3Dc,
tcstr, & - "ice volume, categories","none", c1, c0, & + call define_hist_field(n_vicen,"vicen","m",tstr3Dc, tcstr, & + "ice volume, categories","none", c1, c0, & ns1, f_vicen) call define_hist_field(n_vsnon,"vsnon","m",tstr3Dc, tcstr, & @@ -1814,29 +1814,29 @@ subroutine init_hist (dt) "snow fraction per unit grid cell area", c1, c0, & ns1, f_snowfracn) - call define_hist_field(n_fsurfn_ai,"fsurfn_ai","W/m^2",tstr3Dc, tcstr, & + call define_hist_field(n_fsurfn_ai,"fsurfn_ai","W/m^2",tstr3Dc, tcstr, & "net surface heat flux, categories","weighted by ice area", c1, c0, & ns1, f_fsurfn_ai) - + call define_hist_field(n_fcondtopn_ai,"fcondtopn_ai","W/m^2",tstr3Dc, tcstr, & "top sfc conductive heat flux, cat","weighted by ice area", c1, c0, & ns1, f_fcondtopn_ai) - call define_hist_field(n_fmelttn_ai,"fmelttn_ai","W/m^2",tstr3Dc, tcstr, & - "net sfc heat flux causing melt, cat","weighted by ice area", c1, c0, & + call define_hist_field(n_fmelttn_ai,"fmelttn_ai","W/m^2",tstr3Dc, tcstr, & + "net sfc heat flux causing melt, cat","weighted by ice area", c1, c0, & ns1, f_fmelttn_ai) - call define_hist_field(n_flatn_ai,"flatn_ai","W/m^2",tstr3Dc, tcstr, & - "latent heat flux, category","weighted by ice area", c1, c0, & + call define_hist_field(n_flatn_ai,"flatn_ai","W/m^2",tstr3Dc, tcstr, & + "latent heat flux, category","weighted by ice area", c1, c0, & ns1, f_flatn_ai) - call define_hist_field(n_fsensn_ai,"fsensn_ai","W/m^2",tstr3Dc, tcstr, & - "sensible heat flux, category","weighted by ice area", c1, c0, & + call define_hist_field(n_fsensn_ai,"fsensn_ai","W/m^2",tstr3Dc, tcstr, & + "sensible heat flux, category","weighted by ice area", c1, c0, & ns1, f_fsensn_ai) call define_hist_field(n_keffn_top,"keffn_top","W/m^2/K",tstr3Dc, tcstr, & "effective thermal conductivity of the top ice layer, categories", & - "multilayer scheme", c1, c0, & + "multilayer scheme", c1, c0, & ns1, f_keffn_top) ! CMIP 3D @@ -1876,16 +1876,16 @@ subroutine init_hist (dt) ! do ns1 = 1, nstreams ! if (histfreq(ns1) /= 'x') then -! call define_hist_field(n_field3dz,"field3dz","1",tstr3Dz, tcstr, & +! call define_hist_field(n_field3dz,"field3dz","1",tstr3Dz, tcstr, & ! "example 3dz field", & ! "vertical profile", c1, c0, & ! ns1, f_field3dz) ! endif ! if (histfreq(ns1) /= 'x') then -! enddo ! ns1 +! enddo ! ns1 ! biogeochemistry - call init_hist_bgc_3Db + call init_hist_bgc_3Db call init_hist_bgc_3Da !----------------------------------------------------------------- @@ -1902,12 +1902,12 @@ subroutine init_hist (dt) do ns1 = 1, nstreams if (histfreq(ns1) /= 'x') then - call define_hist_field(n_Tinz,"Tinz","C",tstr4Di, tcstr, & + call define_hist_field(n_Tinz,"Tinz","C",tstr4Di, tcstr, & "ice internal temperatures on CICE grid", & "vertical profile", c1, c0, & ns1, f_Tinz) - call define_hist_field(n_Sinz,"Sinz","ppt",tstr4Di, tcstr, & + call define_hist_field(n_Sinz,"Sinz","ppt",tstr4Di, tcstr, & "ice internal bulk salinity", & "vertical profile", c1, c0, & ns1, f_Sinz) @@ -1918,7 +1918,7 @@ subroutine init_hist (dt) do ns1 = 1, nstreams if (histfreq(ns1) /= 'x') then - call define_hist_field(n_Tsnz,"Tsnz","C",tstr4Ds, tcstr, & + call define_hist_field(n_Tsnz,"Tsnz","C",tstr4Ds, tcstr, & "snow internal temperatures", & "vertical profile", c1, c0, & ns1, f_Tsnz) @@ -2071,8 +2071,8 @@ subroutine init_hist (dt) if (restart .and. yday >= c2) then ! 
restarting midyear gives erroneous onset dates - mlt_onset = 999._dbl_kind - frz_onset = 999._dbl_kind + mlt_onset = 999._dbl_kind + frz_onset = 999._dbl_kind else mlt_onset = c0 frz_onset = c0 @@ -2150,7 +2150,7 @@ subroutine accum_hist (dt) ravgct , & ! 1/avgct ravgctz ! 1/avgct - real (kind=dbl_kind) :: & + real (kind=dbl_kind) :: & qn , & ! temporary variable for enthalpy sn ! temporary variable for salinity @@ -2208,7 +2208,7 @@ subroutine accum_hist (dt) if (avail_hist_fields(n)%vhistfreq == histfreq(ns)) & a2D(:,:,n,:) = c0 enddo - do n = n2D + 1, n3Dccum + do n = n2D + 1, n3Dccum nn = n - n2D if (avail_hist_fields(n)%vhistfreq == histfreq(ns)) & a3Dc(:,:,:,nn,:) = c0 @@ -2267,7 +2267,7 @@ subroutine accum_hist (dt) !$OMP worka,workb,worka3,Tinz4d,Sinz4d,Tsnz4d) do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo @@ -2526,7 +2526,7 @@ subroutine accum_hist (dt) call accum_hist_field(n_fswthru, iblk, fswthru(:,:,iblk), a2D) if (f_fswthru_ai(1:1)/= 'x') & call accum_hist_field(n_fswthru_ai,iblk, fswthru_ai(:,:,iblk), a2D) - + if (f_strairx(1:1) /= 'x') & call accum_hist_field(n_strairx, iblk, strairxU(:,:,iblk), a2D) if (f_strairy(1:1) /= 'x') & @@ -3219,7 +3219,7 @@ subroutine accum_hist (dt) if ( ktherm == 2) then dfresh = -rhoi*(frazil(i,j,iblk)-frazil_diag(i,j,iblk))/dt else - dfresh = -rhoi*frazil(i,j,iblk)/dt + dfresh = -rhoi*frazil(i,j,iblk)/dt endif endif dfsalt = ice_ref_salinity*p001*dfresh @@ -3241,7 +3241,7 @@ subroutine accum_hist (dt) if ( ktherm == 2) then dfresh = -rhoi*(frazil(i,j,iblk)-frazil_diag(i,j,iblk))/dt else - dfresh = -rhoi*frazil(i,j,iblk)/dt + dfresh = -rhoi*frazil(i,j,iblk)/dt endif endif worka(i,j) = aice(i,j,iblk)*(fresh(i,j,iblk)+dfresh) @@ -3395,7 +3395,7 @@ subroutine accum_hist (dt) if (f_fsensn_ai (1:1) /= 'x') & call accum_hist_field(n_fsensn_ai-n2D, iblk, ncat_hist, & fsensn(:,:,1:ncat_hist,iblk)*aicen_init(:,:,1:ncat_hist,iblk), a3Dc) - ! Calculate surface heat flux that causes melt (calculated by the + ! Calculate surface heat flux that causes melt (calculated by the ! atmos in HadGEM3 so needed for checking purposes) if (f_fmelttn_ai (1:1) /= 'x') & call accum_hist_field(n_fmelttn_ai-n2D, iblk, ncat_hist, & @@ -3484,7 +3484,7 @@ subroutine accum_hist (dt) call accum_hist_field(n_Sinz-n3Dfcum, iblk, nzilyr, ncat_hist, & Sinz4d(:,:,1:nzilyr,1:ncat_hist), a4Di) endif - + endif ! if (allocated(a3Dc)) if (allocated(a4Ds)) then @@ -3504,7 +3504,7 @@ subroutine accum_hist (dt) call accum_hist_field(n_Tsnz-n4Dicum, iblk, nzslyr, ncat_hist, & Tsnz4d(:,:,1:nzslyr,1:ncat_hist), a4Ds) endif - + endif ! if (allocated(a4Ds)) if (allocated(a3Dc) .and. allocated(a2D)) then @@ -3528,7 +3528,7 @@ subroutine accum_hist (dt) enddo endif - endif + endif !--------------------------------------------------------------- ! accumulate other history output !--------------------------------------------------------------- @@ -3569,14 +3569,14 @@ subroutine accum_hist (dt) if (write_history(ns) .or. write_ic) then !--------------------------------------------------------------- - ! Mask out land points and convert units + ! 
Mask out land points and convert units !--------------------------------------------------------------- ravgct = c1/avgct(ns) !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block, & !$OMP n,nn,ravgctz,ravgip,ravgipn) do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo @@ -3611,7 +3611,7 @@ subroutine accum_hist (dt) endif do n = 1, num_avail_hist_fields_2D - if (avail_hist_fields(n)%vhistfreq == histfreq(ns)) then + if (avail_hist_fields(n)%vhistfreq == histfreq(ns)) then do j = jlo, jhi do i = ilo, ihi @@ -4125,7 +4125,7 @@ subroutine accum_hist (dt) if (avail_hist_fields(n)%vname(1:6) == 'albice') then do j = jlo, jhi do i = ilo, ihi - if (tmask(i,j,iblk)) then + if (tmask(i,j,iblk)) then ravgctz = c0 if (albcnt(i,j,iblk,ns) > puny) & ravgctz = c1/albcnt(i,j,iblk,ns) @@ -4145,7 +4145,7 @@ subroutine accum_hist (dt) if (avail_hist_fields(n)%vname(1:6) == 'albsni') then do j = jlo, jhi do i = ilo, ihi - if (tmask(i,j,iblk)) then + if (tmask(i,j,iblk)) then ravgctz = c0 if (albcnt(i,j,iblk,ns) > puny) & ravgctz = c1/albcnt(i,j,iblk,ns) @@ -4159,7 +4159,7 @@ subroutine accum_hist (dt) if (avail_hist_fields(n)%vname(1:8) == 'alvdr_ai') then do j = jlo, jhi do i = ilo, ihi - if (tmask(i,j,iblk)) then + if (tmask(i,j,iblk)) then ravgctz = c0 if (albcnt(i,j,iblk,ns) > puny) & ravgctz = c1/albcnt(i,j,iblk,ns) @@ -4216,7 +4216,7 @@ subroutine accum_hist (dt) do n = 1, num_avail_hist_fields_3Dc nn = n2D + n - if (avail_hist_fields(nn)%vhistfreq == histfreq(ns)) then + if (avail_hist_fields(nn)%vhistfreq == histfreq(ns)) then do k = 1, ncat_hist do j = jlo, jhi @@ -4265,7 +4265,7 @@ subroutine accum_hist (dt) do n = 1, num_avail_hist_fields_3Dz nn = n3Dccum + n - if (avail_hist_fields(nn)%vhistfreq == histfreq(ns)) then + if (avail_hist_fields(nn)%vhistfreq == histfreq(ns)) then do k = 1, nzilyr do j = jlo, jhi @@ -4283,7 +4283,7 @@ subroutine accum_hist (dt) enddo ! 
n do n = 1, num_avail_hist_fields_3Db nn = n3Dzcum + n - if (avail_hist_fields(nn)%vhistfreq == histfreq(ns)) then + if (avail_hist_fields(nn)%vhistfreq == histfreq(ns)) then do k = 1, nzblyr do j = jlo, jhi do i = ilo, ihi @@ -4301,7 +4301,7 @@ subroutine accum_hist (dt) do n = 1, num_avail_hist_fields_3Da nn = n3Dbcum + n - if (avail_hist_fields(nn)%vhistfreq == histfreq(ns)) then + if (avail_hist_fields(nn)%vhistfreq == histfreq(ns)) then do k = 1, nzalyr do j = jlo, jhi do i = ilo, ihi @@ -4337,7 +4337,7 @@ subroutine accum_hist (dt) do n = 1, num_avail_hist_fields_4Di nn = n3Dfcum + n - if (avail_hist_fields(nn)%vhistfreq == histfreq(ns)) then + if (avail_hist_fields(nn)%vhistfreq == histfreq(ns)) then do k = 1, nzilyr do ic = 1, ncat_hist do j = jlo, jhi @@ -4357,7 +4357,7 @@ subroutine accum_hist (dt) do n = 1, num_avail_hist_fields_4Ds nn = n4Dicum + n - if (avail_hist_fields(nn)%vhistfreq == histfreq(ns)) then + if (avail_hist_fields(nn)%vhistfreq == histfreq(ns)) then do k = 1, nzslyr do ic = 1, ncat_hist do j = jlo, jhi @@ -4460,7 +4460,7 @@ subroutine accum_hist (dt) if (n_sig2 (ns) /= 0) a2D(i,j,n_sig2(ns),iblk) = & sig2 (i,j,iblk)*avail_hist_fields(n_sig2(ns))%cona if (n_sigP (ns) /= 0) a2D(i,j,n_sigP(ns),iblk) = & - sigP (i,j,iblk)*avail_hist_fields(n_sigP(ns))%cona + sigP (i,j,iblk)*avail_hist_fields(n_sigP(ns))%cona if (n_sistreave(ns) /= 0) a2D(i,j,n_sistreave(ns),iblk) = & p5*(sig1(i,j,iblk)+sig2(i,j,iblk))*avail_hist_fields(n_sistreave(ns))%cona if (n_sistremax(ns) /= 0) a2D(i,j,n_sistremax(ns),iblk) = & @@ -4557,8 +4557,8 @@ subroutine accum_hist (dt) do n = 1,n2D if (avail_hist_fields(n)%vhistfreq == histfreq(ns)) a2D(:,:,n,:) = c0 enddo - do n = n2D + 1, n3Dccum - nn = n - n2D + do n = n2D + 1, n3Dccum + nn = n - n2D if (avail_hist_fields(n)%vhistfreq == histfreq(ns)) a3Dc(:,:,:,nn,:) = c0 enddo do n = n3Dccum + 1, n3Dzcum @@ -4595,7 +4595,7 @@ subroutine accum_hist (dt) !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo @@ -4606,13 +4606,13 @@ subroutine accum_hist (dt) do i=ilo,ihi ! reset NH Jan 1 if (lmask_n(i,j,iblk)) mlt_onset(i,j,iblk) = c0 - ! reset SH Jan 1 + ! reset SH Jan 1 if (lmask_s(i,j,iblk)) frz_onset(i,j,iblk) = c0 enddo enddo endif ! new_year - if ( (mmonth .eq. 7) .and. new_month ) then + if ( (mmonth .eq. 7) .and. new_month ) then do j=jlo,jhi do i=ilo,ihi ! reset SH Jul 1 diff --git a/cicecore/cicedynB/analysis/ice_history_bgc.F90 b/cicecore/cicedynB/analysis/ice_history_bgc.F90 index 8802cf431..003e76120 100644 --- a/cicecore/cicedynB/analysis/ice_history_bgc.F90 +++ b/cicecore/cicedynB/analysis/ice_history_bgc.F90 @@ -21,74 +21,74 @@ module ice_history_bgc icepack_query_tracer_indices, icepack_query_parameters, & icepack_query_parameters use ice_domain_size, only: max_nstrm, n_iso, n_aero, & - n_algae, n_dic, n_doc, n_don, n_zaero, n_fed, n_fep + n_algae, n_dic, n_doc, n_don, n_zaero, n_fed, n_fep implicit none private public :: init_hist_bgc_2D, init_hist_bgc_3Dc, & init_hist_bgc_3Db, init_hist_bgc_3Da,& accum_hist_bgc, init_history_bgc - + !--------------------------------------------------------------- ! flags: write to output file if true or histfreq value !--------------------------------------------------------------- ! 
specified in input_templates !-------------------------------------------------------------- - character (len=max_nstrm), public :: & + character (len=max_nstrm), public :: & f_fiso_atm = 'x', f_fiso_ocn = 'x', & f_iso = 'x', & f_faero_atm = 'x', f_faero_ocn = 'x', & f_aero = 'x', & - f_fzsal = 'm', f_fzsal_ai = 'm', & + f_fzsal = 'm', f_fzsal_ai = 'm', & f_fzsal_g = 'm', f_fzsal_g_ai = 'm', & f_zsal = 'x', & - f_fbio = 'x', f_fbio_ai = 'x', & - f_zaero = 'x', f_bgc_S = 'x', & + f_fbio = 'x', f_fbio_ai = 'x', & + f_zaero = 'x', f_bgc_S = 'x', & f_bgc_N = 'x', f_bgc_C = 'x', & f_bgc_DOC = 'x', f_bgc_DIC = 'x', & f_bgc_chl = 'x', f_bgc_Nit = 'x', & f_bgc_Am = 'x', f_bgc_Sil = 'x', & f_bgc_DMSPp = 'x', f_bgc_DMSPd = 'x', & - f_bgc_DMS = 'x', f_bgc_DON = 'x', & + f_bgc_DMS = 'x', f_bgc_DON = 'x', & f_bgc_Fe = 'x', f_bgc_hum = 'x', & f_bgc_PON = 'x', f_bgc_ml = 'x', & - f_upNO = 'x', f_upNH = 'x', & - f_bTin = 'x', f_bphi = 'x', & - f_iDi = 'x', f_iki = 'x', & + f_upNO = 'x', f_upNH = 'x', & + f_bTin = 'x', f_bphi = 'x', & + f_iDi = 'x', f_iki = 'x', & f_fbri = 'x', f_hbri = 'x', & - f_zfswin = 'x', f_grownet = 'x', & - f_bionet = 'x', f_biosnow = 'x', & + f_zfswin = 'x', f_grownet = 'x', & + f_bionet = 'x', f_biosnow = 'x', & f_PPnet = 'x', f_algalpeak = 'x', & f_zbgc_frac = 'x', & !------------------------------------------------ ! specified by combinations of above values !------------------------------------------------- f_bgc_Fed = 'x', f_bgc_Fep = 'x', & - f_DONnet = 'x', & - f_DICnet = 'x', f_DOCnet = 'x', & - f_chlnet = 'x', f_Nitnet = 'x', & - f_Amnet = 'x', f_Cnet = 'x', & - f_Nnet = 'x', f_DMSPpnet = 'x', & - f_DMSPdnet = 'x', f_DMSnet = 'x', & - f_Fednet = 'x', f_Fepnet = 'x', & + f_DONnet = 'x', & + f_DICnet = 'x', f_DOCnet = 'x', & + f_chlnet = 'x', f_Nitnet = 'x', & + f_Amnet = 'x', f_Cnet = 'x', & + f_Nnet = 'x', f_DMSPpnet = 'x', & + f_DMSPdnet = 'x', f_DMSnet = 'x', & + f_Fednet = 'x', f_Fepnet = 'x', & f_Silnet = 'x', f_PONnet = 'x', & - f_zaeronet = 'x', f_humnet = 'x', & - f_chlsnow = 'x', f_Nitsnow = 'x', & - f_Amsnow = 'x', f_Csnow = 'x', & - f_Nsnow = 'x', f_DMSPpsnow = 'x', & - f_DMSPdsnow = 'x', f_DMSsnow = 'x', & - f_Fedsnow = 'x', f_Fepsnow = 'x', & - f_Silsnow = 'x', f_PONsnow = 'x', & + f_zaeronet = 'x', f_humnet = 'x', & + f_chlsnow = 'x', f_Nitsnow = 'x', & + f_Amsnow = 'x', f_Csnow = 'x', & + f_Nsnow = 'x', f_DMSPpsnow = 'x', & + f_DMSPdsnow = 'x', f_DMSsnow = 'x', & + f_Fedsnow = 'x', f_Fepsnow = 'x', & + f_Silsnow = 'x', f_PONsnow = 'x', & f_humsnow = 'x', & - f_DICsnow = 'x', f_DOCsnow = 'x', & + f_DICsnow = 'x', f_DOCsnow = 'x', & f_DONsnow = 'x', f_zaerosnow = 'x', & - f_chlfrac = 'x', f_Nitfrac = 'x', & - f_Amfrac = 'x', & - f_Nfrac = 'x', f_DMSPpfrac = 'x', & - f_DMSPdfrac = 'x', f_DMSfrac = 'x', & - f_Silfrac = 'x', f_PONfrac = 'x', & + f_chlfrac = 'x', f_Nitfrac = 'x', & + f_Amfrac = 'x', & + f_Nfrac = 'x', f_DMSPpfrac = 'x', & + f_DMSPdfrac = 'x', f_DMSfrac = 'x', & + f_Silfrac = 'x', f_PONfrac = 'x', & f_humfrac = 'x', & - f_DICfrac = 'x', f_DOCfrac = 'x', & + f_DICfrac = 'x', f_DOCfrac = 'x', & f_DONfrac = 'x', f_zaerofrac = 'x', & f_Fedfrac = 'x', f_Fepfrac = 'x', & f_fNit = 'x', f_fNit_ai = 'x', & @@ -99,13 +99,13 @@ module ice_history_bgc f_fDON = 'x', f_fDON_ai = 'x', & f_fFed = 'x', f_fFed_ai = 'x', & f_fFep = 'x', f_fFep_ai = 'x', & - f_fSil = 'x', f_fSil_ai = 'x', & - f_fPON = 'x', f_fPON_ai = 'x', & - f_fhum = 'x', f_fhum_ai = 'x', & - f_fDMSPp = 'x', f_fDMSPp_ai = 'x', & - f_fDMSPd = 'x', f_fDMSPd_ai = 'x', & - f_fDMS = 'x', f_fDMS_ai = 'x', 
& - f_fzaero = 'x', f_fzaero_ai = 'x', & + f_fSil = 'x', f_fSil_ai = 'x', & + f_fPON = 'x', f_fPON_ai = 'x', & + f_fhum = 'x', f_fhum_ai = 'x', & + f_fDMSPp = 'x', f_fDMSPp_ai = 'x', & + f_fDMSPd = 'x', f_fDMSPd_ai = 'x', & + f_fDMS = 'x', f_fDMS_ai = 'x', & + f_fzaero = 'x', f_fzaero_ai = 'x', & f_bgc_Sil_ml = 'x', & f_bgc_Nit_ml = 'x', f_bgc_Am_ml = 'x', & f_bgc_DMSP_ml = 'x', f_bgc_DMS_ml = 'x', & @@ -140,12 +140,12 @@ module ice_history_bgc f_bgc_DMS , f_bgc_DON , & f_bgc_Fe , f_bgc_hum , & f_bgc_PON , f_bgc_ml , & - f_upNO , f_upNH , & + f_upNO , f_upNH , & f_bTin , f_bphi , & - f_iDi , f_iki , & + f_iDi , f_iki , & f_fbri , f_hbri , & - f_zfswin , f_grownet , & - f_bionet , f_biosnow , & + f_zfswin , f_grownet , & + f_bionet , f_biosnow , & f_PPnet , f_algalpeak , & f_zbgc_frac @@ -154,9 +154,9 @@ module ice_history_bgc !--------------------------------------------------------------- integer (kind=int_kind), dimension(max_nstrm), public :: & - n_fzsal , n_fzsal_ai , & - n_fzsal_g , n_fzsal_g_ai , & - n_zsal + n_fzsal , n_fzsal_ai , & + n_fzsal_g , n_fzsal_g_ai , & + n_zsal integer(kind=int_kind), dimension(icepack_max_iso,max_nstrm) :: & n_fiso_atm , & @@ -216,7 +216,7 @@ module ice_history_bgc n_bgc_Fed_cat1, n_bgc_Fep_cat1 integer(kind=int_kind), dimension(max_nstrm) :: & - n_bgc_S , & + n_bgc_S , & n_fNit , n_fNit_ai , & n_fAm , n_fAm_ai , & n_fSil , n_fSil_ai , & @@ -233,25 +233,25 @@ module ice_history_bgc n_bgc_hum_ml , & n_bgc_Nit_ml , n_bgc_Am_ml , & n_bgc_DMSP_ml , n_bgc_DMS_ml , & - n_upNO , n_upNH , & + n_upNO , n_upNH , & n_bTin , n_bphi , & n_iDi , n_iki , & n_bgc_PON , & n_fbri , n_hbri , & - n_zfswin , n_Nitnet , & - n_Amnet , n_Silnet , & + n_zfswin , n_Nitnet , & + n_Amnet , n_Silnet , & n_humnet , & - n_DMSPpnet , n_DMSPdnet , & - n_DMSnet , n_PONnet , & + n_DMSPpnet , n_DMSPdnet , & + n_DMSnet , n_PONnet , & n_Nitsnow , n_Amsnow , & n_Silsnow , n_humsnow , & - n_DMSPpsnow , n_DMSPdsnow , & - n_DMSsnow , n_PONsnow , & + n_DMSPpsnow , n_DMSPdsnow , & + n_DMSsnow , n_PONsnow , & n_Nitfrac , n_Amfrac , & n_Silfrac , & n_humfrac , & - n_DMSPpfrac , n_DMSPdfrac , & - n_DMSfrac , n_PONfrac , & + n_DMSPpfrac , n_DMSPdfrac , & + n_DMSfrac , n_PONfrac , & n_grownet , n_PPnet , & n_bgc_Nit_cat1, n_bgc_Am_cat1 , & n_bgc_Sil_cat1, n_bgc_DMSPd_cat1,& @@ -295,7 +295,7 @@ subroutine init_hist_bgc_2D tr_bgc_PON_out=tr_bgc_PON, & tr_bgc_N_out =tr_bgc_N, tr_bgc_C_out =tr_bgc_C, & tr_bgc_chl_out=tr_bgc_chl, tr_bgc_DON_out=tr_bgc_DON, & - tr_bgc_Fe_out =tr_bgc_Fe, tr_bgc_hum_out=tr_bgc_hum ) + tr_bgc_Fe_out =tr_bgc_Fe, tr_bgc_hum_out=tr_bgc_hum ) call icepack_warnings_flush(nu_diag) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) @@ -336,14 +336,14 @@ subroutine init_hist_bgc_2D if (.not. tr_aero) then f_faero_atm = 'x' f_faero_ocn = 'x' - f_aero = 'x' + f_aero = 'x' endif - + if (.not. tr_brine) then f_fbri = 'x' f_hbri = 'x' endif - + f_zaeronet = f_bionet f_zaerosnow = f_biosnow f_zaerofrac = f_zbgc_frac @@ -352,7 +352,7 @@ subroutine init_hist_bgc_2D if (.not. tr_zaero) then f_zaero = 'x' - f_fzaero = 'x' + f_fzaero = 'x' f_fzaero_ai = 'x' f_zaeronet = 'x' f_zaerosnow = 'x' @@ -396,7 +396,7 @@ subroutine init_hist_bgc_2D f_DMSPdnet = f_bionet f_DMSnet = f_bionet f_PONnet = f_bionet - + f_Nitsnow = f_biosnow f_Amsnow = f_biosnow f_Nsnow = f_biosnow @@ -466,7 +466,7 @@ subroutine init_hist_bgc_2D f_fDMSPd_ai = f_fbio_ai f_fDMS_ai = f_fbio_ai - if (.not. tr_bgc_N) then + if (.not. 
tr_bgc_N) then f_bgc_N = 'x' f_bgc_N_ml = 'x' f_fN = 'x' @@ -478,8 +478,8 @@ subroutine init_hist_bgc_2D endif f_peakval = f_algalpeak - if (.not. tr_bgc_Nit) then - f_upNO = 'x' + if (.not. tr_bgc_Nit) then + f_upNO = 'x' f_bgc_Nit = 'x' f_bgc_Nit_ml= 'x' f_fNit = 'x' @@ -511,8 +511,8 @@ subroutine init_hist_bgc_2D f_chlsnow = 'x' f_chlfrac = 'x' endif - if (.not. tr_bgc_Am) then - f_upNH = 'x' + if (.not. tr_bgc_Am) then + f_upNH = 'x' f_bgc_Am = 'x' f_bgc_Am_ml = 'x' f_fAm = 'x' @@ -560,8 +560,8 @@ subroutine init_hist_bgc_2D f_DMSfrac = 'x' f_DMSPpfrac = 'x' f_DMSPdfrac = 'x' - endif - if (.not. tr_bgc_DON) then + endif + if (.not. tr_bgc_DON) then f_bgc_DON = 'x' f_bgc_DON_ml = 'x' f_DONsnow = 'x' @@ -569,8 +569,8 @@ subroutine init_hist_bgc_2D f_DONnet = 'x' f_fDON = 'x' f_fDON_ai = 'x' - endif - if (.not. tr_bgc_Fe ) then + endif + if (.not. tr_bgc_Fe ) then f_bgc_Fe = 'x' f_bgc_Fed = 'x' f_bgc_Fed_ml = 'x' @@ -587,7 +587,7 @@ subroutine init_hist_bgc_2D f_fFep = 'x' f_fFep_ai = 'x' endif - if (.not. tr_bgc_PON .or. skl_bgc) then + if (.not. tr_bgc_PON .or. skl_bgc) then f_bgc_PON = 'x' f_PONsnow = 'x' f_PONfrac = 'x' @@ -595,19 +595,19 @@ subroutine init_hist_bgc_2D f_fPON = 'x' f_fPON_ai = 'x' endif - - f_bgc_Nit_cat1 = f_bgc_Nit - f_bgc_Am_cat1 = f_bgc_Am + + f_bgc_Nit_cat1 = f_bgc_Nit + f_bgc_Am_cat1 = f_bgc_Am f_bgc_N_cat1 = f_bgc_N f_bgc_DOC_cat1 = f_bgc_DOC f_bgc_DIC_cat1 = f_bgc_DIC f_bgc_DON_cat1 = f_bgc_DON - f_bgc_Fed_cat1 = f_bgc_Fe - f_bgc_Fep_cat1 = f_bgc_Fe - f_bgc_Sil_cat1 = f_bgc_Sil - f_bgc_hum_cat1 = f_bgc_hum + f_bgc_Fed_cat1 = f_bgc_Fe + f_bgc_Fep_cat1 = f_bgc_Fe + f_bgc_Sil_cat1 = f_bgc_Sil + f_bgc_hum_cat1 = f_bgc_hum f_bgc_DMSPd_cat1 = f_bgc_DMSPd - f_bgc_DMS_cat1 = f_bgc_DMS + f_bgc_DMS_cat1 = f_bgc_DMS f_bgc_PON_cat1 = f_bgc_PON if (solve_zsal) then @@ -711,73 +711,73 @@ subroutine init_hist_bgc_2D call broadcast_scalar (f_bgc_Sil_ml, master_task) call broadcast_scalar (f_bgc_hum_ml, master_task) call broadcast_scalar (f_bgc_DMSP_ml, master_task) - call broadcast_scalar (f_bgc_DMS_ml, master_task) - call broadcast_scalar (f_bgc_DON_ml, master_task) - call broadcast_scalar (f_bgc_Fed_ml, master_task) - call broadcast_scalar (f_bgc_Fep_ml, master_task) - call broadcast_scalar (f_upNO, master_task) - call broadcast_scalar (f_upNH, master_task) + call broadcast_scalar (f_bgc_DMS_ml, master_task) + call broadcast_scalar (f_bgc_DON_ml, master_task) + call broadcast_scalar (f_bgc_Fed_ml, master_task) + call broadcast_scalar (f_bgc_Fep_ml, master_task) + call broadcast_scalar (f_upNO, master_task) + call broadcast_scalar (f_upNH, master_task) call broadcast_scalar (f_bTin, master_task) call broadcast_scalar (f_bphi, master_task) - call broadcast_scalar (f_iDi, master_task) - call broadcast_scalar (f_iki, master_task) - call broadcast_scalar (f_bgc_S, master_task) - call broadcast_scalar (f_zfswin, master_task) - call broadcast_scalar (f_PPnet, master_task) - call broadcast_scalar (f_algalpeak, master_task) - call broadcast_scalar (f_zbgc_frac, master_task) - call broadcast_scalar (f_peakval, master_task) - call broadcast_scalar (f_grownet, master_task) - call broadcast_scalar (f_chlnet, master_task) - call broadcast_scalar (f_Nitnet, master_task) - call broadcast_scalar (f_Nnet, master_task) - call broadcast_scalar (f_Cnet, master_task) - call broadcast_scalar (f_DOCnet, master_task) - call broadcast_scalar (f_DICnet, master_task) - call broadcast_scalar (f_Amnet, master_task) - call broadcast_scalar (f_Silnet, master_task) - call broadcast_scalar (f_humnet, 
master_task) - call broadcast_scalar (f_DMSPpnet, master_task) - call broadcast_scalar (f_DMSPdnet, master_task) - call broadcast_scalar (f_DMSnet, master_task) - call broadcast_scalar (f_PONnet, master_task) - call broadcast_scalar (f_DONnet, master_task) - call broadcast_scalar (f_Fednet, master_task) - call broadcast_scalar (f_Fepnet, master_task) - call broadcast_scalar (f_zaeronet, master_task) - call broadcast_scalar (f_chlsnow, master_task) - call broadcast_scalar (f_Nitsnow, master_task) - call broadcast_scalar (f_Nsnow, master_task) - call broadcast_scalar (f_Csnow, master_task) - call broadcast_scalar (f_DOCsnow, master_task) - call broadcast_scalar (f_DICsnow, master_task) - call broadcast_scalar (f_Amsnow, master_task) - call broadcast_scalar (f_Silsnow, master_task) - call broadcast_scalar (f_humsnow, master_task) - call broadcast_scalar (f_DMSPpsnow, master_task) - call broadcast_scalar (f_DMSPdsnow, master_task) - call broadcast_scalar (f_DMSsnow, master_task) - call broadcast_scalar (f_PONsnow, master_task) - call broadcast_scalar (f_DONsnow, master_task) - call broadcast_scalar (f_Fedsnow, master_task) - call broadcast_scalar (f_Fepsnow, master_task) - call broadcast_scalar (f_zaerosnow, master_task) - call broadcast_scalar (f_chlfrac, master_task) - call broadcast_scalar (f_Nitfrac, master_task) - call broadcast_scalar (f_Nfrac, master_task) - call broadcast_scalar (f_DOCfrac, master_task) - call broadcast_scalar (f_DICfrac, master_task) - call broadcast_scalar (f_Amfrac, master_task) - call broadcast_scalar (f_Silfrac, master_task) - call broadcast_scalar (f_humfrac, master_task) - call broadcast_scalar (f_DMSPpfrac, master_task) - call broadcast_scalar (f_DMSPdfrac, master_task) - call broadcast_scalar (f_DMSfrac, master_task) - call broadcast_scalar (f_PONfrac, master_task) - call broadcast_scalar (f_DONfrac, master_task) - call broadcast_scalar (f_Fedfrac, master_task) - call broadcast_scalar (f_Fepfrac, master_task) - call broadcast_scalar (f_zaerofrac, master_task) + call broadcast_scalar (f_iDi, master_task) + call broadcast_scalar (f_iki, master_task) + call broadcast_scalar (f_bgc_S, master_task) + call broadcast_scalar (f_zfswin, master_task) + call broadcast_scalar (f_PPnet, master_task) + call broadcast_scalar (f_algalpeak, master_task) + call broadcast_scalar (f_zbgc_frac, master_task) + call broadcast_scalar (f_peakval, master_task) + call broadcast_scalar (f_grownet, master_task) + call broadcast_scalar (f_chlnet, master_task) + call broadcast_scalar (f_Nitnet, master_task) + call broadcast_scalar (f_Nnet, master_task) + call broadcast_scalar (f_Cnet, master_task) + call broadcast_scalar (f_DOCnet, master_task) + call broadcast_scalar (f_DICnet, master_task) + call broadcast_scalar (f_Amnet, master_task) + call broadcast_scalar (f_Silnet, master_task) + call broadcast_scalar (f_humnet, master_task) + call broadcast_scalar (f_DMSPpnet, master_task) + call broadcast_scalar (f_DMSPdnet, master_task) + call broadcast_scalar (f_DMSnet, master_task) + call broadcast_scalar (f_PONnet, master_task) + call broadcast_scalar (f_DONnet, master_task) + call broadcast_scalar (f_Fednet, master_task) + call broadcast_scalar (f_Fepnet, master_task) + call broadcast_scalar (f_zaeronet, master_task) + call broadcast_scalar (f_chlsnow, master_task) + call broadcast_scalar (f_Nitsnow, master_task) + call broadcast_scalar (f_Nsnow, master_task) + call broadcast_scalar (f_Csnow, master_task) + call broadcast_scalar (f_DOCsnow, master_task) + call broadcast_scalar (f_DICsnow, 
master_task) + call broadcast_scalar (f_Amsnow, master_task) + call broadcast_scalar (f_Silsnow, master_task) + call broadcast_scalar (f_humsnow, master_task) + call broadcast_scalar (f_DMSPpsnow, master_task) + call broadcast_scalar (f_DMSPdsnow, master_task) + call broadcast_scalar (f_DMSsnow, master_task) + call broadcast_scalar (f_PONsnow, master_task) + call broadcast_scalar (f_DONsnow, master_task) + call broadcast_scalar (f_Fedsnow, master_task) + call broadcast_scalar (f_Fepsnow, master_task) + call broadcast_scalar (f_zaerosnow, master_task) + call broadcast_scalar (f_chlfrac, master_task) + call broadcast_scalar (f_Nitfrac, master_task) + call broadcast_scalar (f_Nfrac, master_task) + call broadcast_scalar (f_DOCfrac, master_task) + call broadcast_scalar (f_DICfrac, master_task) + call broadcast_scalar (f_Amfrac, master_task) + call broadcast_scalar (f_Silfrac, master_task) + call broadcast_scalar (f_humfrac, master_task) + call broadcast_scalar (f_DMSPpfrac, master_task) + call broadcast_scalar (f_DMSPdfrac, master_task) + call broadcast_scalar (f_DMSfrac, master_task) + call broadcast_scalar (f_PONfrac, master_task) + call broadcast_scalar (f_DONfrac, master_task) + call broadcast_scalar (f_Fedfrac, master_task) + call broadcast_scalar (f_Fepfrac, master_task) + call broadcast_scalar (f_zaerofrac, master_task) ! 2D variables @@ -820,28 +820,28 @@ subroutine init_hist_bgc_2D enddo endif - ! zsalinity - + ! zsalinity + call define_hist_field(n_fzsal,"fzsal","kg/m^2/s",tstr2D, tcstr, & "prognostic salt flux ice to ocn (cpl)", & "if positive, ocean gains salt", c1, c0, & ns, f_fzsal) - + call define_hist_field(n_fzsal_ai,"fzsal_ai","kg/m^2/s",tstr2D, tcstr, & "prognostic salt flux ice to ocean", & "weighted by ice area", c1, c0, & ns, f_fzsal_ai) - + call define_hist_field(n_fzsal_g,"fzsal_g","kg/m^2/s",tstr2D, tcstr, & "Gravity drainage salt flux ice to ocn (cpl)", & "if positive, ocean gains salt", c1, c0, & ns, f_fzsal_g) - + call define_hist_field(n_fzsal_g_ai,"fzsal_g_ai","kg/m^2/s",tstr2D, tcstr, & "Gravity drainage salt flux ice to ocean", & "weighted by ice area", c1, c0, & ns, f_fzsal_g_ai) - + call define_hist_field(n_zsal,"zsal_tot","g/m^2",tstr2D, tcstr, & "Total Salt content", & "In ice volume*fbri", c1, c0, & @@ -971,8 +971,8 @@ subroutine init_hist_bgc_2D "skeletal layer: bottom 3 cm", c1, c0, & ns, f_bgc_Fep ) enddo - endif !f_bgc_Fe - + endif !f_bgc_Fe + if (f_bgc_Nit(1:1) /= 'x') & call define_hist_field(n_bgc_Nit,"Nit","mmol/m^2",tstr2D, tcstr, & "Bulk skeletal nutrient (nitrate)", & @@ -1013,7 +1013,7 @@ subroutine init_hist_bgc_2D "Bulk dissolved skl trace gas (DMS)", & "skeletal layer: bottom 3 cm", c1, c0, & ns, f_bgc_DMS) - + endif !skl_bgc ! 
vertical and skeletal layer biogeochemistry @@ -1049,7 +1049,7 @@ subroutine init_hist_bgc_2D enddo endif if (f_bgc_Fed_ml (1:1) /= 'x') then - do n = 1, n_fed + do n = 1, n_fed write(nchar,'(i3.3)') n write(vname_in,'(a,a)') 'ml_dFe', trim(nchar) call define_hist_field(n_bgc_Fed_ml (n,:),vname_in,"nM",tstr2D, tcstr, & @@ -1059,7 +1059,7 @@ subroutine init_hist_bgc_2D enddo endif if (f_bgc_Fep_ml (1:1) /= 'x') then - do n = 1, n_fep + do n = 1, n_fep write(nchar,'(i3.3)') n write(vname_in,'(a,a)') 'ml_pFe', trim(nchar) call define_hist_field(n_bgc_Fep_ml (n,:),vname_in,"nM",tstr2D, tcstr, & @@ -1097,7 +1097,7 @@ subroutine init_hist_bgc_2D call define_hist_field(n_bgc_hum_ml,"ml_hum","mmol/m^3",tstr2D, tcstr, & "mixed layer humic material (carbon)", & "upper ocean", c1, c0, & - ns, f_bgc_hum_ml) + ns, f_bgc_hum_ml) if (f_bgc_DMSP_ml(1:1) /= 'x') & call define_hist_field(n_bgc_DMSP_ml,"ml_DMSP","mmol/m^3",tstr2D, tcstr, & "mixed layer precursor (DMSP)", & @@ -1108,30 +1108,30 @@ subroutine init_hist_bgc_2D "mixed layer trace gas (DMS)", & "upper ocean", c1, c0, & ns, f_bgc_DMS_ml) - + if (f_fNit(1:1) /= 'x') & call define_hist_field(n_fNit,"fNit","mmol/m^2/s",tstr2D, tcstr, & "nitrate flux ice to ocn (cpl)", & "if positive, ocean gains nitrate", c1, c0, & ns, f_fNit) - + if (f_fNit_ai(1:1) /= 'x') & call define_hist_field(n_fNit_ai,"fNit_ai","mmol/m^2/s",tstr2D, tcstr, & "nitrate flux ice to ocean", & "weighted by ice area", c1, c0, & ns, f_fNit_ai) - + if (f_fAm(1:1) /= 'x') & call define_hist_field(n_fAm,"fAm","mmol/m^2/s",tstr2D, tcstr, & "ammonium flux ice to ocn (cpl)", & "if positive, ocean gains ammonium", c1, c0, & ns, f_fAm) - + if (f_fAm_ai(1:1) /= 'x') & call define_hist_field(n_fAm_ai,"fAm_ai","mmol/m^2/s",tstr2D, tcstr, & "ammonium flux ice to ocean", & "weighted by ice area", c1, c0, & - ns, f_fAm_ai) + ns, f_fAm_ai) if (f_fN(1:1) /= 'x') then do n = 1, n_algae write(nchar,'(i3.3)') n @@ -1171,7 +1171,7 @@ subroutine init_hist_bgc_2D "weighted by ice area", c1, c0, & ns, f_fDOC_ai) enddo - endif + endif if (f_fDIC(1:1) /= 'x') then do n = 1, n_dic write(nchar,'(i3.3)') n @@ -1191,7 +1191,7 @@ subroutine init_hist_bgc_2D "weighted by ice area", c1, c0, & ns, f_fDIC_ai) enddo - endif + endif if (f_fDON(1:1) /= 'x') then do n = 1, n_don write(nchar,'(i3.3)') n @@ -1211,7 +1211,7 @@ subroutine init_hist_bgc_2D "weighted by ice area", c1, c0, & ns, f_fDON_ai) enddo - endif + endif if (f_fFed(1:1) /= 'x') then do n = 1, n_fed write(nchar,'(i3.3)') n @@ -1221,9 +1221,9 @@ subroutine init_hist_bgc_2D "positive to ocean", c1, c0, & ns, f_fFed ) enddo - endif + endif if (f_fFed_ai (1:1) /= 'x') then - do n = 1, n_fed + do n = 1, n_fed write(nchar,'(i3.3)') n write(vname_in,'(a,a)') 'fdFe_ai', trim(nchar) call define_hist_field(n_fFed_ai (n,:),vname_in,"umol/m^2/s",tstr2D, tcstr, & @@ -1231,7 +1231,7 @@ subroutine init_hist_bgc_2D "weighted by ice area", c1, c0, & ns, f_fFed_ai ) enddo - endif + endif if (f_fFep(1:1) /= 'x') then do n = 1, n_fep write(nchar,'(i3.3)') n @@ -1243,7 +1243,7 @@ subroutine init_hist_bgc_2D enddo endif if (f_fFep_ai (1:1) /= 'x') then - do n = 1, n_fep + do n = 1, n_fep write(nchar,'(i3.3)') n write(vname_in,'(a,a)') 'fpFe_ai', trim(nchar) call define_hist_field(n_fFep_ai (n,:),vname_in,"umol/m^2/s",tstr2D, tcstr, & @@ -1251,25 +1251,25 @@ subroutine init_hist_bgc_2D "weighted by ice area", c1, c0, & ns, f_fFep_ai ) enddo - endif + endif if (f_fSil(1:1) /= 'x') & call define_hist_field(n_fSil,"fSil","mmol/m^2/s",tstr2D, tcstr, & "silicate flux ice to ocn 
(cpl)", & "positive into ocean", c1, c0, & ns, f_fSil) - + if (f_fSil_ai(1:1) /= 'x') & call define_hist_field(n_fSil_ai,"fSil_ai","mmol/m^2/s",tstr2D, tcstr, & "silicate flux ice to ocean", & "weighted by ice area", c1, c0, & ns, f_fSil_ai) - + if (f_fhum(1:1) /= 'x') & call define_hist_field(n_fhum,"fhum","mmol/m^2/s",tstr2D, tcstr, & "humic matter (carbon) flux ice to ocn (cpl)", & "positive into ocean", c1, c0, & ns, f_fhum) - + if (f_fhum_ai(1:1) /= 'x') & call define_hist_field(n_fhum_ai,"fhum_ai","mmol/m^2/s",tstr2D, tcstr, & "humic matter (carbon) flux ice to ocean", & @@ -1336,19 +1336,19 @@ subroutine init_hist_bgc_2D "weighted by brine or skl volume ", c1, c0, & ns, f_grownet) - if (f_upNO(1:1) /= 'x') & + if (f_upNO(1:1) /= 'x') & call define_hist_field(n_upNO,"upNO","mmol/m^2/d",tstr2D, tcstr, & "Tot algal Nit uptake rate", & "weighted by ice area", c1, c0, & ns, f_upNO) - if (f_upNH(1:1) /= 'x') & + if (f_upNH(1:1) /= 'x') & call define_hist_field(n_upNH,"upNH","mmol/m^2/d",tstr2D, tcstr, & "Tot algal Am uptake rate", & "weighted by ice area", c1, c0,& ns, f_upNH) - ! vertical biogeochemistry + ! vertical biogeochemistry if (z_tracers) then if (f_fzaero(1:1) /= 'x') then @@ -1463,7 +1463,7 @@ subroutine init_hist_bgc_2D enddo endif !f_DONnet if (f_Fednet (1:1) /= 'x') then - do n = 1, n_fed + do n = 1, n_fed write(nchar,'(i3.3)') n write(vname_in,'(a,a)') 'dFe_net', trim(nchar) call define_hist_field(n_Fednet (n,:),vname_in,"umol/m^2",tstr2D, tcstr, & @@ -1471,9 +1471,9 @@ subroutine init_hist_bgc_2D "weighted by ice area ", c1, c0, & ns, f_Fednet ) enddo - endif !f_Fednet + endif !f_Fednet if (f_Fepnet (1:1) /= 'x') then - do n = 1, n_fep + do n = 1, n_fep write(nchar,'(i3.3)') n write(vname_in,'(a,a)') 'pFe_net', trim(nchar) call define_hist_field(n_Fepnet (n,:),vname_in,"umol/m^2",tstr2D, tcstr, & @@ -1481,7 +1481,7 @@ subroutine init_hist_bgc_2D "weighted by ice area ", c1, c0, & ns, f_Fepnet ) enddo - endif !f_Fepnet + endif !f_Fepnet if (f_Nitnet(1:1) /= 'x') & call define_hist_field(n_Nitnet,"Nit_net","mmol/m^2",tstr2D, tcstr, & "Net Nitrate", & @@ -1501,7 +1501,7 @@ subroutine init_hist_bgc_2D call define_hist_field(n_humnet,"hum_net","mmol/m^2",tstr2D, tcstr, & "Net humic material (carbon)", & "weighted by ice area", c1, c0, & - ns, f_humnet) + ns, f_humnet) if (f_DMSPpnet(1:1) /= 'x') & call define_hist_field(n_DMSPpnet,"DMSPp_net","mmol/m^2",tstr2D, tcstr, & "Net DMSPp", & @@ -1524,7 +1524,7 @@ subroutine init_hist_bgc_2D ns, f_PONnet) if (f_zaerosnow(1:1) /= 'x') then - do n = 1, n_zaero + do n = 1, n_zaero write(nchar,'(i3.3)') n write(vname_in,'(a,a)') 'zaero_snow', trim(nchar) call define_hist_field(n_zaerosnow(n,:),vname_in,"kg/m^2",tstr2D, tcstr, & @@ -1594,7 +1594,7 @@ subroutine init_hist_bgc_2D enddo endif !f_DONsnow if (f_Fedsnow (1:1) /= 'x') then - do n = 1, n_fed + do n = 1, n_fed write(nchar,'(i3.3)') n write(vname_in,'(a,a)') 'dFe_snow', trim(nchar) call define_hist_field(n_Fedsnow (n,:),vname_in,"umol/m^2",tstr2D, tcstr, & @@ -1602,9 +1602,9 @@ subroutine init_hist_bgc_2D "weighted by ice area ", c1, c0, & ns, f_Fedsnow ) enddo - endif !f_Fedsnow + endif !f_Fedsnow if (f_Fepsnow (1:1) /= 'x') then - do n = 1, n_fed + do n = 1, n_fed write(nchar,'(i3.3)') n write(vname_in,'(a,a)') 'pFe_snow', trim(nchar) call define_hist_field(n_Fepsnow (n,:),vname_in,"umol/m^2",tstr2D, tcstr, & @@ -1612,7 +1612,7 @@ subroutine init_hist_bgc_2D "weighted by ice area ", c1, c0, & ns, f_Fepsnow ) enddo - endif !f_Fepsnow + endif !f_Fepsnow if (f_Nitsnow(1:1) /= 'x') & 
call define_hist_field(n_Nitsnow,"Nit_snow","mmol/m^2",tstr2D, tcstr, & "Snow Nitrate", & @@ -1632,7 +1632,7 @@ subroutine init_hist_bgc_2D call define_hist_field(n_humsnow,"hum_snow","mmol/m^2",tstr2D, tcstr, & "Snow humic material (carbon)", & "weighted by ice area", c1, c0, & - ns, f_humsnow) + ns, f_humsnow) if (f_DMSPpsnow(1:1) /= 'x') & call define_hist_field(n_DMSPpsnow,"DMSPp_snow","mmol/m^2",tstr2D, tcstr, & "Snow DMSPp", & @@ -1655,7 +1655,7 @@ subroutine init_hist_bgc_2D ns, f_PONsnow) if (f_zaerofrac(1:1) /= 'x') then - do n = 1, n_zaero + do n = 1, n_zaero write(nchar,'(i3.3)') n write(vname_in,'(a,a)') 'zaero_frac', trim(nchar) call define_hist_field(n_zaerofrac(n,:),vname_in,"1",tstr2D, tcstr, & @@ -1715,7 +1715,7 @@ subroutine init_hist_bgc_2D enddo endif !f_DONfrac if (f_Fedfrac (1:1) /= 'x') then - do n = 1, n_fed + do n = 1, n_fed write(nchar,'(i3.3)') n write(vname_in,'(a,a)') 'dFe_frac', trim(nchar) call define_hist_field(n_Fedfrac (n,:),vname_in,"1",tstr2D, tcstr, & @@ -1723,9 +1723,9 @@ subroutine init_hist_bgc_2D "averaged over depth ", c1, c0, & ns, f_Fedfrac ) enddo - endif !f_Fedfrac + endif !f_Fedfrac if (f_Fepfrac (1:1) /= 'x') then - do n = 1, n_fep + do n = 1, n_fep write(nchar,'(i3.3)') n write(vname_in,'(a,a)') 'pFe_frac', trim(nchar) call define_hist_field(n_Fepfrac (n,:),vname_in,"1",tstr2D, tcstr, & @@ -1733,7 +1733,7 @@ subroutine init_hist_bgc_2D "averaged over depth ", c1, c0, & ns, f_Fepfrac ) enddo - endif !f_Fepfrac + endif !f_Fepfrac if (f_Nitfrac(1:1) /= 'x') & call define_hist_field(n_Nitfrac,"Nit_frac","1",tstr2D, tcstr, & "Mobile frac Nitrate", & @@ -1753,7 +1753,7 @@ subroutine init_hist_bgc_2D call define_hist_field(n_humfrac,"hum_frac","1",tstr2D, tcstr, & "Mobile frac humic material", & "averaged over depth", c1, c0, & - ns, f_humfrac) + ns, f_humfrac) if (f_DMSPpfrac(1:1) /= 'x') & call define_hist_field(n_DMSPpfrac,"DMSPp_frac","1",tstr2D, tcstr, & "Mobile frac DMSPp", & @@ -1787,8 +1787,8 @@ subroutine init_hist_bgc_2D endif ! histfreq(ns) /= 'x' enddo ! nstreams - endif ! tr_aero, etc - + endif ! tr_aero, etc + end subroutine init_hist_bgc_2D !======================================================================= @@ -1834,7 +1834,7 @@ subroutine init_hist_bgc_3Db real (kind=dbl_kind) :: secday logical (kind=log_kind) :: solve_zsal, z_tracers character(len=*), parameter :: subname = '(init_hist_bgc_3Db)' - + ! 
biology vertical grid call icepack_query_parameters(secday_out=secday) @@ -1848,7 +1848,7 @@ subroutine init_hist_bgc_3Db do ns = 1, nstreams if (histfreq(ns) /= 'x') then - + if (f_bTin(1:1) /= 'x') & call define_hist_field(n_bTin,"bTizn","C",tstr3Db, tcstr, & "ice internal temperatures on bio grid", & @@ -1859,27 +1859,27 @@ subroutine init_hist_bgc_3Db call define_hist_field(n_bphi,"bphizn","%",tstr3Db, tcstr, & "porosity", "brine volume fraction", c100, c0, & ns, f_bphi) - - if (f_iDi(1:1) /= 'x') & + + if (f_iDi(1:1) /= 'x') & call define_hist_field(n_iDi,"iDin","m^2/d",tstr3Db, tcstr, & "interface diffusivity", "on bio interface grid", secday, c0, & ns, f_iDi) - - if (f_iki(1:1) /= 'x') & + + if (f_iki(1:1) /= 'x') & call define_hist_field(n_iki,"ikin","mm^2",tstr3Db, tcstr, & "permeability", "on bio interface grid", 1.0e6_dbl_kind, c0, & ns, f_iki) - + if (f_bgc_S(1:1) /= 'x') & call define_hist_field(n_bgc_S,"bgc_S","ppt",tstr3Db, tcstr, & "bulk salinity", "on bio grid", c1, c0, & ns, f_bgc_S) - + if (f_zfswin(1:1) /= 'x') & call define_hist_field(n_zfswin,"zfswin","W/m^2",tstr3Db, tcstr, & "internal ice PAR", "on bio interface grid", c1, c0, & ns, f_zfswin) - + endif ! histfreq(ns) /= 'x' enddo ! ns @@ -1903,8 +1903,8 @@ subroutine accum_hist_bgc (iblk) use ice_flux, only: sss use ice_flux_bgc, only: fiso_atm, fiso_ocn, faero_atm, faero_ocn, & flux_bio, flux_bio_ai, fzsal_ai, fzsal_g_ai - use ice_history_shared, only: n2D, a2D, a3Dc, & - n3Dzcum, n3Dbcum, a3Db, a3Da, & + use ice_history_shared, only: n2D, a2D, a3Dc, & + n3Dzcum, n3Dbcum, a3Db, a3Da, & ncat_hist, accum_hist_field, nzblyr, nzalyr use ice_state, only: trcrn, trcr, aicen, aice, vicen @@ -1914,24 +1914,24 @@ subroutine accum_hist_bgc (iblk) ! local variables integer (kind=int_kind) :: & - i, j, n, k, & ! loop indices + i, j, n, k, & ! loop indices ilo,ihi,jlo,jhi ! beginning and end of physical domain - real (kind=dbl_kind), dimension (nx_block,ny_block,nblyr+4) :: & + real (kind=dbl_kind), dimension (nx_block,ny_block,nblyr+4) :: & workz, workz2 - real (kind=dbl_kind) :: & + real (kind=dbl_kind) :: & maxv, rhos, rhoi, rhow, puny, sk_l - real (kind=dbl_kind), dimension (nblyr+1) :: & + real (kind=dbl_kind), dimension (nblyr+1) :: & workv - real (kind=dbl_kind), dimension (nx_block,ny_block) :: & + real (kind=dbl_kind), dimension (nx_block,ny_block) :: & workni, worknj - integer (kind=int_kind), dimension (1) :: & + integer (kind=int_kind), dimension (1) :: & worki - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & workii logical (kind=log_kind) :: & @@ -1949,9 +1949,9 @@ subroutine accum_hist_bgc (iblk) integer (kind=int_kind), dimension(icepack_max_aero) :: & nlt_zaero_sw ! points to aerosol in trcrn_sw - + integer (kind=int_kind), dimension(icepack_max_algae) :: & - nt_bgc_N, nlt_bgc_N, & ! algae + nt_bgc_N, nlt_bgc_N, & ! algae nt_bgc_C, nlt_bgc_C, & ! nt_bgc_chl, nlt_bgc_chl ! @@ -2009,8 +2009,8 @@ subroutine accum_hist_bgc (iblk) call icepack_warnings_flush(nu_diag) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) - - this_block = get_block(blocks_ice(iblk),iblk) + + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo @@ -2026,15 +2026,15 @@ subroutine accum_hist_bgc (iblk) if (tr_iso .or. tr_aero .or. tr_brine .or. solve_zsal .or. skl_bgc) then ! 
zsalinity - if (f_fzsal (1:1) /= 'x') & + if (f_fzsal (1:1) /= 'x') & call accum_hist_field(n_fzsal, iblk, fzsal(:,:,iblk), a2D) - if (f_fzsal_ai(1:1)/= 'x') & + if (f_fzsal_ai(1:1)/= 'x') & call accum_hist_field(n_fzsal_ai, iblk, fzsal_ai(:,:,iblk), a2D) - if (f_fzsal_g (1:1) /= 'x') & + if (f_fzsal_g (1:1) /= 'x') & call accum_hist_field(n_fzsal_g, iblk, fzsal_g(:,:,iblk), a2D) - if (f_fzsal_g_ai(1:1)/= 'x') & + if (f_fzsal_g_ai(1:1)/= 'x') & call accum_hist_field(n_fzsal_g_ai,iblk, fzsal_g_ai(:,:,iblk), a2D) - if (f_zsal (1:1) /= 'x') & + if (f_zsal (1:1) /= 'x') & call accum_hist_field(n_zsal, iblk, zsal_tot(:,:,iblk), a2D) ! isotopes @@ -2120,13 +2120,13 @@ subroutine accum_hist_bgc (iblk) enddo endif if (f_bgc_Fed (1:1)/= 'x') then - do n=1,n_fed + do n=1,n_fed call accum_hist_field(n_bgc_Fed (n,:), iblk, & sk_l*trcr(:,:,nt_bgc_Fed (n), iblk), a2D) enddo endif if (f_bgc_Fep (1:1)/= 'x') then - do n=1,n_fep + do n=1,n_fep call accum_hist_field(n_bgc_Fep (n,:), iblk, & sk_l*trcr(:,:,nt_bgc_Fep (n), iblk), a2D) enddo @@ -2139,32 +2139,32 @@ subroutine accum_hist_bgc (iblk) endif if (f_bgc_Nit(1:1)/= 'x') & call accum_hist_field(n_bgc_Nit, iblk, & - sk_l*trcr(:,:,nt_bgc_Nit, iblk), a2D) + sk_l*trcr(:,:,nt_bgc_Nit, iblk), a2D) if (f_bgc_Am(1:1)/= 'x') & call accum_hist_field(n_bgc_Am, iblk, & - sk_l*trcr(:,:,nt_bgc_Am, iblk), a2D) + sk_l*trcr(:,:,nt_bgc_Am, iblk), a2D) if (f_bgc_Sil(1:1)/= 'x') & call accum_hist_field(n_bgc_Sil, iblk, & - sk_l*trcr(:,:,nt_bgc_Sil, iblk), a2D) + sk_l*trcr(:,:,nt_bgc_Sil, iblk), a2D) if (f_bgc_hum(1:1)/= 'x') & call accum_hist_field(n_bgc_hum, iblk, & - sk_l*trcr(:,:,nt_bgc_hum, iblk), a2D) + sk_l*trcr(:,:,nt_bgc_hum, iblk), a2D) if (f_bgc_PON(1:1)/= 'x') & call accum_hist_field(n_bgc_PON, iblk, & - sk_l*trcr(:,:,nt_bgc_PON, iblk), a2D) + sk_l*trcr(:,:,nt_bgc_PON, iblk), a2D) if (f_bgc_DMSPp(1:1)/= 'x') & call accum_hist_field(n_bgc_DMSPp,iblk, & - sk_l*trcr(:,:,nt_bgc_DMSPp,iblk), a2D) + sk_l*trcr(:,:,nt_bgc_DMSPp,iblk), a2D) if (f_bgc_DMSPd(1:1)/= 'x') & call accum_hist_field(n_bgc_DMSPd,iblk, & - sk_l*trcr(:,:,nt_bgc_DMSPd,iblk), a2D) + sk_l*trcr(:,:,nt_bgc_DMSPd,iblk), a2D) if (f_bgc_DMS(1:1)/= 'x') & call accum_hist_field(n_bgc_DMS, iblk, & - sk_l*trcr(:,:,nt_bgc_DMS, iblk), a2D) + sk_l*trcr(:,:,nt_bgc_DMS, iblk), a2D) - endif !skl_bgc + endif !skl_bgc - ! skeletal layer and vertical bgc + ! 
skeletal layer and vertical bgc if (f_bgc_DOC_ml(1:1)/= 'x') then do n=1,n_doc @@ -2185,13 +2185,13 @@ subroutine accum_hist_bgc (iblk) enddo endif if (f_bgc_Fed_ml (1:1)/= 'x') then - do n=1,n_fed + do n=1,n_fed call accum_hist_field(n_bgc_Fed_ml (n,:), iblk, & ocean_bio(:,:,nlt_bgc_Fed (n), iblk), a2D) enddo endif if (f_bgc_Fep_ml (1:1)/= 'x') then - do n=1,n_fep + do n=1,n_fep call accum_hist_field(n_bgc_Fep_ml (n,:), iblk, & ocean_bio(:,:,nlt_bgc_Fep (n), iblk), a2D) enddo @@ -2204,22 +2204,22 @@ subroutine accum_hist_bgc (iblk) endif if (f_bgc_Nit_ml(1:1)/= 'x') & call accum_hist_field(n_bgc_Nit_ml, iblk, & - ocean_bio(:,:,nlt_bgc_Nit, iblk), a2D) + ocean_bio(:,:,nlt_bgc_Nit, iblk), a2D) if (f_bgc_Am_ml(1:1)/= 'x') & call accum_hist_field(n_bgc_Am_ml, iblk, & - ocean_bio(:,:,nlt_bgc_Am, iblk), a2D) + ocean_bio(:,:,nlt_bgc_Am, iblk), a2D) if (f_bgc_Sil_ml(1:1)/= 'x') & call accum_hist_field(n_bgc_Sil_ml, iblk, & - ocean_bio(:,:,nlt_bgc_Sil, iblk), a2D) + ocean_bio(:,:,nlt_bgc_Sil, iblk), a2D) if (f_bgc_hum_ml(1:1)/= 'x') & call accum_hist_field(n_bgc_hum_ml, iblk, & - ocean_bio(:,:,nlt_bgc_hum, iblk), a2D) + ocean_bio(:,:,nlt_bgc_hum, iblk), a2D) if (f_bgc_DMSP_ml(1:1)/= 'x') & call accum_hist_field(n_bgc_DMSP_ml, iblk, & - ocean_bio(:,:,nlt_bgc_DMSPd, iblk), a2D) + ocean_bio(:,:,nlt_bgc_DMSPd, iblk), a2D) if (f_bgc_DMS_ml(1:1)/= 'x') & call accum_hist_field(n_bgc_DMS_ml, iblk, & - ocean_bio(:,:,nlt_bgc_DMS, iblk), a2D) + ocean_bio(:,:,nlt_bgc_DMS, iblk), a2D) if (f_fNit (1:1) /= 'x') & call accum_hist_field(n_fNit, iblk, & @@ -2283,25 +2283,25 @@ subroutine accum_hist_bgc (iblk) enddo endif if (f_fFed (1:1)/= 'x') then - do n=1,n_fed + do n=1,n_fed call accum_hist_field(n_fFed (n,:), iblk, & flux_bio(:,:,nlt_bgc_Fed (n),iblk), a2D) enddo endif if (f_fFed_ai (1:1)/= 'x') then - do n=1,n_fed + do n=1,n_fed call accum_hist_field(n_fFed_ai (n,:), iblk, & flux_bio_ai(:,:,nlt_bgc_Fed (n),iblk), a2D) enddo endif if (f_fFep (1:1)/= 'x') then - do n=1,n_fep + do n=1,n_fep call accum_hist_field(n_fFep (n,:), iblk, & flux_bio(:,:,nlt_bgc_Fep (n),iblk), a2D) enddo endif if (f_fFep_ai (1:1)/= 'x') then - do n=1,n_fep + do n=1,n_fep call accum_hist_field(n_fFep_ai (n,:), iblk, & flux_bio_ai(:,:,nlt_bgc_Fep (n),iblk), a2D) enddo @@ -2347,7 +2347,7 @@ subroutine accum_hist_bgc (iblk) PP_net(:,:,iblk), a2D) if (f_grownet (1:1) /= 'x') & call accum_hist_field(n_grownet, iblk, & - grow_net(:,:,iblk), a2D) + grow_net(:,:,iblk), a2D) if (f_upNO (1:1) /= 'x') & call accum_hist_field(n_upNO, iblk, & upNO(:,:,iblk), a2D) @@ -2355,7 +2355,7 @@ subroutine accum_hist_bgc (iblk) call accum_hist_field(n_upNH, iblk, & upNH(:,:,iblk), a2D) - ! vertical biogeochemistry + ! vertical biogeochemistry if (z_tracers) then @@ -2396,7 +2396,7 @@ subroutine accum_hist_bgc (iblk) enddo ! n endif !f_algalpeak - ! + ! ! ice_bio_net ! 
if (f_zaeronet (1:1) /= 'x') then @@ -2424,35 +2424,35 @@ subroutine accum_hist_bgc (iblk) enddo endif !f_Cnet if (f_DOCnet (1:1) /= 'x') then - do n=1,n_doc + do n=1,n_doc call accum_hist_field(n_DOCnet(n,:), iblk, & ice_bio_net(:,:,nlt_bgc_DOC(n), iblk), a2D) enddo endif !f_DOCnet if (f_DICnet (1:1) /= 'x') then - do n=1,n_dic + do n=1,n_dic call accum_hist_field(n_DICnet(n,:), iblk, & ice_bio_net(:,:,nlt_bgc_DIC(n), iblk), a2D) enddo endif !f_DICnet if (f_DONnet (1:1) /= 'x') then - do n=1,n_don + do n=1,n_don call accum_hist_field(n_DONnet(n,:), iblk, & ice_bio_net(:,:,nlt_bgc_DON(n), iblk), a2D) enddo endif !f_DONnet if (f_Fednet (1:1) /= 'x') then - do n=1,n_fed + do n=1,n_fed call accum_hist_field(n_Fednet (n,:), iblk, & ice_bio_net(:,:,nlt_bgc_Fed (n), iblk), a2D) enddo - endif !f_Fednet + endif !f_Fednet if (f_Fepnet (1:1) /= 'x') then - do n=1,n_fep + do n=1,n_fep call accum_hist_field(n_Fepnet (n,:), iblk, & ice_bio_net(:,:,nlt_bgc_Fep (n), iblk), a2D) enddo - endif !f_Fepnet + endif !f_Fepnet if (f_Nitnet (1:1) /= 'x') & call accum_hist_field(n_Nitnet, iblk, & @@ -2480,7 +2480,7 @@ subroutine accum_hist_bgc (iblk) ice_bio_net(:,:,nlt_bgc_PON, iblk), a2D) ! ! snow_bio_net - ! + ! if (f_zaerosnow (1:1) /= 'x') then do n=1,n_zaero call accum_hist_field(n_zaerosnow(n,:), iblk, & @@ -2506,35 +2506,35 @@ subroutine accum_hist_bgc (iblk) enddo endif !f_Csnow if (f_DOCsnow (1:1) /= 'x') then - do n=1,n_doc + do n=1,n_doc call accum_hist_field(n_DOCsnow(n,:), iblk, & snow_bio_net(:,:,nlt_bgc_DOC(n), iblk), a2D) enddo endif !f_DOCsnow if (f_DICsnow (1:1) /= 'x') then - do n=1,n_dic + do n=1,n_dic call accum_hist_field(n_DICsnow(n,:), iblk, & snow_bio_net(:,:,nlt_bgc_DIC(n), iblk), a2D) enddo endif !f_DICsnow if (f_DONsnow (1:1) /= 'x') then - do n=1,n_don + do n=1,n_don call accum_hist_field(n_DONsnow(n,:), iblk, & snow_bio_net(:,:,nlt_bgc_DON(n), iblk), a2D) enddo endif !f_DONsnow if (f_Fedsnow (1:1) /= 'x') then - do n=1,n_fed + do n=1,n_fed call accum_hist_field(n_Fedsnow (n,:), iblk, & snow_bio_net(:,:,nlt_bgc_Fed (n), iblk), a2D) enddo - endif !f_Fedsnow + endif !f_Fedsnow if (f_Fepsnow (1:1) /= 'x') then - do n=1,n_fep + do n=1,n_fep call accum_hist_field(n_Fepsnow (n,:), iblk, & snow_bio_net(:,:,nlt_bgc_Fep (n), iblk), a2D) enddo - endif !f_Fepsnow + endif !f_Fepsnow if (f_Nitsnow (1:1) /= 'x') & call accum_hist_field(n_Nitsnow, iblk, & @@ -2562,7 +2562,7 @@ subroutine accum_hist_bgc (iblk) snow_bio_net(:,:,nlt_bgc_PON, iblk), a2D) ! ! mobile frac - ! + ! 
if (f_zaerofrac (1:1) /= 'x') then do n=1,n_zaero call accum_hist_field(n_zaerofrac(n,:), iblk, & @@ -2582,35 +2582,35 @@ subroutine accum_hist_bgc (iblk) enddo endif !f_Nfrac if (f_DOCfrac (1:1) /= 'x') then - do n=1,n_doc + do n=1,n_doc call accum_hist_field(n_DOCfrac(n,:), iblk, & trcr(:,:,nt_zbgc_frac - 1 + nlt_bgc_DOC(n), iblk), a2D) enddo endif !f_DOCfrac if (f_DICfrac (1:1) /= 'x') then - do n=1,n_dic + do n=1,n_dic call accum_hist_field(n_DICfrac(n,:), iblk, & trcr(:,:,nt_zbgc_frac - 1 + nlt_bgc_DIC(n), iblk), a2D) enddo endif !f_DICfrac if (f_DONfrac (1:1) /= 'x') then - do n=1,n_don + do n=1,n_don call accum_hist_field(n_DONfrac(n,:), iblk, & trcr(:,:,nt_zbgc_frac - 1 + nlt_bgc_DON(n), iblk), a2D) enddo endif !f_DONfrac if (f_Fedfrac (1:1) /= 'x') then - do n=1,n_fed + do n=1,n_fed call accum_hist_field(n_Fedfrac (n,:), iblk, & trcr(:,:,nt_zbgc_frac - 1 + nlt_bgc_Fed (n), iblk), a2D) enddo - endif !f_Fedfrac + endif !f_Fedfrac if (f_Fepfrac (1:1) /= 'x') then - do n=1,n_fep + do n=1,n_fep call accum_hist_field(n_Fepfrac (n,:), iblk, & trcr(:,:,nt_zbgc_frac - 1 + nlt_bgc_Fep (n), iblk), a2D) enddo - endif !f_Fepfrac + endif !f_Fepfrac if (f_Nitfrac (1:1) /= 'x') & call accum_hist_field(n_Nitfrac, iblk, & @@ -2623,7 +2623,7 @@ subroutine accum_hist_bgc (iblk) trcr(:,:,nt_zbgc_frac - 1 + nlt_bgc_Sil, iblk), a2D) if (f_humfrac (1:1) /= 'x') & call accum_hist_field(n_humfrac, iblk, & - trcr(:,:,nt_zbgc_frac - 1 + nlt_bgc_hum, iblk), a2D) + trcr(:,:,nt_zbgc_frac - 1 + nlt_bgc_hum, iblk), a2D) if (f_DMSPpfrac (1:1) /= 'x') & call accum_hist_field(n_DMSPpfrac, iblk, & trcr(:,:,nt_zbgc_frac - 1 + nlt_bgc_DMSPp, iblk), a2D) @@ -2736,11 +2736,11 @@ subroutine accum_hist_bgc (iblk) do i = ilo, ihi if (aicen(i,j,n,iblk) > c0) then workz(i,j,k) = workz(i,j,k) + iDi(i,j,k,n,iblk)*vicen(i,j,n,iblk)**2/aicen(i,j,n,iblk) - workz(i,j,nzblyr) = workz(i,j,nzblyr-1) + workz(i,j,nzblyr) = workz(i,j,nzblyr-1) endif enddo ! i enddo ! j - enddo ! k + enddo ! k enddo ! n call accum_hist_field(n_iDi-n3Dzcum, iblk, nzblyr, & workz(:,:,1:nzblyr), a3Db) @@ -2760,7 +2760,7 @@ subroutine accum_hist_bgc (iblk) endif enddo ! i enddo ! j - enddo ! k + enddo ! k enddo ! n call accum_hist_field(n_iki-n3Dzcum, iblk, nzblyr, & workz(:,:,1:nzblyr), a3Db) @@ -2778,7 +2778,7 @@ subroutine accum_hist_bgc (iblk) workz(:,:,:) = c0 do j = jlo, jhi do i = ilo, ihi - if (aice(i,j,iblk) > puny) then + if (aice(i,j,iblk) > puny) then workz(i,j,1:2) = & !snow trcr(i,j,nt_zaero(k)+nblyr+1:nt_zaero(k)+nblyr+2,iblk)/rhos workz(i,j,3:nblyr+3) = & !ice @@ -2786,7 +2786,7 @@ subroutine accum_hist_bgc (iblk) workz(i,j,nblyr+4) = ocean_bio(i,j,nlt_zaero(k),iblk)/rhow !ocean endif enddo ! i - enddo ! j + enddo ! j call accum_hist_field(n_zaeros(k,:)-n3Dbcum, iblk, nzalyr, & workz(:,:,1:nzalyr), a3Da) enddo !k @@ -2797,14 +2797,14 @@ subroutine accum_hist_bgc (iblk) workz2(:,:,:) = c0 do j = jlo, jhi do i = ilo, ihi - if (aice(i,j,iblk) > puny) then + if (aice(i,j,iblk) > puny) then workz(i,j,1:2) = & !snow trcr(i,j,nt_bgc_N(k)+nblyr+1:nt_bgc_N(k)+nblyr+2,iblk) workz(i,j,3:nblyr+3) = & !ice trcr(i,j,nt_bgc_N(k):nt_bgc_N(k)+nblyr,iblk) workz(i,j,nblyr+4) = ocean_bio(i,j,nlt_bgc_N(k),iblk) !ocean endif - if (aicen(i,j,1,iblk) > puny) then + if (aicen(i,j,1,iblk) > puny) then workz2(i,j,1:2) = & !snow trcrn(i,j,nt_bgc_N(k)+nblyr+1:nt_bgc_N(k)+nblyr+2,1,iblk) workz2(i,j,3:nblyr+3) = & !ice @@ -2812,7 +2812,7 @@ subroutine accum_hist_bgc (iblk) workz2(i,j,nblyr+4) = ocean_bio(i,j,nlt_bgc_N(k),iblk) !ocean endif enddo ! i - enddo ! j + enddo ! 
j call accum_hist_field(n_bgc_N(k,:)-n3Dbcum, iblk, nzalyr, & workz(:,:,1:nzalyr), a3Da) call accum_hist_field(n_bgc_N_cat1(k,:)-n3Dbcum, iblk, nzalyr, & @@ -2824,7 +2824,7 @@ subroutine accum_hist_bgc (iblk) workz(:,:,:) = c0 do j = jlo, jhi do i = ilo, ihi - if (aice(i,j,iblk) > puny) then + if (aice(i,j,iblk) > puny) then workz(i,j,1:2) = & !snow R_C2N(k)*trcr(i,j,nt_bgc_N(k)+nblyr+1:nt_bgc_N(k)+nblyr+2,iblk) workz(i,j,3:nblyr+3) = & !ice @@ -2832,25 +2832,25 @@ subroutine accum_hist_bgc (iblk) workz(i,j,nblyr+4) = R_C2N(k)*ocean_bio(i,j,nlt_bgc_N(k),iblk) !ocean endif enddo ! i - enddo ! j + enddo ! j call accum_hist_field(n_bgc_C(k,:)-n3Dbcum, iblk, nzalyr, & workz(:,:,1:nzalyr), a3Da) enddo !k endif !f_bgc_C if (f_bgc_DOC (1:1) /= 'x') then - do k = 1,n_doc + do k = 1,n_doc workz(:,:,:) = c0 workz2(:,:,:) = c0 do j = jlo, jhi do i = ilo, ihi - if (aice(i,j,iblk) > puny) then + if (aice(i,j,iblk) > puny) then workz(i,j,1:2) = & !snow trcr(i,j,nt_bgc_DOC(k)+nblyr+1:nt_bgc_DOC(k)+nblyr+2,iblk) workz(i,j,3:nblyr+3) = & !ice trcr(i,j,nt_bgc_DOC(k):nt_bgc_DOC(k)+nblyr,iblk) workz(i,j,nblyr+4) = ocean_bio(i,j,nlt_bgc_DOC(k),iblk) !ocean endif - if (aicen(i,j,1,iblk) > puny) then + if (aicen(i,j,1,iblk) > puny) then workz2(i,j,1:2) = & !snow trcrn(i,j,nt_bgc_DOC(k)+nblyr+1:nt_bgc_DOC(k)+nblyr+2,1,iblk) workz2(i,j,3:nblyr+3) = & !ice @@ -2858,7 +2858,7 @@ subroutine accum_hist_bgc (iblk) workz2(i,j,nblyr+4) = ocean_bio(i,j,nlt_bgc_DOC(k),iblk) !ocean endif enddo ! i - enddo ! j + enddo ! j call accum_hist_field(n_bgc_DOC(k,:)-n3Dbcum, iblk, nzalyr, & workz(:,:,1:nzalyr), a3Da) call accum_hist_field(n_bgc_DOC_cat1(k,:)-n3Dbcum, iblk, nzalyr, & @@ -2866,19 +2866,19 @@ subroutine accum_hist_bgc (iblk) enddo !k endif !f_bgc_DOC if (f_bgc_DIC (1:1) /= 'x') then - do k = 1,n_dic + do k = 1,n_dic workz(:,:,:) = c0 workz2(:,:,:) = c0 do j = jlo, jhi do i = ilo, ihi - if (aice(i,j,iblk) > puny) then + if (aice(i,j,iblk) > puny) then workz(i,j,1:2) = & !snow trcr(i,j,nt_bgc_DIC(k)+nblyr+1:nt_bgc_DIC(k)+nblyr+2,iblk) workz(i,j,3:nblyr+3) = & !ice trcr(i,j,nt_bgc_DIC(k):nt_bgc_DIC(k)+nblyr,iblk) workz(i,j,nblyr+4) = ocean_bio(i,j,nlt_bgc_DIC(k),iblk) !ocean endif - if (aicen(i,j,1,iblk) > puny) then + if (aicen(i,j,1,iblk) > puny) then workz2(i,j,1:2) = & !snow trcrn(i,j,nt_bgc_DIC(k)+nblyr+1:nt_bgc_DIC(k)+nblyr+2,1,iblk) workz2(i,j,3:nblyr+3) = & !ice @@ -2886,7 +2886,7 @@ subroutine accum_hist_bgc (iblk) workz2(i,j,nblyr+4) = ocean_bio(i,j,nlt_bgc_DIC(k),iblk) !ocean endif enddo ! i - enddo ! j + enddo ! j call accum_hist_field(n_bgc_DIC(k,:)-n3Dbcum, iblk, nzalyr, & workz(:,:,1:nzalyr), a3Da) call accum_hist_field(n_bgc_DIC_cat1(k,:)-n3Dbcum, iblk, nzalyr, & @@ -2894,19 +2894,19 @@ subroutine accum_hist_bgc (iblk) enddo !k endif !f_bgc_DIC if (f_bgc_DON (1:1) /= 'x') then - do k = 1,n_don + do k = 1,n_don workz(:,:,:) = c0 workz2(:,:,:) = c0 do j = jlo, jhi do i = ilo, ihi - if (aice(i,j,iblk) > puny) then + if (aice(i,j,iblk) > puny) then workz(i,j,1:2) = & !snow trcr(i,j,nt_bgc_DON(k)+nblyr+1:nt_bgc_DON(k)+nblyr+2,iblk) workz(i,j,3:nblyr+3) = & !ice trcr(i,j,nt_bgc_DON(k):nt_bgc_DON(k)+nblyr,iblk) workz(i,j,nblyr+4) = ocean_bio(i,j,nlt_bgc_DON(k),iblk) !ocean endif - if (aicen(i,j,1,iblk) > puny) then + if (aicen(i,j,1,iblk) > puny) then workz2(i,j,1:2) = & !snow trcrn(i,j,nt_bgc_DON(k)+nblyr+1:nt_bgc_DON(k)+nblyr+2,1,iblk) workz2(i,j,3:nblyr+3) = & !ice @@ -2914,7 +2914,7 @@ subroutine accum_hist_bgc (iblk) workz2(i,j,nblyr+4) = ocean_bio(i,j,nlt_bgc_DON(k),iblk) !ocean endif enddo ! i - enddo ! 
j + enddo ! j call accum_hist_field(n_bgc_DON(k,:)-n3Dbcum, iblk, nzalyr, & workz(:,:,1:nzalyr), a3Da) call accum_hist_field(n_bgc_DON_cat1(k,:)-n3Dbcum, iblk, nzalyr, & @@ -2922,19 +2922,19 @@ subroutine accum_hist_bgc (iblk) enddo !k endif !f_bgc_DON if (f_bgc_Fed (1:1) /= 'x') then - do k = 1,n_fed + do k = 1,n_fed workz(:,:,:) = c0 workz2(:,:,:) = c0 do j = jlo, jhi do i = ilo, ihi - if (aice(i,j,iblk) > puny) then + if (aice(i,j,iblk) > puny) then workz(i,j,1:2) = & !snow trcr(i,j,nt_bgc_Fed (k)+nblyr+1:nt_bgc_Fed (k)+nblyr+2,iblk) workz(i,j,3:nblyr+3) = & !ice trcr(i,j,nt_bgc_Fed (k):nt_bgc_Fed (k)+nblyr,iblk) workz(i,j,nblyr+4) = ocean_bio(i,j,nlt_bgc_Fed (k),iblk) !ocean endif - if (aicen(i,j,1,iblk) > puny) then + if (aicen(i,j,1,iblk) > puny) then workz2(i,j,1:2) = & !snow trcrn(i,j,nt_bgc_Fed (k)+nblyr+1:nt_bgc_Fed (k)+nblyr+2,1,iblk) workz2(i,j,3:nblyr+3) = & !ice @@ -2942,27 +2942,27 @@ subroutine accum_hist_bgc (iblk) workz2(i,j,nblyr+4) = ocean_bio(i,j,nlt_bgc_Fed (k),iblk) !ocean endif enddo ! i - enddo ! j + enddo ! j call accum_hist_field(n_bgc_Fed (k,:)-n3Dbcum, iblk, nzalyr, & workz(:,:,1:nzalyr), a3Da) call accum_hist_field(n_bgc_Fed_cat1 (k,:)-n3Dbcum, iblk, nzalyr, & workz2(:,:,1:nzalyr), a3Da) enddo !k - endif !f_bgc_Fed + endif !f_bgc_Fed if (f_bgc_Fep (1:1) /= 'x') then - do k = 1,n_fep + do k = 1,n_fep workz(:,:,:) = c0 workz2(:,:,:) = c0 do j = jlo, jhi do i = ilo, ihi - if (aice(i,j,iblk) > puny) then + if (aice(i,j,iblk) > puny) then workz(i,j,1:2) = & !snow trcr(i,j,nt_bgc_Fep (k)+nblyr+1:nt_bgc_Fep (k)+nblyr+2,iblk) workz(i,j,3:nblyr+3) = & !ice trcr(i,j,nt_bgc_Fep (k):nt_bgc_Fep (k)+nblyr,iblk) workz(i,j,nblyr+4) = ocean_bio(i,j,nlt_bgc_Fep (k),iblk) !ocean endif - if (aicen(i,j,1,iblk) > puny) then + if (aicen(i,j,1,iblk) > puny) then workz2(i,j,1:2) = & !snow trcrn(i,j,nt_bgc_Fep (k)+nblyr+1:nt_bgc_Fep (k)+nblyr+2,1,iblk) workz2(i,j,3:nblyr+3) = & !ice @@ -2970,19 +2970,19 @@ subroutine accum_hist_bgc (iblk) workz2(i,j,nblyr+4) = ocean_bio(i,j,nlt_bgc_Fep (k),iblk) !ocean endif enddo ! i - enddo ! j + enddo ! j call accum_hist_field(n_bgc_Fep (k,:)-n3Dbcum, iblk, nzalyr, & workz(:,:,1:nzalyr), a3Da) call accum_hist_field(n_bgc_Fep_cat1 (k,:)-n3Dbcum, iblk, nzalyr, & workz2(:,:,1:nzalyr), a3Da) enddo !k - endif !f_bgc_Fep + endif !f_bgc_Fep if (f_bgc_chl (1:1) /= 'x') then do k = 1,n_algae workz(:,:,:) = c0 do j = jlo, jhi do i = ilo, ihi - if (aice(i,j,iblk) > puny) then + if (aice(i,j,iblk) > puny) then workz(i,j,1:2) = & !snow trcr(i,j,nt_bgc_chl(k)+nblyr+1:nt_bgc_chl(k)+nblyr+2,iblk) workz(i,j,3:nblyr+3) = & !ice @@ -2990,12 +2990,12 @@ subroutine accum_hist_bgc (iblk) workz(i,j,nblyr+4) = ocean_bio(i,j,nlt_bgc_chl(k),iblk) !ocean endif enddo ! i - enddo ! j + enddo ! 
j call accum_hist_field(n_bgc_chl(k,:)-n3Dbcum, iblk, nzalyr, & workz(:,:,1:nzalyr), a3Da) enddo !k endif !f_bgc_chl - + if (f_bgc_Nit (1:1) /= 'x') then workz(:,:,:) = c0 workz2(:,:,:) = c0 @@ -3005,18 +3005,18 @@ subroutine accum_hist_bgc (iblk) workz(i,j,1:2) = & !snow trcr(i,j,nt_bgc_Nit+nblyr+2:nt_bgc_Nit+nblyr+3,iblk) workz(i,j,3:nblyr+3) = & !ice - trcr(i,j,nt_bgc_Nit:nt_bgc_Nit+nblyr,iblk) + trcr(i,j,nt_bgc_Nit:nt_bgc_Nit+nblyr,iblk) workz(i,j,nblyr+4) = ocean_bio(i,j,nlt_bgc_Nit,iblk) !ocean endif if (aicen(i,j,1,iblk) > puny) then workz2(i,j,1:2) = & !snow trcrn(i,j,nt_bgc_Nit+nblyr+2:nt_bgc_Nit+nblyr+3,1,iblk) workz2(i,j,3:nblyr+3) = & !ice - trcrn(i,j,nt_bgc_Nit:nt_bgc_Nit+nblyr,1,iblk) + trcrn(i,j,nt_bgc_Nit:nt_bgc_Nit+nblyr,1,iblk) workz2(i,j,nblyr+4) = ocean_bio(i,j,nlt_bgc_Nit,iblk) !ocean endif enddo ! i - enddo ! j + enddo ! j call accum_hist_field(n_bgc_Nit-n3Dbcum, iblk, nzalyr, & workz(:,:,1:nzalyr), a3Da) call accum_hist_field(n_bgc_Nit_cat1-n3Dbcum, iblk, nzalyr, & @@ -3032,18 +3032,18 @@ subroutine accum_hist_bgc (iblk) workz(i,j,1:2) = & !snow trcr(i,j,nt_bgc_Am+nblyr+1:nt_bgc_Am+nblyr+2,iblk) workz(i,j,3:nblyr+3) = & !ice - trcr(i,j,nt_bgc_Am:nt_bgc_Am+nblyr,iblk) + trcr(i,j,nt_bgc_Am:nt_bgc_Am+nblyr,iblk) workz(i,j,nblyr+4) = ocean_bio(i,j,nlt_bgc_Am,iblk) !ocean endif if (aicen(i,j,1,iblk) > puny) then workz2(i,j,1:2) = & !snow trcrn(i,j,nt_bgc_Am+nblyr+1:nt_bgc_Am+nblyr+2,1,iblk) workz2(i,j,3:nblyr+3) = & !ice - trcrn(i,j,nt_bgc_Am:nt_bgc_Am+nblyr,1,iblk) + trcrn(i,j,nt_bgc_Am:nt_bgc_Am+nblyr,1,iblk) workz2(i,j,nblyr+4) = ocean_bio(i,j,nlt_bgc_Am,iblk) !ocean endif enddo ! i - enddo ! j + enddo ! j call accum_hist_field(n_bgc_Am-n3Dbcum, iblk, nzalyr, & workz(:,:,1:nzalyr), a3Da) call accum_hist_field(n_bgc_Am_cat1-n3Dbcum, iblk, nzalyr, & @@ -3059,24 +3059,24 @@ subroutine accum_hist_bgc (iblk) workz(i,j,1:2) = & !snow trcr(i,j,nt_bgc_Sil+nblyr+1:nt_bgc_Sil+nblyr+2,iblk) workz(i,j,3:nblyr+3) = & !ice - trcr(i,j,nt_bgc_Sil:nt_bgc_Sil+nblyr,iblk) + trcr(i,j,nt_bgc_Sil:nt_bgc_Sil+nblyr,iblk) workz(i,j,nblyr+4) = ocean_bio(i,j,nlt_bgc_Sil,iblk) !ocean endif if (aicen(i,j,1,iblk) > puny) then workz2(i,j,1:2) = & !snow trcrn(i,j,nt_bgc_Sil+nblyr+1:nt_bgc_Sil+nblyr+2,1,iblk) workz2(i,j,3:nblyr+3) = & !ice - trcrn(i,j,nt_bgc_Sil:nt_bgc_Sil+nblyr,1,iblk) + trcrn(i,j,nt_bgc_Sil:nt_bgc_Sil+nblyr,1,iblk) workz2(i,j,nblyr+4) = ocean_bio(i,j,nlt_bgc_Sil,iblk) !ocean endif enddo ! i - enddo ! j + enddo ! j call accum_hist_field(n_bgc_Sil-n3Dbcum, iblk, nzalyr, & workz(:,:,1:nzalyr), a3Da) call accum_hist_field(n_bgc_Sil_cat1-n3Dbcum, iblk, nzalyr, & workz2(:,:,1:nzalyr), a3Da) endif - + if (f_bgc_hum (1:1) /= 'x') then workz(:,:,:) = c0 @@ -3087,24 +3087,24 @@ subroutine accum_hist_bgc (iblk) workz(i,j,1:2) = & !snow trcr(i,j,nt_bgc_hum+nblyr+1:nt_bgc_hum+nblyr+2,iblk) workz(i,j,3:nblyr+3) = & !ice - trcr(i,j,nt_bgc_hum:nt_bgc_hum+nblyr,iblk) + trcr(i,j,nt_bgc_hum:nt_bgc_hum+nblyr,iblk) workz(i,j,nblyr+4) = ocean_bio(i,j,nlt_bgc_hum,iblk) !ocean endif if (aicen(i,j,1,iblk) > puny) then workz2(i,j,1:2) = & !snow trcrn(i,j,nt_bgc_hum+nblyr+1:nt_bgc_hum+nblyr+2,1,iblk) workz2(i,j,3:nblyr+3) = & !ice - trcrn(i,j,nt_bgc_hum:nt_bgc_hum+nblyr,1,iblk) + trcrn(i,j,nt_bgc_hum:nt_bgc_hum+nblyr,1,iblk) workz2(i,j,nblyr+4) = ocean_bio(i,j,nlt_bgc_hum,iblk) !ocean endif enddo ! i - enddo ! j + enddo ! 
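For illustration only (not part of the patch): the loops above all pack one vertical column per grid point, with indices 1:2 for snow, 3:nblyr+3 for the ice bio grid, and nblyr+4 for the ocean value. A standalone sketch of that packing, assuming an illustrative layer count:

    program snowbio_pack_demo
      implicit none
      integer, parameter :: nblyr  = 7           ! bio grid layers (illustrative)
      integer, parameter :: nzalyr = nblyr + 4   ! 2 snow + nblyr+1 ice + 1 ocean
      real :: workz(nzalyr), snow(2), ice(nblyr+1), ocean
      snow  = 0.1                 ! snow surface layer + interior
      ice   = 0.5                 ! bio grid values
      ocean = 1.0
      workz(1:2)       = snow     ! snow, as in the loops above
      workz(3:nblyr+3) = ice      ! ice layers
      workz(nblyr+4)   = ocean    ! ocean value
      print *, workz
    end program snowbio_pack_demo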
j call accum_hist_field(n_bgc_hum-n3Dbcum, iblk, nzalyr, & workz(:,:,1:nzalyr), a3Da) call accum_hist_field(n_bgc_hum_cat1-n3Dbcum, iblk, nzalyr, & workz2(:,:,1:nzalyr), a3Da) endif - + if (f_bgc_DMSPd (1:1) /= 'x') then workz(:,:,:) = c0 workz2(:,:,:) = c0 @@ -3114,23 +3114,23 @@ subroutine accum_hist_bgc (iblk) workz(i,j,1:2) = & !snow trcr(i,j,nt_bgc_DMSPd+nblyr+1:nt_bgc_DMSPd+nblyr+2,iblk) workz(i,j,3:nblyr+3) = & !ice - trcr(i,j,nt_bgc_DMSPd:nt_bgc_DMSPd+nblyr,iblk) + trcr(i,j,nt_bgc_DMSPd:nt_bgc_DMSPd+nblyr,iblk) workz(i,j,nblyr+4) = ocean_bio(i,j,nlt_bgc_DMSPd,iblk) !ocean endif if (aicen(i,j,1,iblk) > puny) then workz2(i,j,1:2) = & !snow trcrn(i,j,nt_bgc_DMSPd+nblyr+1:nt_bgc_DMSPd+nblyr+2,1,iblk) workz2(i,j,3:nblyr+3) = & !ice - trcrn(i,j,nt_bgc_DMSPd:nt_bgc_DMSPd+nblyr,1,iblk) + trcrn(i,j,nt_bgc_DMSPd:nt_bgc_DMSPd+nblyr,1,iblk) workz2(i,j,nblyr+4) = ocean_bio(i,j,nlt_bgc_DMSPd,iblk) !ocean endif enddo ! i - enddo ! j + enddo ! j call accum_hist_field(n_bgc_DMSPd-n3Dbcum, iblk, nzalyr, & workz(:,:,1:nzalyr), a3Da) call accum_hist_field(n_bgc_DMSPd_cat1-n3Dbcum, iblk, nzalyr, & workz2(:,:,1:nzalyr), a3Da) - endif + endif if (f_bgc_DMSPp (1:1) /= 'x') then workz(:,:,:) = c0 @@ -3140,11 +3140,11 @@ subroutine accum_hist_bgc (iblk) workz(i,j,1:2) = & !snow trcr(i,j,nt_bgc_DMSPp+nblyr+1:nt_bgc_DMSPp+nblyr+2,iblk) workz(i,j,3:nblyr+3) = & !ice - trcr(i,j,nt_bgc_DMSPp:nt_bgc_DMSPp+nblyr,iblk) + trcr(i,j,nt_bgc_DMSPp:nt_bgc_DMSPp+nblyr,iblk) workz(i,j,nblyr+4) = ocean_bio(i,j,nlt_bgc_DMSPp,iblk) !ocean endif enddo ! i - enddo ! j + enddo ! j call accum_hist_field(n_bgc_DMSPp-n3Dbcum, iblk, nzalyr, & workz(:,:,1:nzalyr), a3Da) endif @@ -3158,18 +3158,18 @@ subroutine accum_hist_bgc (iblk) workz(i,j,1:2) = & !snow trcr(i,j,nt_bgc_DMS+nblyr+1:nt_bgc_DMS+nblyr+2,iblk) workz(i,j,3:nblyr+3) = & !ice - trcr(i,j,nt_bgc_DMS:nt_bgc_DMS+nblyr,iblk) + trcr(i,j,nt_bgc_DMS:nt_bgc_DMS+nblyr,iblk) workz(i,j,nblyr+4) = ocean_bio(i,j,nlt_bgc_DMS,iblk) !ocean endif if (aicen(i,j,1,iblk) > puny) then workz2(i,j,1:2) = & !snow trcrn(i,j,nt_bgc_DMS+nblyr+1:nt_bgc_DMS+nblyr+2,1,iblk) workz2(i,j,3:nblyr+3) = & !ice - trcrn(i,j,nt_bgc_DMS:nt_bgc_DMS+nblyr,1,iblk) + trcrn(i,j,nt_bgc_DMS:nt_bgc_DMS+nblyr,1,iblk) workz2(i,j,nblyr+4) = ocean_bio(i,j,nlt_bgc_DMS,iblk) !ocean endif enddo ! i - enddo ! j + enddo ! j call accum_hist_field(n_bgc_DMS-n3Dbcum, iblk, nzalyr, & workz(:,:,1:nzalyr), a3Da) call accum_hist_field(n_bgc_DMS_cat1-n3Dbcum, iblk, nzalyr, & @@ -3185,18 +3185,18 @@ subroutine accum_hist_bgc (iblk) workz(i,j,1:2) = & !snow trcr(i,j,nt_bgc_PON+nblyr+1:nt_bgc_PON+nblyr+2,iblk) workz(i,j,3:nblyr+3) = & !ice - trcr(i,j,nt_bgc_PON:nt_bgc_PON+nblyr,iblk) + trcr(i,j,nt_bgc_PON:nt_bgc_PON+nblyr,iblk) workz(i,j,nblyr+4) = ocean_bio(i,j,nlt_bgc_PON,iblk) !ocean endif if (aicen(i,j,1,iblk) > puny) then workz2(i,j,1:2) = & !snow trcrn(i,j,nt_bgc_PON+nblyr+1:nt_bgc_PON+nblyr+2,1,iblk) workz2(i,j,3:nblyr+3) = & !ice - trcrn(i,j,nt_bgc_PON:nt_bgc_PON+nblyr,1,iblk) + trcrn(i,j,nt_bgc_PON:nt_bgc_PON+nblyr,1,iblk) workz2(i,j,nblyr+4) = ocean_bio(i,j,nlt_bgc_PON,iblk) !ocean endif enddo ! i - enddo ! j + enddo ! j call accum_hist_field(n_bgc_PON-n3Dbcum, iblk, nzalyr, & workz(:,:,1:nzalyr), a3Da) call accum_hist_field(n_bgc_PON_cat1-n3Dbcum, iblk, nzalyr, & @@ -3220,19 +3220,19 @@ subroutine init_hist_bgc_3Da character (len=3) :: nchar character (len=16):: vname_in ! 
variable name character(len=*), parameter :: subname = '(init_hist_bgc_3Da)' - + call icepack_query_parameters(z_tracers_out=z_tracers) call icepack_warnings_flush(nu_diag) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) ! snow+bio grid - + if (z_tracers) then do ns = 1, nstreams if (histfreq(ns) /= 'x') then - + !---------------------------------------------------------------------------- ! snow+bio grid ==> ! 1:2 snow (surface layer +interior), 3:nblyr+2 ice (bio grid), nblyr+3 ocean @@ -3247,12 +3247,12 @@ subroutine init_hist_bgc_3Da ns, f_zaero) enddo endif - - if (f_bgc_Nit(1:1) /= 'x') & + + if (f_bgc_Nit(1:1) /= 'x') & call define_hist_field(n_bgc_Nit,"bgc_Nit","mmol/m^3",tstr3Da, tcstr, & "bulk nitrate ", "snow+bio grid", c1, c0, & ns, f_bgc_Nit) - + if (f_bgc_Am(1:1) /= 'x') & call define_hist_field(n_bgc_Am,"bgc_Am","mmol/m^3",tstr3Da, tcstr, & "bulk ammonia/um ", "snow+bio grid", c1, c0, & @@ -3313,7 +3313,7 @@ subroutine init_hist_bgc_3Da enddo endif if (f_bgc_Fed (1:1) /= 'x') then - do n=1,n_fed + do n=1,n_fed write(nchar,'(i3.3)') n write(vname_in,'(a,a)') 'bgc_Fed', trim(nchar) call define_hist_field(n_bgc_Fed (n,:),vname_in,"umol/m^3",tstr3Da, tcstr, & @@ -3322,7 +3322,7 @@ subroutine init_hist_bgc_3Da enddo endif if (f_bgc_Fep (1:1) /= 'x') then - do n=1,n_fep + do n=1,n_fep write(nchar,'(i3.3)') n write(vname_in,'(a,a)') 'bgc_Fep', trim(nchar) call define_hist_field(n_bgc_Fep (n,:),vname_in,"umol/m^3",tstr3Da, tcstr, & @@ -3330,32 +3330,32 @@ subroutine init_hist_bgc_3Da ns, f_bgc_Fep ) enddo endif - + if (f_bgc_Sil(1:1) /= 'x') & call define_hist_field(n_bgc_Sil,"bgc_Sil","mmol/m^3",tstr3Da, tcstr, & "bulk silicate ", "snow+bio grid", c1, c0, & ns, f_bgc_Sil) - + if (f_bgc_hum(1:1) /= 'x') & call define_hist_field(n_bgc_hum,"bgc_hum","mmol/m^3",tstr3Da, tcstr, & "bulk humic (carbon) material ", "snow+bio grid", c1, c0, & ns, f_bgc_hum) - + if (f_bgc_DMSPp(1:1) /= 'x') & call define_hist_field(n_bgc_DMSPp,"bgc_DMSPp","mmol/m^3",tstr3Da, tcstr, & "bulk algal DMSP ", "snow+bio grid", c1, c0,& ns, f_bgc_DMSPp) - + if (f_bgc_DMSPd(1:1) /= 'x') & call define_hist_field(n_bgc_DMSPd,"bgc_DMSPd","mmol/m^3",tstr3Da, tcstr, & "bulk dissolved DMSP ", "snow+bio grid", c1, c0, & ns, f_bgc_DMSPd) - + if (f_bgc_DMS(1:1) /= 'x') & call define_hist_field(n_bgc_DMS,"bgc_DMS","mmol/m^3",tstr3Da, tcstr, & "bulk DMS gas ", "snow+bio grid", c1, c0, & ns, f_bgc_DMS) - + if (f_bgc_PON(1:1) /= 'x') & call define_hist_field(n_bgc_PON,"bgc_PON","mmol/m^3",tstr3Da, tcstr, & "other bulk nitrogen pool ", "snow+bio grid", c1, c0, & @@ -3365,11 +3365,11 @@ subroutine init_hist_bgc_3Da ! 
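For illustration only (not part of the patch): the per-species define_hist_field calls above build numbered field names with a zero-padded internal write, e.g. bgc_Fed001. A standalone sketch:

    program fieldname_demo
      implicit none
      character (len=3)  :: nchar
      character (len=16) :: vname_in
      integer :: n
      do n = 1, 3
         write(nchar,'(i3.3)') n                        ! zero-padded: 001, 002, 003
         write(vname_in,'(a,a)') 'bgc_Fed', trim(nchar)
         print *, vname_in                              ! bgc_Fed001, ...
      enddo
    end program fieldname_demo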
Category 1 BGC !---------------------------------------------- - if (f_bgc_Nit_cat1(1:1) /= 'x') & + if (f_bgc_Nit_cat1(1:1) /= 'x') & call define_hist_field(n_bgc_Nit_cat1,"bgc_Nit_cat1","mmol/m^3",tstr3Da, tcstr, & "bulk nitrate in cat 1 ", "snow+bio grid", c1, c0, & ns, f_bgc_Nit_cat1) - + if (f_bgc_Am_cat1(1:1) /= 'x') & call define_hist_field(n_bgc_Am_cat1,"bgc_Am_cat1","mmol/m^3",tstr3Da, tcstr, & "bulk ammonia/um in cat 1", "snow+bio grid", c1, c0, & @@ -3412,7 +3412,7 @@ subroutine init_hist_bgc_3Da enddo endif if (f_bgc_Fed_cat1 (1:1) /= 'x') then - do n=1,n_fed + do n=1,n_fed write(nchar,'(i3.3)') n write(vname_in,'(a,a)') 'bgc_Fed_cat1', trim(nchar) call define_hist_field(n_bgc_Fed_cat1 (n,:),vname_in,"umol/m^3",tstr3Da, tcstr, & @@ -3421,7 +3421,7 @@ subroutine init_hist_bgc_3Da enddo endif if (f_bgc_Fep_cat1 (1:1) /= 'x') then - do n=1,n_fep + do n=1,n_fep write(nchar,'(i3.3)') n write(vname_in,'(a,a)') 'bgc_Fep_cat1', trim(nchar) call define_hist_field(n_bgc_Fep_cat1 (n,:),vname_in,"umol/m^3",tstr3Da, tcstr, & @@ -3429,32 +3429,32 @@ subroutine init_hist_bgc_3Da ns, f_bgc_Fep_cat1 ) enddo endif - + if (f_bgc_Sil_cat1(1:1) /= 'x') & call define_hist_field(n_bgc_Sil_cat1,"bgc_Sil_cat1","mmol/m^3",tstr3Da, tcstr, & "bulk silicate in cat 1", "snow+bio grid", c1, c0, & ns, f_bgc_Sil_cat1) - + if (f_bgc_hum_cat1(1:1) /= 'x') & call define_hist_field(n_bgc_hum,"bgc_hum_cat1","mmol/m^3",tstr3Da, tcstr, & "bulk humic (carbon) material in cat 1", "snow+bio grid", c1, c0, & ns, f_bgc_hum_cat1) - + if (f_bgc_DMSPd_cat1(1:1) /= 'x') & call define_hist_field(n_bgc_DMSPd_cat1,"bgc_DMSPd_cat1","mmol/m^3",tstr3Da, tcstr, & "bulk dissolved DMSP in cat 1", "snow+bio grid", c1, c0, & ns, f_bgc_DMSPd_cat1) - + if (f_bgc_DMS_cat1(1:1) /= 'x') & call define_hist_field(n_bgc_DMS_cat1,"bgc_DMS_cat1","mmol/m^3",tstr3Da, tcstr, & "bulk DMS gas in cat 1", "snow+bio grid", c1, c0, & ns, f_bgc_DMS_cat1) - + if (f_bgc_PON_cat1(1:1) /= 'x') & call define_hist_field(n_bgc_PON_cat1,"bgc_PON_cat1","mmol/m^3",tstr3Da, tcstr, & "other bulk nitrogen pool in cat 1", "snow+bio grid", c1, c0, & ns, f_bgc_PON_cat1) - + endif ! histfreq(ns) /= 'x' enddo !ns @@ -3473,7 +3473,7 @@ subroutine init_history_bgc use ice_arrays_column, only: PP_net, grow_net, hbri, & ice_bio_net, snow_bio_net, fbio_snoice, fbio_atmice, & - fzsal, fzsal_g, zfswin + fzsal, fzsal_g, zfswin use ice_flux_bgc, only: flux_bio, flux_bio_ai, fnit, fsil, & famm, fdmsp, fdms, fhum, fdust, falgalN, fdoc, fdic, & fdon, ffep, ffed diff --git a/cicecore/cicedynB/analysis/ice_history_drag.F90 b/cicecore/cicedynB/analysis/ice_history_drag.F90 index c0a1f99bd..fba19b364 100644 --- a/cicecore/cicedynB/analysis/ice_history_drag.F90 +++ b/cicecore/cicedynB/analysis/ice_history_drag.F90 @@ -1,7 +1,7 @@ !======================================================================= ! 2013 module for form drag parameters -! authors Michel Tsamados, David Schroeder, CPOM +! authors Michel Tsamados, David Schroeder, CPOM module ice_history_drag @@ -17,7 +17,7 @@ module ice_history_drag implicit none private public :: accum_hist_drag, init_hist_drag_2D - + !--------------------------------------------------------------- ! 
flags: write to output file if true or histfreq value !--------------------------------------------------------------- @@ -31,7 +31,7 @@ module ice_history_drag !--------------------------------------------------------------- namelist / icefields_drag_nml / & - f_Cdn_atm, f_Cdn_ocn , & + f_Cdn_atm, f_Cdn_ocn , & f_drag !--------------------------------------------------------------- @@ -47,7 +47,7 @@ module ice_history_drag n_Cdn_atm_skin , n_Cdn_atm_floe, & n_Cdn_atm_pond , n_Cdn_atm_rdg, & n_Cdn_ocn_skin , n_Cdn_ocn_floe, & - n_Cdn_ocn_keel , n_Cdn_atm_ratio + n_Cdn_ocn_keel , n_Cdn_atm_ratio !======================================================================= @@ -124,43 +124,43 @@ subroutine init_hist_drag_2D "hdraft: draught", & "none", c1, c0, & ns, f_drag) - + if (f_drag(1:1) /= 'x') & call define_hist_field(n_hridge,"hridge","m",tstr2D, tcstr, & "hridge: ridge height", & "none", c1, c0, & ns, f_drag) - + if (f_drag(1:1) /= 'x') & call define_hist_field(n_distrdg,"distrdg","m",tstr2D, tcstr, & "distrdg: distance between ridges", & "none", c1, c0, & - ns, f_drag) + ns, f_drag) if (f_drag(1:1) /= 'x') & call define_hist_field(n_hkeel,"hkeel","m",tstr2D, tcstr, & "hkeel: keel depth", & "none", c1, c0, & ns, f_drag) - + if (f_drag(1:1) /= 'x') & call define_hist_field(n_dkeel,"dkeel","m",tstr2D, tcstr, & "dkeel: distance between keels", & "none", c1, c0, & - ns, f_drag) + ns, f_drag) if (f_drag(1:1) /= 'x') & call define_hist_field(n_lfloe,"lfloe","m",tstr2D, tcstr, & "lfloe: floe length", & "none", c1, c0, & ns, f_drag) - + if (f_drag(1:1) /= 'x') & call define_hist_field(n_dfloe,"dfloe","m",tstr2D, tcstr, & "dfloe: distance between floes", & "none", c1, c0, & - ns, f_drag) - + ns, f_drag) + if (f_Cdn_atm(1:1) /= 'x') & call define_hist_field(n_Cdn_atm,"Cdn_atm","none",tstr2D, tcstr, & "Ca: total ice-atm drag coefficient", & @@ -172,49 +172,49 @@ subroutine init_hist_drag_2D "Cdn_ocn: total ice-ocn drag coefficient", & "none", c1, c0, & ns, f_Cdn_ocn) - + if (f_drag(1:1) /= 'x') & call define_hist_field(n_Cdn_atm_skin,"Cdn_atm_skin","none", & tstr2D, tcstr, & "Cdn_atm_skin: neutral skin ice-atm drag coefficient", & "none", c1, c0, & ns, f_drag) - + if (f_drag(1:1) /= 'x') & call define_hist_field(n_Cdn_atm_floe,"Cdn_atm_floe","none", & tstr2D, tcstr, & "Cdn_atm_floe: neutral floe edge ice-atm drag coefficient", & "none", c1, c0, & - ns, f_drag) - + ns, f_drag) + if (f_drag(1:1) /= 'x') & call define_hist_field(n_Cdn_atm_pond,"Cdn_atm_pond","none", & tstr2D, tcstr, & "Cdn_atm_pond: neutral pond edge ice-atm drag coefficient", & "none", c1, c0, & ns, f_drag) - + if (f_drag(1:1) /= 'x') & call define_hist_field(n_Cdn_atm_rdg,"Cdn_atm_rdg","none", & tstr2D, tcstr, & "Cdn_atm_rdg: neutral ridge ice-atm drag coefficient", & "none", c1, c0, & ns, f_drag) - + if (f_drag(1:1) /= 'x') & call define_hist_field(n_Cdn_ocn_skin,"Cdn_ocn_skin","none", & tstr2D, tcstr, & "Cdn_ocn_skin: neutral skin ice-ocn drag coefficient", & "none", c1, c0, & ns, f_drag) - + if (f_drag(1:1) /= 'x') & call define_hist_field(n_Cdn_ocn_floe,"Cdn_ocn_floe","none", & tstr2D, tcstr, & "Cdn_ocn_floe: neutral floe edge ice-ocn drag coefficient", & "none", c1, c0, & - ns, f_drag) - + ns, f_drag) + if (f_drag(1:1) /= 'x') & call define_hist_field(n_Cdn_ocn_keel,"Cdn_ocn_keel","none", & tstr2D, tcstr, & @@ -281,21 +281,21 @@ subroutine accum_hist_drag (iblk) call accum_hist_field(n_lfloe, iblk, lfloe(:,:,iblk), a2D) call accum_hist_field(n_dfloe, iblk, dfloe(:,:,iblk), a2D) call accum_hist_field(n_Cdn_atm_rdg, & - iblk, 
Cdn_atm_rdg(:,:,iblk), a2D) + iblk, Cdn_atm_rdg(:,:,iblk), a2D) call accum_hist_field(n_Cdn_atm_floe, & iblk, Cdn_atm_floe(:,:,iblk), a2D) call accum_hist_field(n_Cdn_atm_pond, & iblk, Cdn_atm_pond(:,:,iblk), a2D) call accum_hist_field(n_Cdn_atm_skin, & - iblk, Cdn_atm_skin(:,:,iblk), a2D) + iblk, Cdn_atm_skin(:,:,iblk), a2D) call accum_hist_field(n_Cdn_atm_ratio, & iblk, Cdn_atm_ratio(:,:,iblk), a2D) call accum_hist_field(n_Cdn_ocn_keel, & - iblk, Cdn_ocn_keel(:,:,iblk), a2D) + iblk, Cdn_ocn_keel(:,:,iblk), a2D) call accum_hist_field(n_Cdn_ocn_floe, & iblk, Cdn_ocn_floe(:,:,iblk), a2D) call accum_hist_field(n_Cdn_ocn_skin, & - iblk, Cdn_ocn_skin(:,:,iblk), a2D) + iblk, Cdn_ocn_skin(:,:,iblk), a2D) end if endif ! if(allocated(a2D)) diff --git a/cicecore/cicedynB/analysis/ice_history_fsd.F90 b/cicecore/cicedynB/analysis/ice_history_fsd.F90 index c64ecbefa..50fee99e7 100644 --- a/cicecore/cicedynB/analysis/ice_history_fsd.F90 +++ b/cicecore/cicedynB/analysis/ice_history_fsd.F90 @@ -21,7 +21,7 @@ module ice_history_fsd private public :: accum_hist_fsd, init_hist_fsd_2D, init_hist_fsd_3Df, & init_hist_fsd_4Df - + !--------------------------------------------------------------- ! flags: write to output file if true or histfreq value !--------------------------------------------------------------- @@ -79,7 +79,6 @@ subroutine init_hist_fsd_2D integer (kind=int_kind) :: ns integer (kind=int_kind) :: nml_error ! namelist i/o error flag - real (kind=dbl_kind) :: secday logical (kind=log_kind) :: tr_fsd, wave_spec character(len=*), parameter :: subname = '(init_hist_fsd_2D)' @@ -273,12 +272,12 @@ subroutine init_hist_fsd_4Df if (histfreq(ns) /= 'x') then if (f_afsdn(1:1) /= 'x') & - call define_hist_field(n_afsdn,"afsdn","1",tstr4Df, tcstr, & + call define_hist_field(n_afsdn,"afsdn","1",tstr4Df, tcstr, & "areal floe size and thickness distribution", & "per unit bin width", c1, c0, ns, f_afsdn) endif ! if (histfreq(ns) /= 'x') then - enddo ! ns + enddo ! ns endif ! tr_fsd @@ -398,7 +397,7 @@ subroutine accum_hist_fsd (iblk) if (f_fsdrad(1:1) /= 'x') then do j = 1, ny_block do i = 1, nx_block - worka(i,j) = c0 + worka(i,j) = c0 if (aice_init(i,j,iblk) > puny) then do k = 1, nfsd_hist do n = 1, ncat_hist @@ -450,7 +449,7 @@ subroutine accum_hist_fsd (iblk) end do call accum_hist_field(n_afsd-n3Dacum, iblk, nfsd_hist, worke, a3Df) endif - + if (f_dafsd_newi(1:1)/= 'x') & call accum_hist_field(n_dafsd_newi-n3Dacum, iblk, nfsd_hist, & d_afsd_newi(:,:,1:nfsd_hist,iblk), a3Df) @@ -473,7 +472,7 @@ subroutine accum_hist_fsd (iblk) if (f_afsdn(1:1) /= 'x') then do n = 1, ncat_hist - do k = 1, nfsd_hist + do k = 1, nfsd_hist do j = 1, ny_block do i = 1, nx_block workd(i,j,k,n) = trcrn(i,j,nt_fsd+k-1,n,iblk) & diff --git a/cicecore/cicedynB/analysis/ice_history_mechred.F90 b/cicecore/cicedynB/analysis/ice_history_mechred.F90 index 920a83b47..98c58bc39 100644 --- a/cicecore/cicedynB/analysis/ice_history_mechred.F90 +++ b/cicecore/cicedynB/analysis/ice_history_mechred.F90 @@ -20,7 +20,7 @@ module ice_history_mechred implicit none private public :: accum_hist_mechred, init_hist_mechred_2D, init_hist_mechred_3Dc - + !--------------------------------------------------------------- ! 
flags: write to output file if true or histfreq value !--------------------------------------------------------------- @@ -188,13 +188,13 @@ subroutine init_hist_mechred_2D "ice area ridging rate", & "none", secday*c100, c0, & ns, f_dardg1dt) - + if (f_dardg2dt(1:1) /= 'x') & call define_hist_field(n_dardg2dt,"dardg2dt","%/day",tstr2D, tcstr, & "ridge area formation rate", & "none", secday*c100, c0, & ns, f_dardg2dt) - + if (f_dvirdgdt(1:1) /= 'x') & call define_hist_field(n_dvirdgdt,"dvirdgdt","cm/day",tstr2D, tcstr, & "ice volume ridging rate", & diff --git a/cicecore/cicedynB/analysis/ice_history_pond.F90 b/cicecore/cicedynB/analysis/ice_history_pond.F90 index ef9a5237e..f6e4b8737 100644 --- a/cicecore/cicedynB/analysis/ice_history_pond.F90 +++ b/cicecore/cicedynB/analysis/ice_history_pond.F90 @@ -20,7 +20,7 @@ module ice_history_pond implicit none private public :: accum_hist_pond, init_hist_pond_2D, init_hist_pond_3Dc - + !--------------------------------------------------------------- ! flags: write to output file if true or histfreq value !--------------------------------------------------------------- @@ -40,9 +40,9 @@ module ice_history_pond namelist / icefields_pond_nml / & f_apondn, f_apeffn , & f_hpondn, & - f_apond, f_apond_ai , & - f_hpond, f_hpond_ai , & - f_ipond, f_ipond_ai , & + f_apond, f_apond_ai , & + f_hpond, f_hpond_ai , & + f_ipond, f_ipond_ai , & f_apeff, f_apeff_ai !--------------------------------------------------------------- @@ -50,7 +50,7 @@ module ice_history_pond !--------------------------------------------------------------- integer (kind=int_kind), dimension(max_nstrm) :: & - n_apondn , n_apeffn , & + n_apondn , n_apeffn , & n_hpondn , & n_apond , n_apond_ai, & n_hpond , n_hpond_ai, & @@ -147,7 +147,7 @@ subroutine init_hist_pond_2D ns, f_apond) if (f_apond_ai(1:1) /= 'x') & - call define_hist_field(n_apond_ai,"apond_ai","1",tstr2D, tcstr, & + call define_hist_field(n_apond_ai,"apond_ai","1",tstr2D, tcstr, & "melt pond fraction of grid cell", & "weighted by ice area", c1, c0, & ns, f_apond_ai) @@ -159,7 +159,7 @@ subroutine init_hist_pond_2D ns, f_hpond) if (f_hpond_ai(1:1) /= 'x') & - call define_hist_field(n_hpond_ai,"hpond_ai","m",tstr2D, tcstr, & + call define_hist_field(n_hpond_ai,"hpond_ai","m",tstr2D, tcstr, & "mean melt pond depth over grid cell", & "weighted by ice area", c1, c0, & ns, f_hpond) @@ -171,7 +171,7 @@ subroutine init_hist_pond_2D ns, f_ipond) if (f_ipond_ai(1:1) /= 'x') & - call define_hist_field(n_ipond_ai,"ipond_ai","m",tstr2D, tcstr, & + call define_hist_field(n_ipond_ai,"ipond_ai","m",tstr2D, tcstr, & "mean pond ice thickness over grid cell", & "weighted by ice area", c1, c0, & ns, f_ipond_ai) @@ -192,7 +192,7 @@ subroutine init_hist_pond_2D enddo ! nstreams endif ! tr_pond - + end subroutine init_hist_pond_2D !======================================================================= @@ -212,14 +212,14 @@ subroutine init_hist_pond_3Dc file=__FILE__, line=__LINE__) if (tr_pond) then - + ! 3D (category) variables must be looped separately do ns = 1, nstreams if (histfreq(ns) /= 'x') then if (f_apondn(1:1) /= 'x') & call define_hist_field(n_apondn,"apondn","1",tstr3Dc, tcstr, & - "melt pond fraction, category","none", c1, c0, & + "melt pond fraction, category","none", c1, c0, & ns, f_apondn) if (f_hpondn(1:1) /= 'x') & @@ -376,7 +376,7 @@ subroutine accum_hist_pond (iblk) * trcr(:,:,nt_ipnd,iblk), a2D) endif ! 
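For illustration only (not part of the patch): the f_* flags in icefields_pond_nml above take histfreq characters, so one field can be written to several streams at once, while 'x' disables it. A hypothetical namelist excerpt; the values are illustrative:

    &icefields_pond_nml
      f_apond  = 'md'   ! write apond to the monthly and daily streams
      f_hpondn = 'x'    ! do not write hpondn
    /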
ponds - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo diff --git a/cicecore/cicedynB/analysis/ice_history_shared.F90 b/cicecore/cicedynB/analysis/ice_history_shared.F90 index 66c4401c7..ee48a9996 100644 --- a/cicecore/cicedynB/analysis/ice_history_shared.F90 +++ b/cicecore/cicedynB/analysis/ice_history_shared.F90 @@ -2,17 +2,17 @@ ! ! Output files: netCDF or binary data, Fortran unformatted dumps ! -! The following variables are currently hard-wired as snapshots +! The following variables are currently hard-wired as snapshots ! (instantaneous rather than time-averages): ! divu, shear, sig1, sig2, sigP, trsig, mlt_onset, frz_onset, hisnap, aisnap ! ! Options for histfreq: '1','h','d','m','y','x', where x means that -! output stream will not be used (recommended for efficiency). -! histfreq_n can be any nonnegative integer, where 0 means that the +! output stream will not be used (recommended for efficiency). +! histfreq_n can be any nonnegative integer, where 0 means that the ! corresponding histfreq frequency will not be used. ! The flags (f_) can be set to '1','h','d','m','y' or 'x', where ! n means the field will not be written. To output the same field at -! more than one frequency, for instance monthy and daily, set +! more than one frequency, for instance monthy and daily, set ! f_ = 'md'. ! ! authors Tony Craig and Bruce Briegleb, NCAR @@ -34,7 +34,7 @@ module ice_history_shared private public :: define_hist_field, accum_hist_field, icefields_nml, construct_filename - + integer (kind=int_kind), public :: history_precision logical (kind=log_kind), public :: & @@ -142,7 +142,7 @@ module ice_history_shared a4Di(:,:,:,:,:,:), & ! field accumulations/averages, 4D categories,vertical, ice a4Ds(:,:,:,:,:,:), & ! field accumulations/averages, 4D categories,vertical, snow a4Df(:,:,:,:,:,:) ! field accumulations/averages, 4D floe size, thickness categories - + real (kind=dbl_kind), allocatable, public :: & Tinz4d (:,:,:,:) , & ! array for Tin Tsnz4d (:,:,:,:) , & ! array for Tsn @@ -199,7 +199,7 @@ module ice_history_shared nstr4Df = 'NLON NLAT NFSD NCAT', & ! vcoord for N cell, 4D, fsd estr4Df = 'ELON ELAT NFSD NCAT' ! vcoord for E cell, 4D, fsd !ferret -! tstr4Di = 'TLON TLAT VGRDi NCAT time', & ! ferret can not handle time +! tstr4Di = 'TLON TLAT VGRDi NCAT time', & ! ferret can not handle time ! ustr4Di = 'ULON ULAT VGRDi NCAT time', & ! index on 4D variables. ! tstr4Ds = 'TLON TLAT VGRDs NCAT time', & ! Use 'ferret' lines instead ! ustr4Ds = 'ULON ULAT VGRDs NCAT time', & ! 
(below also) @@ -367,10 +367,10 @@ module ice_history_shared f_keffn_top = 'x', & f_Tinz = 'x', f_Sinz = 'x', & f_Tsnz = 'x', & - f_a11 = 'x', f_a12 = 'x', & - f_e11 = 'x', f_e12 = 'x', & + f_a11 = 'x', f_a12 = 'x', & + f_e11 = 'x', f_e12 = 'x', & f_e22 = 'x', & - f_s11 = 'x', f_s12 = 'x', & + f_s11 = 'x', f_s12 = 'x', & f_s22 = 'x', & f_yieldstress11 = 'x', & f_yieldstress12 = 'x', & @@ -411,7 +411,7 @@ module ice_history_shared f_atmspd, f_atmdir , & f_fswup, & f_fswdn, f_flwdn , & - f_snow, f_snow_ai , & + f_snow, f_snow_ai , & f_rain, f_rain_ai , & f_sst, f_sss , & f_uocn, f_vocn , & @@ -436,8 +436,8 @@ module ice_history_shared f_snoice, f_dsnow , & f_meltt, f_melts , & f_meltb, f_meltl , & - f_fresh, f_fresh_ai , & - f_fsalt, f_fsalt_ai , & + f_fresh, f_fresh_ai , & + f_fsalt, f_fsalt_ai , & f_fbot, & f_fhocn, f_fhocn_ai , & f_fswthru, f_fswthru_ai,& @@ -715,7 +715,7 @@ module ice_history_shared n_trsig , n_icepresent , & n_iage , n_FY , & n_fsurf_ai , & - n_fcondtop_ai, n_fmeltt_ai , & + n_fcondtop_ai, n_fmeltt_ai , & n_aicen , n_vicen , & n_fsurfn_ai , & n_fcondtopn_ai, & @@ -765,7 +765,7 @@ subroutine construct_filename(ncfile,suffix,ns) iyear = myear imonth = mmonth iday = mday - isec = msec - dt + isec = int(msec - dt,int_kind) ! construct filename if (write_ic) then @@ -863,7 +863,7 @@ subroutine define_hist_field(id, vname, vunit, vcoord, vcellmeas, & character (len=*), intent(in) :: & vhistfreq ! history frequency - + integer (kind=int_kind), intent(in) :: & ns ! history file stream index @@ -970,7 +970,7 @@ subroutine accum_hist_field_2D(id, iblk, field_accum, field) integer (int_kind), dimension(:), intent(in) :: & ! max_nstrm id ! location in avail_fields array for use in ! later routines - + integer (kind=int_kind), intent(in) :: iblk real (kind=dbl_kind), intent(in) :: & @@ -1030,7 +1030,7 @@ subroutine accum_hist_field_3D(id, iblk, ndim, field_accum, field) integer (int_kind), dimension(:), intent(in) :: & ! max_nstrm id ! location in avail_fields array for use in ! later routines - + integer (kind=int_kind), intent(in) :: iblk integer (kind=int_kind), intent(in) :: & @@ -1095,7 +1095,7 @@ subroutine accum_hist_field_4D(id, iblk, ndim3, ndim4, field_accum, field) integer (int_kind), dimension(:), intent(in) :: & ! max_nstrm id ! location in avail_fields array for use in ! later routines - + integer (kind=int_kind), intent(in) :: iblk integer (kind=int_kind), intent(in) :: & diff --git a/cicecore/cicedynB/analysis/ice_history_snow.F90 b/cicecore/cicedynB/analysis/ice_history_snow.F90 index 090759759..0ec4144bf 100644 --- a/cicecore/cicedynB/analysis/ice_history_snow.F90 +++ b/cicecore/cicedynB/analysis/ice_history_snow.F90 @@ -18,7 +18,7 @@ module ice_history_snow implicit none private public :: accum_hist_snow, init_hist_snow_2D, init_hist_snow_3Dc - + !--------------------------------------------------------------- ! flags: write to output file if true or histfreq value !--------------------------------------------------------------- @@ -193,7 +193,7 @@ subroutine init_hist_snow_2D (dt) endif ! histfreq(ns) /= 'x' enddo ! nstreams endif ! 
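For illustration only (not part of the patch): the construct_filename hunk above makes the real-to-integer conversion explicit, isec = int(msec - dt, int_kind), instead of relying on implicit assignment. A standalone sketch with illustrative kind definitions:

    program int_kind_demo
      implicit none
      integer, parameter :: int_kind = kind(1)
      integer, parameter :: dbl_kind = kind(1.0d0)
      integer (kind=int_kind) :: msec, isec
      real (kind=dbl_kind)    :: dt
      msec = 3600
      dt   = 1800.0_dbl_kind
      isec = int(msec - dt, int_kind)   ! explicit truncation and kind, as in the patch
      print *, isec                     ! 1800
    end program int_kind_demo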
tr_snow - + end subroutine init_hist_snow_2D !======================================================================= @@ -206,7 +206,7 @@ subroutine init_hist_snow_3Dc integer (kind=int_kind) :: ns logical (kind=log_kind) :: tr_snow character(len=*), parameter :: subname = '(init_hist_pond_3Dc)' - + call icepack_query_tracer_flags(tr_snow_out=tr_snow) call icepack_warnings_flush(nu_diag) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & @@ -263,7 +263,6 @@ subroutine accum_hist_snow (iblk) use ice_arrays_column, only: meltsliq use ice_blocks, only: block, nx_block, ny_block - use ice_domain, only: blocks_ice use ice_flux, only: fsloss use ice_history_shared, only: n2D, a2D, a3Dc, ncat_hist, & accum_hist_field, nzslyr @@ -275,7 +274,7 @@ subroutine accum_hist_snow (iblk) ! local variables integer (kind=int_kind) :: & - i, j, k, n + k, n integer (kind=int_kind) :: & nt_smice, nt_smliq, nt_rhos, nt_rsnw @@ -356,7 +355,7 @@ subroutine accum_hist_snow (iblk) if (f_fsloss(1:1)/= 'x') & call accum_hist_field(n_fsloss, iblk, & fsloss(:,:,iblk), a2D) - + endif ! allocated(a2D) ! 3D category fields @@ -422,7 +421,7 @@ subroutine accum_hist_snow (iblk) endif ! allocated(a3Dc) endif ! tr_snow - + end subroutine accum_hist_snow !======================================================================= diff --git a/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 b/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 index 5cf0b5dbc..f71d959da 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 @@ -1271,7 +1271,7 @@ subroutine stress_eap (nx_block, ny_block, & csigmne, csigmnw, csigmse, csigmsw , & csig12ne, csig12nw, csig12se, csig12sw , & str12ew, str12we, str12ns, str12sn , & - strp_tmp, strm_tmp + strp_tmp, strm_tmp real (kind=dbl_kind) :: & alpharne, alpharnw, alpharsw, alpharse, & diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 index ecd283642..c2060285a 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 @@ -98,12 +98,12 @@ subroutine evp (dt) stress12_1, stress12_2, stress12_3, stress12_4, & stresspT, stressmT, stress12T, & stresspU, stressmU, stress12U - use ice_grid, only: hm, tmask, umask, umaskCD, nmask, emask, uvm, epm, npm, & + use ice_grid, only: tmask, umask, umaskCD, nmask, emask, uvm, epm, npm, & iceumask, iceemask, icenmask, & dxE, dxN, dxT, dxU, dyE, dyN, dyT, dyU, & ratiodxN, ratiodxNr, ratiodyE, ratiodyEr, & dxhy, dyhx, cxp, cyp, cxm, cym, & - tarear, uarear, earear, narear, grid_average_X2Y, tarea, uarea, & + tarear, uarear, earear, narear, grid_average_X2Y, uarea, & grid_type, grid_ice, & grid_atm_dynu, grid_atm_dynv, grid_ocn_dynu, grid_ocn_dynv use ice_state, only: aice, vice, vsno, uvel, vvel, uvelN, vvelN, & @@ -871,7 +871,7 @@ subroutine evp (dt) shearU (:,:,iblk), & shear (:,:,iblk), divu (:,:,iblk), & rdg_conv(:,:,iblk), rdg_shear(:,:,iblk)) - + endif enddo !$OMP END PARALLEL DO @@ -1408,7 +1408,7 @@ subroutine stress (nx_block, ny_block, & csigmne, csigmnw, csigmse, csigmsw , & csig12ne, csig12nw, csig12se, csig12sw , & str12ew, str12we, str12ns, str12sn , & - strp_tmp, strm_tmp, tmp + strp_tmp, strm_tmp character(len=*), parameter :: subname = '(stress)' diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp_1d.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp_1d.F90 index 2f5389d06..fe04a3d63 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp_1d.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp_1d.F90 @@ -779,8 +779,7 
@@ subroutine stepu_last(NA_len, rhow, lb, ub, Cw, aiu, uocn, vocn, & use ice_kinds_mod use ice_constants, only : c0, c1 - use ice_dyn_shared, only : brlx, revp, u0, cosw, sinw, & - seabed_stress + use ice_dyn_shared, only : brlx, revp, u0, cosw, sinw implicit none diff --git a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 index 237861c60..95d2eedb1 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 @@ -1896,7 +1896,7 @@ subroutine deformationsC_T (nx_block, ny_block, & !----------------------------------------------------------------- ! deformations for mechanical redistribution !----------------------------------------------------------------- - + shearTsqr = (shearU(i ,j )**2 * uarea(i ,j ) & + shearU(i ,j-1)**2 * uarea(i ,j-1) & + shearU(i-1,j-1)**2 * uarea(i-1,j-1) & @@ -2326,7 +2326,7 @@ end subroutine visc_replpress subroutine dyn_haloUpdate1(halo_info, halo_info_mask, field_loc, field_type, fld1) use ice_boundary, only: ice_halo, ice_HaloUpdate - use ice_domain, only: nblocks, maskhalo_dyn, halo_dynbundle + use ice_domain, only: maskhalo_dyn, halo_dynbundle use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound type (ice_halo), intent(in) :: & @@ -2342,12 +2342,6 @@ subroutine dyn_haloUpdate1(halo_info, halo_info_mask, field_loc, field_type, fld ! local variables - integer (kind=int_kind) :: & - iblk ! iblock - - real (kind=dbl_kind), dimension (nx_block,ny_block,1,max_blocks) :: & - fldbundle ! work array for boundary updates - character(len=*), parameter :: subname = '(dyn_haloUpdate1)' call ice_timer_start(timer_bound) @@ -2370,7 +2364,7 @@ end subroutine dyn_haloUpdate1 subroutine dyn_haloUpdate2(halo_info, halo_info_mask, field_loc, field_type, fld1, fld2) use ice_boundary, only: ice_halo, ice_HaloUpdate - use ice_domain, only: nblocks, maskhalo_dyn, halo_dynbundle + use ice_domain, only: maskhalo_dyn, halo_dynbundle use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound type (ice_halo), intent(in) :: & @@ -2387,9 +2381,6 @@ subroutine dyn_haloUpdate2(halo_info, halo_info_mask, field_loc, field_type, fld ! local variables - integer (kind=int_kind) :: & - iblk ! iblock - real (kind=dbl_kind), dimension (nx_block,ny_block,2,max_blocks) :: & fldbundle ! work array for boundary updates @@ -2434,7 +2425,7 @@ end subroutine dyn_haloUpdate2 subroutine dyn_haloUpdate3(halo_info, halo_info_mask, field_loc, field_type, fld1, fld2, fld3) use ice_boundary, only: ice_halo, ice_HaloUpdate - use ice_domain, only: nblocks, maskhalo_dyn, halo_dynbundle + use ice_domain, only: maskhalo_dyn, halo_dynbundle use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound type (ice_halo), intent(in) :: & @@ -2452,9 +2443,6 @@ subroutine dyn_haloUpdate3(halo_info, halo_info_mask, field_loc, field_type, fld ! local variables - integer (kind=int_kind) :: & - iblk ! iblock - real (kind=dbl_kind), dimension (nx_block,ny_block,3,max_blocks) :: & fldbundle ! 
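For illustration only (not part of the patch): the dyn_haloUpdateN routines above keep a fldbundle work array so that, when halo_dynbundle is set, several fields can share a single boundary update instead of one call each. A standalone sketch of the pack/unpack idea (no MPI; names and sizes illustrative):

    program bundle_demo
      implicit none
      integer, parameter :: nx = 4, ny = 3
      real :: fld1(nx,ny), fld2(nx,ny), fldbundle(nx,ny,2)
      fld1 = 1.0
      fld2 = 2.0
      fldbundle(:,:,1) = fld1      ! pack fields into one work array
      fldbundle(:,:,2) = fld2
      ! ... a single halo update would act on fldbundle here ...
      fld1 = fldbundle(:,:,1)      ! unpack after the update
      fld2 = fldbundle(:,:,2)
      print *, fld1(1,1), fld2(1,1)
    end program bundle_demo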
work array for boundary updates @@ -2503,7 +2491,7 @@ end subroutine dyn_haloUpdate3 subroutine dyn_haloUpdate4(halo_info, halo_info_mask, field_loc, field_type, fld1, fld2, fld3, fld4) use ice_boundary, only: ice_halo, ice_HaloUpdate - use ice_domain, only: nblocks, maskhalo_dyn, halo_dynbundle + use ice_domain, only: maskhalo_dyn, halo_dynbundle use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound type (ice_halo), intent(in) :: & @@ -2522,9 +2510,6 @@ subroutine dyn_haloUpdate4(halo_info, halo_info_mask, field_loc, field_type, fld ! local variables - integer (kind=int_kind) :: & - iblk ! iblock - real (kind=dbl_kind), dimension (nx_block,ny_block,4,max_blocks) :: & fldbundle ! work array for boundary updates @@ -2577,7 +2562,7 @@ end subroutine dyn_haloUpdate4 subroutine dyn_haloUpdate5(halo_info, halo_info_mask, field_loc, field_type, fld1, fld2, fld3, fld4, fld5) use ice_boundary, only: ice_halo, ice_HaloUpdate - use ice_domain, only: nblocks, maskhalo_dyn, halo_dynbundle + use ice_domain, only: maskhalo_dyn, halo_dynbundle use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound type (ice_halo), intent(in) :: & @@ -2597,9 +2582,6 @@ subroutine dyn_haloUpdate5(halo_info, halo_info_mask, field_loc, field_type, fld ! local variables - integer (kind=int_kind) :: & - iblk ! iblock - real (kind=dbl_kind), dimension (nx_block,ny_block,5,max_blocks) :: & fldbundle ! work array for boundary updates diff --git a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 index 7e0bdb745..17fd0b73f 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 @@ -69,7 +69,8 @@ module ice_dyn_vp dim_pgmres , & ! size of pgmres Krylov subspace maxits_fgmres , & ! max nb of iteration for fgmres maxits_pgmres , & ! max nb of iteration for pgmres - fpfunc_andacc , & ! fixed point function for Anderson acceleration: 1: g(x) = FMGRES(A(x),b(x)), 2: g(x) = x - A(x)x + b(x) + fpfunc_andacc , & ! fixed point function for Anderson acceleration: + ! 1: g(x) = FMGRES(A(x),b(x)), 2: g(x) = x - A(x)x + b(x) dim_andacc , & ! size of Anderson minimization matrix (number of saved previous residuals) start_andacc ! acceleration delay factor (acceleration starts at this iteration) @@ -87,7 +88,8 @@ module ice_dyn_vp reltol_andacc ! relative tolerance for Anderson acceleration character (len=char_len), public :: & - precond , & ! preconditioner for fgmres: 'ident' (identity), 'diag' (diagonal), 'pgmres' (Jacobi-preconditioned GMRES) + precond , & ! preconditioner for fgmres: 'ident' (identity), 'diag' (diagonal), + ! 'pgmres' (Jacobi-preconditioned GMRES) algo_nonlin , & ! nonlinear algorithm: 'picard' (Picard iteration), 'anderson' (Anderson acceleration) ortho_type ! type of orthogonalization for FGMRES ('cgs' or 'mgs') @@ -1095,7 +1097,8 @@ subroutine anderson_solver (icellt , icellu , & endif #else ! 
Anderson solver is not usable without LAPACK; abort - call abort_ice(error_message=subname // " CICE was not compiled with LAPACK, and Anderson solver was chosen (algo_nonlin = 'anderson')" , & + call abort_ice(error_message=subname // " CICE was not compiled with LAPACK, "// & + "and Anderson solver was chosen (algo_nonlin = 'anderson')" , & file=__FILE__, line=__LINE__) #endif endif diff --git a/cicecore/cicedynB/dynamics/ice_transport_driver.F90 b/cicecore/cicedynB/dynamics/ice_transport_driver.F90 index 390631eaa..43fe5af13 100644 --- a/cicecore/cicedynB/dynamics/ice_transport_driver.F90 +++ b/cicecore/cicedynB/dynamics/ice_transport_driver.F90 @@ -19,6 +19,7 @@ module ice_transport_driver field_type_scalar, field_type_vector, & field_loc_NEcorner, & field_loc_Nface, field_loc_Eface + use ice_diagnostics, only: diagnostic_abort use ice_fileunits, only: nu_diag use ice_exit, only: abort_ice use icepack_intfc, only: icepack_warnings_flush, icepack_warnings_aborted @@ -690,7 +691,7 @@ subroutine transport_remap (dt) if (ckflag) then write (nu_diag,*) 'istep1, my_task, iblk, cat =', & istep1, my_task, iblk, n - call abort_ice(subname//'ERROR: monotonicity error') + call diagnostic_abort(istop,jstop,iblk,' monotonicity error') endif enddo ! n diff --git a/cicecore/cicedynB/dynamics/ice_transport_remap.F90 b/cicecore/cicedynB/dynamics/ice_transport_remap.F90 index 330816529..6fd037b7b 100644 --- a/cicecore/cicedynB/dynamics/ice_transport_remap.F90 +++ b/cicecore/cicedynB/dynamics/ice_transport_remap.F90 @@ -30,11 +30,13 @@ module ice_transport_remap use ice_kinds_mod use ice_blocks, only: nx_block, ny_block + use ice_calendar, only: istep1 use ice_communicate, only: my_task use ice_constants, only: c0, c1, c2, c12, p333, p4, p5, p6, & eps13, eps16, & field_loc_center, field_type_scalar, & field_loc_NEcorner, field_type_vector + use ice_diagnostics, only: diagnostic_abort use ice_domain_size, only: max_blocks, ncat use ice_fileunits, only: nu_diag use ice_exit, only: abort_ice @@ -329,7 +331,6 @@ subroutine horizontal_remap (dt, ntrace, & tarear, hm, & xav, yav, xxav, yyav ! xyav, xxxav, xxyav, xyyav, yyyav - use ice_calendar, only: istep1 use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound real (kind=dbl_kind), intent(in) :: & @@ -556,14 +557,7 @@ subroutine horizontal_remap (dt, ntrace, & istop, jstop) if (l_stop) then - write(nu_diag,*) 'istep1, my_task, iblk =', & - istep1, my_task, iblk - write (nu_diag,*) 'Global block:', this_block%block_id - if (istop > 0 .and. jstop > 0) & - write(nu_diag,*) 'Global i and j:', & - this_block%i_glob(istop), & - this_block%j_glob(jstop) - call abort_ice(subname//'ERROR: bad departure points') + call diagnostic_abort(istop,jstop,iblk,'bad departure points') endif enddo ! iblk @@ -832,15 +826,7 @@ subroutine horizontal_remap (dt, ntrace, & mm (:,:,0,iblk)) if (l_stop) then - this_block = get_block(blocks_ice(iblk),iblk) - write (nu_diag,*) 'istep1, my_task, iblk, cat =', & - istep1, my_task, iblk, '0' - write (nu_diag,*) 'Global block:', this_block%block_id - if (istop > 0 .and. jstop > 0) & - write(nu_diag,*) 'Global i and j:', & - this_block%i_glob(istop), & - this_block%j_glob(jstop) - call abort_ice (subname//'ERROR: negative area (open water)') + call diagnostic_abort(istop,jstop,iblk,'negative area (open water)') endif ! 
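For illustration only (not part of the patch): the transport hunks above replace each hand-rolled "print indices, then abort_ice" block with a single diagnostic_abort(istop, jstop, iblk, message) call. A standalone analog of that consolidation; the real routine also reports the global block indices:

    program diag_abort_demo
      implicit none
      call check_point(0, 0, 1)    ! ok, no abort
      call check_point(2, 3, 1)    ! reports the failing point and stops
    contains
      subroutine check_point(istop, jstop, iblk)
        integer, intent(in) :: istop, jstop, iblk
        if (istop > 0 .and. jstop > 0) then
           write(*,*) 'abort at local i, j, iblk =', istop, jstop, iblk
           stop 1
        endif
      end subroutine check_point
    end program diag_abort_demo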
ice categories @@ -860,12 +846,7 @@ subroutine horizontal_remap (dt, ntrace, & if (l_stop) then write (nu_diag,*) 'istep1, my_task, iblk, cat =', & istep1, my_task, iblk, n - write (nu_diag,*) 'Global block:', this_block%block_id - if (istop > 0 .and. jstop > 0) & - write(nu_diag,*) 'Global i and j:', & - this_block%i_glob(istop), & - this_block%j_glob(jstop) - call abort_ice (subname//'ERROR: negative area (ice)') + call diagnostic_abort(istop,jstop,iblk,'negative area (ice)') endif enddo ! n diff --git a/cicecore/cicedynB/general/ice_flux.F90 b/cicecore/cicedynB/general/ice_flux.F90 index 845491d2a..a7e5aa584 100644 --- a/cicecore/cicedynB/general/ice_flux.F90 +++ b/cicecore/cicedynB/general/ice_flux.F90 @@ -311,7 +311,7 @@ module ice_flux mlt_onset, &! day of year that sfc melting begins frz_onset, &! day of year that freezing begins (congel or frazil) frazil_diag ! frazil ice growth diagnostic (m/step-->cm/day) - + real (kind=dbl_kind), & dimension (:,:,:,:), allocatable, public :: & fsurfn, & ! category fsurf @@ -330,7 +330,7 @@ module ice_flux ! ice diagnostics and history files as these are more accurate. ! (The others suffer from problem of incorrect values at grid boxes ! that change from an ice free state to an icy state.) - + real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & fresh_ai, & ! fresh water flux to ocean (kg/m^2/s) fsalt_ai, & ! salt flux to ocean (kg/m^2/s) @@ -344,7 +344,7 @@ module ice_flux real (kind=dbl_kind), dimension (:,:,:,:), allocatable, public :: & fswthrun_ai ! per-category fswthru * ai (W/m^2) - + logical (kind=log_kind), public :: send_i2x_per_cat = .false. !----------------------------------------------------------------- @@ -360,7 +360,7 @@ module ice_flux coszen , & ! cosine solar zenith angle, < 0 for sun below horizon rdg_conv, & ! convergence term for ridging (1/s) rdg_shear ! shear term for ridging (1/s) - + real (kind=dbl_kind), dimension(:,:,:,:), allocatable, public :: & salinz ,& ! initial salinity profile (ppt) Tmltz ! initial melting temperature (^oC) @@ -448,7 +448,8 @@ subroutine alloc_flux Tf (nx_block,ny_block,max_blocks), & ! freezing temperature (C) qdp (nx_block,ny_block,max_blocks), & ! deep ocean heat flux (W/m^2), negative upward hmix (nx_block,ny_block,max_blocks), & ! mixed layer depth (m) - daice_da (nx_block,ny_block,max_blocks), & ! data assimilation concentration increment rate (concentration s-1)(only used in hadgem drivers) + daice_da (nx_block,ny_block,max_blocks), & ! data assimilation concentration increment rate (concentration s-1) + ! (only used in hadgem drivers) fsens (nx_block,ny_block,max_blocks), & ! sensible heat flux (W/m^2) flat (nx_block,ny_block,max_blocks), & ! latent heat flux (W/m^2) fswabs (nx_block,ny_block,max_blocks), & ! shortwave flux absorbed in ice and ocean (W/m^2) @@ -791,7 +792,7 @@ subroutine init_coupler_flux fdon (:,:,:,:)= c0 ffep (:,:,:,:)= c0 ffed (:,:,:,:)= c0 - + allocate(fswthrun_ai(nx_block,ny_block,ncat,max_blocks)) fswthrun_ai(:,:,:,:) = c0 @@ -1278,7 +1279,7 @@ subroutine scale_fluxes (nx_block, ny_block, & ! Scale fluxes for history output if (present(fsurf) .and. present(fcondtop) ) then - + do j = 1, ny_block do i = 1, nx_block if (tmask(i,j) .and. aice(i,j) > c0) then @@ -1291,9 +1292,9 @@ subroutine scale_fluxes (nx_block, ny_block, & endif ! tmask and aice > 0 enddo ! i enddo ! j - + endif ! 
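For illustration only (not part of the patch): the *_ai fields declared above are grid-cell means, i.e. per-ice-area fluxes weighted by the ice fraction. A one-line sketch of that convention; the weighting shown is illustrative of the naming, not copied from scale_fluxes:

    program ai_weight_demo
      implicit none
      real :: fresh, fresh_ai, aice
      fresh    = 5.0              ! flux per unit ice area (kg/m^2/s)
      aice     = 0.4              ! ice area fraction of the grid cell
      fresh_ai = fresh * aice     ! grid-cell mean flux
      print *, fresh_ai
    end program ai_weight_demo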
present(fsurf & fcondtop) - + end subroutine scale_fluxes !======================================================================= diff --git a/cicecore/cicedynB/general/ice_flux_bgc.F90 b/cicecore/cicedynB/general/ice_flux_bgc.F90 index 56e644431..0d9184fb7 100644 --- a/cicecore/cicedynB/general/ice_flux_bgc.F90 +++ b/cicecore/cicedynB/general/ice_flux_bgc.F90 @@ -26,13 +26,13 @@ module ice_flux_bgc real (kind=dbl_kind), & ! coupling variable for both tr_aero and tr_zaero dimension (:,:,:,:), allocatable, public :: & fiso_atm, & ! isotope deposition rate (kg/m^2 s) - faero_atm ! aerosol deposition rate (kg/m^2 s) + faero_atm ! aerosol deposition rate (kg/m^2 s) real (kind=dbl_kind), & dimension (:,:,:,:), allocatable, public :: & flux_bio_atm ! all bio fluxes to ice from atmosphere - ! out to ocean + ! out to ocean real (kind=dbl_kind), & dimension (:,:,:,:), allocatable, public :: & @@ -45,8 +45,8 @@ module ice_flux_bgc flux_bio_ai ! all bio fluxes to ocean, averaged over grid cell real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & - fzsal_ai, & ! salt flux to ocean from zsalinity (kg/m^2/s) - fzsal_g_ai ! gravity drainage salt flux to ocean (kg/m^2/s) + fzsal_ai, & ! salt flux to ocean from zsalinity (kg/m^2/s) + fzsal_g_ai ! gravity drainage salt flux to ocean (kg/m^2/s) ! internal @@ -58,7 +58,7 @@ module ice_flux_bgc dsnown ! change in snow thickness in category n (m) real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & - nit , & ! ocean nitrate (mmol/m^3) + nit , & ! ocean nitrate (mmol/m^3) amm , & ! ammonia/um (mmol/m^3) sil , & ! silicate (mmol/m^3) dmsp , & ! dmsp (mmol/m^3) @@ -85,15 +85,15 @@ module ice_flux_bgc fdon ! ice-ocean don flux (mmol/m^2/s) (proteins and amino acids) real (kind=dbl_kind), dimension (:,:,:,:), allocatable, public :: & - dic , & ! ocean dic (mmol/m^3) - fdic ! ice-ocean dic flux (mmol/m^2/s) + dic , & ! ocean dic (mmol/m^3) + fdic ! ice-ocean dic flux (mmol/m^2/s) real (kind=dbl_kind), dimension (:,:,:,:), allocatable, public :: & - fed, fep , & ! ocean dissolved and particulate fe (nM) - ffed, ffep ! ice-ocean dissolved and particulate fe flux (umol/m^2/s) + fed, fep , & ! ocean dissolved and particulate fe (nM) + ffed, ffep ! ice-ocean dissolved and particulate fe flux (umol/m^2/s) real (kind=dbl_kind), dimension (:,:,:,:), allocatable, public :: & - zaeros ! ocean aerosols (mmol/m^3) + zaeros ! ocean aerosols (mmol/m^3) ! isotopes real (kind=dbl_kind), & ! coupling variable for tr_iso @@ -114,16 +114,16 @@ module ice_flux_bgc !======================================================================= ! -! Allocate space for all variables +! Allocate space for all variables ! subroutine alloc_flux_bgc integer (int_kind) :: ierr allocate( & - fzsal_ai (nx_block,ny_block,max_blocks), & ! salt flux to ocean from zsalinity (kg/m^2/s) - fzsal_g_ai (nx_block,ny_block,max_blocks), & ! gravity drainage salt flux to ocean (kg/m^2/s) - nit (nx_block,ny_block,max_blocks), & ! ocean nitrate (mmol/m^3) + fzsal_ai (nx_block,ny_block,max_blocks), & ! salt flux to ocean from zsalinity (kg/m^2/s) + fzsal_g_ai (nx_block,ny_block,max_blocks), & ! gravity drainage salt flux to ocean (kg/m^2/s) + nit (nx_block,ny_block,max_blocks), & ! ocean nitrate (mmol/m^3) amm (nx_block,ny_block,max_blocks), & ! ammonia/um (mmol/m^3) sil (nx_block,ny_block,max_blocks), & ! silicate (mmol/m^3) dmsp (nx_block,ny_block,max_blocks), & ! dmsp (mmol/m^3) @@ -138,32 +138,32 @@ subroutine alloc_flux_bgc fdust (nx_block,ny_block,max_blocks), & ! 
ice-ocean dust flux (kg/m^2/s), positive to ocean hin_old (nx_block,ny_block,ncat,max_blocks), & ! old ice thickness dsnown (nx_block,ny_block,ncat,max_blocks), & ! change in snow thickness in category n (m) - HDO_ocn (nx_block,ny_block,max_blocks), & ! seawater concentration of HDO (kg/kg) - H2_16O_ocn (nx_block,ny_block,max_blocks), & ! seawater concentration of H2_16O (kg/kg) - H2_18O_ocn (nx_block,ny_block,max_blocks), & ! seawater concentration of H2_18O (kg/kg) - Qa_iso (nx_block,ny_block,icepack_max_iso,max_blocks), & ! isotope specific humidity (kg/kg) - Qref_iso (nx_block,ny_block,icepack_max_iso,max_blocks), & ! 2m atm reference isotope spec humidity (kg/kg) - fiso_atm (nx_block,ny_block,icepack_max_iso,max_blocks), & ! isotope deposition rate (kg/m^2 s) - fiso_evap (nx_block,ny_block,icepack_max_iso,max_blocks), & ! isotope evaporation rate (kg/m^2 s) - fiso_ocn (nx_block,ny_block,icepack_max_iso,max_blocks), & ! isotope flux to ocean (kg/m^2/s) - faero_atm (nx_block,ny_block,icepack_max_aero,max_blocks), & ! aerosol deposition rate (kg/m^2 s) + HDO_ocn (nx_block,ny_block,max_blocks), & ! seawater concentration of HDO (kg/kg) + H2_16O_ocn (nx_block,ny_block,max_blocks), & ! seawater concentration of H2_16O (kg/kg) + H2_18O_ocn (nx_block,ny_block,max_blocks), & ! seawater concentration of H2_18O (kg/kg) + Qa_iso (nx_block,ny_block,icepack_max_iso,max_blocks), & ! isotope specific humidity (kg/kg) + Qref_iso (nx_block,ny_block,icepack_max_iso,max_blocks), & ! 2m atm reference isotope spec humidity (kg/kg) + fiso_atm (nx_block,ny_block,icepack_max_iso,max_blocks), & ! isotope deposition rate (kg/m^2 s) + fiso_evap (nx_block,ny_block,icepack_max_iso,max_blocks), & ! isotope evaporation rate (kg/m^2 s) + fiso_ocn (nx_block,ny_block,icepack_max_iso,max_blocks), & ! isotope flux to ocean (kg/m^2/s) + faero_atm (nx_block,ny_block,icepack_max_aero,max_blocks), & ! aerosol deposition rate (kg/m^2 s) faero_ocn (nx_block,ny_block,icepack_max_aero,max_blocks), & ! aerosol flux to ocean (kg/m^2/s) - zaeros (nx_block,ny_block,icepack_max_aero,max_blocks), & ! ocean aerosols (mmol/m^3) + zaeros (nx_block,ny_block,icepack_max_aero,max_blocks), & ! ocean aerosols (mmol/m^3) flux_bio_atm(nx_block,ny_block,icepack_max_nbtrcr,max_blocks), & ! all bio fluxes to ice from atmosphere flux_bio (nx_block,ny_block,icepack_max_nbtrcr,max_blocks), & ! all bio fluxes to ocean flux_bio_ai (nx_block,ny_block,icepack_max_nbtrcr,max_blocks), & ! all bio fluxes to ocean, averaged over grid cell algalN (nx_block,ny_block,icepack_max_algae,max_blocks), & ! ocean algal nitrogen (mmol/m^3) (diatoms, pico, phaeo) - falgalN (nx_block,ny_block,icepack_max_algae,max_blocks), & ! ice-ocean algal nitrogen flux (mmol/m^2/s) (diatoms, pico, phaeo) + falgalN (nx_block,ny_block,icepack_max_algae,max_blocks), & ! ice-ocn algalN flux (mmol/m^2/s) (diatoms, pico, phaeo) doc (nx_block,ny_block,icepack_max_doc,max_blocks), & ! ocean doc (mmol/m^3) (saccharids, lipids, tbd ) fdoc (nx_block,ny_block,icepack_max_doc,max_blocks), & ! ice-ocean doc flux (mmol/m^2/s) (saccharids, lipids, tbd) don (nx_block,ny_block,icepack_max_don,max_blocks), & ! ocean don (mmol/m^3) (proteins and amino acids) fdon (nx_block,ny_block,icepack_max_don,max_blocks), & ! ice-ocean don flux (mmol/m^2/s) (proteins and amino acids) - dic (nx_block,ny_block,icepack_max_dic,max_blocks), & ! ocean dic (mmol/m^3) - fdic (nx_block,ny_block,icepack_max_dic,max_blocks), & ! ice-ocean dic flux (mmol/m^2/s) - fed (nx_block,ny_block,icepack_max_fe, max_blocks), & ! 
ocean dissolved fe (nM) - fep (nx_block,ny_block,icepack_max_fe, max_blocks), & ! ocean particulate fe (nM) - ffed (nx_block,ny_block,icepack_max_fe, max_blocks), & ! ice-ocean dissolved fe flux (umol/m^2/s) - ffep (nx_block,ny_block,icepack_max_fe, max_blocks), & ! ice-ocean particulate fe flux (umol/m^2/s) + dic (nx_block,ny_block,icepack_max_dic,max_blocks), & ! ocean dic (mmol/m^3) + fdic (nx_block,ny_block,icepack_max_dic,max_blocks), & ! ice-ocean dic flux (mmol/m^2/s) + fed (nx_block,ny_block,icepack_max_fe, max_blocks), & ! ocean dissolved fe (nM) + fep (nx_block,ny_block,icepack_max_fe, max_blocks), & ! ocean particulate fe (nM) + ffed (nx_block,ny_block,icepack_max_fe, max_blocks), & ! ice-ocean dissolved fe flux (umol/m^2/s) + ffep (nx_block,ny_block,icepack_max_fe, max_blocks), & ! ice-ocean particulate fe flux (umol/m^2/s) stat=ierr) if (ierr/=0) call abort_ice('(alloc_flux_bgc): Out of memory') @@ -214,10 +214,10 @@ subroutine bgcflux_ice_to_ocn(nx_block, & ! local variables - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & i,j , & ! horizontal indices k ! tracer index - + logical (kind=log_kind) :: & skl_bgc, solve_zbgc, & tr_bgc_Nit, tr_bgc_N, & @@ -226,14 +226,14 @@ subroutine bgcflux_ice_to_ocn(nx_block, & integer (kind=int_kind) :: & nlt_bgc_Nit, nlt_bgc_Am, & - nlt_bgc_Sil, nlt_bgc_DMSPd, nlt_bgc_DMS, nlt_bgc_hum + nlt_bgc_Sil, nlt_bgc_DMSPd, nlt_bgc_DMS, nlt_bgc_hum integer (kind=int_kind), dimension(icepack_max_algae) :: & nlt_bgc_N, nlt_bgc_C ! algae integer (kind=int_kind), dimension(icepack_max_doc) :: & nlt_bgc_DOC ! disolved organic carbon integer (kind=int_kind), dimension(icepack_max_don) :: & - nlt_bgc_DON ! + nlt_bgc_DON ! integer (kind=int_kind), dimension(icepack_max_dic) :: & nlt_bgc_DIC ! disolved inorganic carbon integer (kind=int_kind), dimension(icepack_max_fe) :: & diff --git a/cicecore/cicedynB/general/ice_forcing.F90 b/cicecore/cicedynB/general/ice_forcing.F90 index 36dbfe88c..edff03b9f 100755 --- a/cicecore/cicedynB/general/ice_forcing.F90 +++ b/cicecore/cicedynB/general/ice_forcing.F90 @@ -76,7 +76,7 @@ module ice_forcing sst_file, & sss_file, & sublim_file, & - snow_file + snow_file character (char_len_long), dimension(:), allocatable, public :: & ! input data file names topmelt_file, & @@ -106,7 +106,7 @@ module ice_forcing rhoa_data, & flw_data, & sst_data, & - sss_data, & + sss_data, & uocn_data, & vocn_data, & sublim_data, & @@ -116,7 +116,7 @@ module ice_forcing topmelt_data, & botmelt_data - character(char_len), public :: & + character(char_len), public :: & atm_data_format, & ! 'bin'=binary or 'nc'=netcdf ocn_data_format, & ! 'bin'=binary or 'nc'=netcdf atm_data_type, & ! 'default', 'monthly', 'ncar', 'box2001' @@ -132,15 +132,15 @@ module ice_forcing logical (kind=log_kind), public :: & rotate_wind ! rotate wind/stress to computational grid from true north directed - - character(char_len_long), public :: & + + character(char_len_long), public :: & atm_data_dir , & ! top directory for atmospheric data ocn_data_dir , & ! top directory for ocean data wave_spec_dir, & ! dir name for wave spectrum wave_spec_file,& ! file name for wave spectrum oceanmixed_file ! file name for ocean forcing data - integer (kind=int_kind), parameter :: & + integer (kind=int_kind), parameter :: & nfld = 8 ! number of fields to search for in forcing file ! as in the dummy atm (latm) @@ -159,7 +159,7 @@ module ice_forcing integer (kind=int_kind), public :: & trestore ! 
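For illustration only (not part of the patch): alloc_flux_bgc above ends its large allocate with stat=ierr and aborts immediately on failure rather than letting the run crash later. A standalone sketch of that pattern:

    program alloc_check_demo
      implicit none
      real, allocatable :: nit(:,:,:)
      integer :: ierr
      allocate(nit(10,8,2), stat=ierr)   ! stat= catches allocation failure
      if (ierr /= 0) then
         write(*,*) '(alloc_check_demo): Out of memory'
         stop 1
      endif
      nit = 0.0
      print *, size(nit)
    end program alloc_check_demo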
restoring time scale (days) - real (kind=dbl_kind), public :: & + real (kind=dbl_kind), public :: & trest ! restoring time scale (sec) logical (kind=log_kind), public :: & @@ -196,7 +196,7 @@ module ice_forcing !======================================================================= ! -! Allocate space for all variables +! Allocate space for all variables ! subroutine alloc_forcing integer (int_kind) :: ierr @@ -288,7 +288,7 @@ subroutine init_forcing_atmo endif !------------------------------------------------------------------- - ! Get filenames for input forcing data + ! Get filenames for input forcing data !------------------------------------------------------------------- ! default forcing values from init_flux_atm @@ -310,7 +310,7 @@ subroutine init_forcing_atmo call monthly_files(fyear) elseif (trim(atm_data_type) == 'oned') then call oned_files - elseif (trim(atm_data_type) == 'ISPOL') then + elseif (trim(atm_data_type) == 'ISPOL') then call ISPOL_files elseif (trim(atm_data_type) == 'box2001') then call box2001_data_atm @@ -331,7 +331,8 @@ subroutine init_forcing_atmo elseif (trim(atm_data_type) == 'default') then ! don't need to do anything more else - call abort_ice (error_message=subname//' ERROR atm_data_type unknown = '//trim(atm_data_type), file=__FILE__, line=__LINE__) + call abort_ice (error_message=subname//' ERROR atm_data_type unknown = '// & + trim(atm_data_type), file=__FILE__, line=__LINE__) endif end subroutine init_forcing_atmo @@ -340,13 +341,13 @@ end subroutine init_forcing_atmo subroutine init_forcing_ocn(dt) -! Set sea surface salinity and freezing temperature to annual mean value +! Set sea surface salinity and freezing temperature to annual mean value ! using a 12-month climatology. ! Read sst data for current month, and adjust sst based on freezing ! temperature. No interpolation in time. -! Note: SST is subsequently prognosed if CICE is run -! with a mixed layer ocean (oceanmixed_ice = T), and can be +! Note: SST is subsequently prognosed if CICE is run +! with a mixed layer ocean (oceanmixed_ice = T), and can be ! restored to data (restore_ocn = T). use ice_blocks, only: nx_block, ny_block @@ -362,14 +363,14 @@ subroutine init_forcing_ocn(dt) integer (kind=int_kind) :: & i, j, iblk , & ! horizontal indices k , & ! month index - fid , & ! file id for netCDF file + fid , & ! file id for netCDF file nbits logical (kind=log_kind) :: diag real (kind=dbl_kind) :: secday - character (char_len) :: & + character (char_len) :: & fieldname ! field name in netcdf file real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & @@ -488,7 +489,7 @@ subroutine init_forcing_ocn(dt) elseif (trim(ocn_data_type) == 'hadgem_sst' .or. & trim(ocn_data_type) == 'hadgem_sst_uvocn') then - diag = .true. ! write diagnostic information + diag = .true. ! write diagnostic information sst_file = trim (ocn_data_dir)//'/MONTHLY/sst.1997.nc' @@ -500,11 +501,11 @@ subroutine init_forcing_ocn(dt) call ice_open_nc(sst_file,fid) endif - + fieldname='sst' call ice_read_nc(fid,mmonth,fieldname,sst,diag) - if (my_task == master_task) call ice_close_nc(fid) + if (my_task == master_task) call ice_close_nc(fid) ! Make sure sst is not less than freezing temperature Tf !$OMP PARALLEL DO PRIVATE(iblk,i,j) @@ -539,7 +540,8 @@ subroutine init_forcing_ocn(dt) elseif (trim(ocn_data_type) == 'default') then ! 
don't need to do anything more else - call abort_ice (error_message=subname//' ERROR ocn_data_type unknown = '//trim(ocn_data_type), file=__FILE__, line=__LINE__) + call abort_ice (error_message=subname//' ERROR ocn_data_type unknown = '// & + trim(ocn_data_type), file=__FILE__, line=__LINE__) endif end subroutine init_forcing_ocn @@ -694,7 +696,7 @@ subroutine get_forcing_atmo ilo, ihi, jlo, jhi, & hm (:,:,iblk), & Tair (:,:,iblk), & - fsw (:,:,iblk), & + fsw (:,:,iblk), & cldf (:,:,iblk), & flw (:,:,iblk), & frain (:,:,iblk), & @@ -761,10 +763,10 @@ subroutine get_forcing_ocn (dt) call ocn_data_clim(dt) elseif (trim(ocn_data_type) == 'ncar' .or. & trim(ocn_data_type) == 'ISPOL') then - call ocn_data_ncar(dt) + call ocn_data_ncar(dt) elseif (trim(ocn_data_type) == 'hadgem_sst' .or. & trim(ocn_data_type) == 'hadgem_sst_uvocn') then - call ocn_data_hadgem(dt) + call ocn_data_hadgem(dt) elseif (trim(ocn_data_type) == 'oned') then call ocn_data_oned elseif (trim(ocn_data_type) == 'hycom') then @@ -1039,7 +1041,7 @@ subroutine read_data_nc (flag, recd, yr, ixm, ixx, ixp, & arg = 1 nrec = recd + n2 - call ice_read_nc & + call ice_read_nc & (fid, nrec, fieldname, field_data(:,:,arg,:), debug_forcing, & field_loc, field_type) @@ -1053,7 +1055,7 @@ subroutine read_data_nc (flag, recd, yr, ixm, ixx, ixp, & arg = arg + 1 nrec = recd + ixx - call ice_read_nc & + call ice_read_nc & (fid, nrec, fieldname, field_data(:,:,arg,:), debug_forcing, & field_loc, field_type) @@ -1079,7 +1081,7 @@ subroutine read_data_nc (flag, recd, yr, ixm, ixx, ixp, & arg = arg + 1 nrec = recd + n4 - call ice_read_nc & + call ice_read_nc & (fid, nrec, fieldname, field_data(:,:,arg,:), debug_forcing, & field_loc, field_type) endif ! ixp /= -99 @@ -1311,21 +1313,21 @@ subroutine read_clim_data_nc (readflag, recd, ixm, ixx, ixp, & if (ixm /= -99) then arg = 1 nrec = recd + ixm - call ice_read_nc & + call ice_read_nc & (fid, nrec, fieldname, field_data(:,:,arg,:), & debug_forcing, field_loc, field_type) endif arg = arg + 1 nrec = recd + ixx - call ice_read_nc & + call ice_read_nc & (fid, nrec, fieldname, field_data(:,:,arg,:), & debug_forcing, field_loc, field_type) if (ixp /= -99) then arg = arg + 1 nrec = recd + ixp - call ice_read_nc & + call ice_read_nc & (fid, nrec, fieldname, field_data(:,:,arg,:), & debug_forcing, field_loc, field_type) endif @@ -1449,7 +1451,7 @@ subroutine interp_coeff (recnum, recslot, secint, dataloc) else ! recslot = 1 if (dataloc==1) then ! data located at middle of interval t1 = (rcnum-p5)*secint - else + else t1 = rcnum*secint ! data located at end of interval endif t2 = t1 + secint ! + 1 interval @@ -1574,7 +1576,7 @@ end subroutine file_year subroutine prepare_forcing (nx_block, ny_block, & ilo, ihi, jlo, jhi, & hm, & - Tair, fsw, & + Tair, fsw, & cldf, flw, & frain, fsnow, & Qa, rhoa, & @@ -1597,7 +1599,7 @@ subroutine prepare_forcing (nx_block, ny_block, & sst , & ! sea surface temperature aice , & ! ice area fraction hm ! land mask - + real (kind=dbl_kind), dimension(nx_block,ny_block), intent(inout) :: & fsw , & ! incoming shortwave radiation (W/m^2) cldf , & ! cloud fraction @@ -1654,7 +1656,7 @@ subroutine prepare_forcing (nx_block, ny_block, & rhoa (i,j) = max(rhoa(i,j),c0) Qa (i,j) = max(Qa(i,j),c0) -! if (rhoa(i,j) .lt. puny) rhoa(i,j) = 1.3_dbl_kind +! if (rhoa(i,j) .lt. puny) rhoa(i,j) = 1.3_dbl_kind ! if (Tair(i,j) .lt. puny) Tair(i,j) = Tffresh ! if (Qa(i,j) .lt. puny) Qa(i,j) = 0.0035_dbl_kind enddo ! 
i @@ -1699,12 +1701,12 @@ subroutine prepare_forcing (nx_block, ny_block, & enddo enddo #endif - elseif (trim(atm_data_type) == 'oned') then ! rectangular grid + elseif (trim(atm_data_type) == 'oned') then ! rectangular grid ! precip is in kg/m^2/s zlvl0 = c10 - + do j = jlo, jhi do i = ilo, ihi @@ -1736,7 +1738,7 @@ subroutine prepare_forcing (nx_block, ny_block, & elseif (trim(precip_units) == 'mm_per_day') then precip_factor = c1/secday elseif (trim(precip_units) == 'mm_per_sec' .or. & - trim(precip_units) == 'mks') then + trim(precip_units) == 'mks') then precip_factor = c1 ! mm/sec = kg/m^2 s elseif (trim(precip_units) == 'm_per_sec') then precip_factor = c1000 @@ -1753,20 +1755,20 @@ subroutine prepare_forcing (nx_block, ny_block, & swvdf(i,j) = fsw(i,j)*frcvdf ! visible diffuse swidr(i,j) = fsw(i,j)*frcidr ! near IR direct swidf(i,j) = fsw(i,j)*frcidf ! near IR diffuse - + ! convert precipitation units to kg/m^2 s fsnow(i,j) = fsnow(i,j) * precip_factor enddo ! i enddo ! j ! determine whether precip is rain or snow - ! HadGEM forcing provides separate snowfall and rainfall rather + ! HadGEM forcing provides separate snowfall and rainfall rather ! than total precipitation if (trim(atm_data_type) /= 'hadgem') then do j = jlo, jhi do i = ilo, ihi - frain(i,j) = c0 + frain(i,j) = c0 if (Tair(i,j) >= Tffresh) then frain(i,j) = fsnow(i,j) fsnow(i,j) = c0 @@ -1789,8 +1791,8 @@ subroutine prepare_forcing (nx_block, ny_block, & ! then interpolate to the U-cell centers (otherwise we ! interpolate across the pole). ! Use ANGLET which is on the T grid ! - ! Atmo variables are needed in T cell centers in subroutine - ! atmo_boundary_layer, and are interpolated to the U grid later as + ! Atmo variables are needed in T cell centers in subroutine + ! atmo_boundary_layer, and are interpolated to the U grid later as ! necessary. !----------------------------------------------------------------- workx = uatm(i,j) ! wind velocity, m/s @@ -1838,12 +1840,12 @@ subroutine longwave_parkinson_washington(Tair, cldf, flw) ! (for now) ! Parkinson, C. L. and W. M. Washington (1979), ! Large-scale numerical-model of sea ice, - ! JGR, 84, 311-337, doi:10.1029/JC084iC01p00311 + ! JGR, 84, 311-337, doi:10.1029/JC084iC01p00311 real(kind=dbl_kind), intent(in) :: & Tair , & ! air temperature (K) cldf ! cloud fraction - + real(kind=dbl_kind), intent(out) :: & flw ! incoming longwave radiation (W/m^2) @@ -1859,12 +1861,12 @@ subroutine longwave_parkinson_washington(Tair, cldf, flw) call icepack_warnings_flush(nu_diag) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) - + flw = stefan_boltzmann*Tair**4 & * (c1 - 0.261_dbl_kind & * exp(-7.77e-4_dbl_kind*(Tffresh - Tair)**2)) & * (c1 + 0.275_dbl_kind*cldf) - + end subroutine longwave_parkinson_washington !======================================================================= @@ -1874,11 +1876,11 @@ subroutine longwave_rosati_miyakoda(cldf, Tsfc, & Qa, Tair, & hm, flw) - ! based on - ! Rosati, A. and K. Miyakoda (1988), - ! A general-circulation model for upper ocean simulation, - ! J. Physical Oceanography, 18, 1601-1626, - ! doi:10.1175/1520-0485(1988)018<1601:AGCMFU>2.0.CO;2 + ! based on + ! Rosati, A. and K. Miyakoda (1988), + ! A general-circulation model for upper ocean simulation, + ! J. Physical Oceanography, 18, 1601-1626, + ! doi:10.1175/1520-0485(1988)018<1601:AGCMFU>2.0.CO;2 real(kind=dbl_kind), intent(in) :: & cldf , & ! 
cloud fraction @@ -1897,7 +1899,7 @@ subroutine longwave_rosati_miyakoda(cldf, Tsfc, & sstk , & ! ice/ocean surface temperature (K) rtea , & ! square root of the vapour pressure ptem , & ! potential air temperature (K) - qlwm + qlwm real(kind=dbl_kind) :: & Tffresh, stefan_boltzmann, emissivity @@ -1924,7 +1926,7 @@ subroutine longwave_rosati_miyakoda(cldf, Tsfc, & + c4*(sstk-ptem) ) flw = emissivity*stefan_boltzmann * ( sstk**4 - qlwm ) flw = flw * hm ! land mask - + end subroutine longwave_rosati_miyakoda !======================================================================= @@ -2068,7 +2070,7 @@ subroutine ncar_data else call abort_ice (error_message=subname//'nonbinary atm_data_format unavailable', & file=__FILE__, line=__LINE__) -! The routine exists, for example: +! The routine exists, for example: ! call read_data_nc (readm, 0, fyear, ixm, mmonth, ixp, & ! maxrec, fsw_file, 'fsw', fsw_data, & ! field_loc_center, field_type_scalar) @@ -2197,7 +2199,7 @@ subroutine LY_files (yr) if (my_task == master_task) then write (nu_diag,*) ' ' - write (nu_diag,*) 'Forcing data year = ', fyear + write (nu_diag,*) 'Forcing data year = ', fyear write (nu_diag,*) 'Atmospheric data files:' write (nu_diag,*) trim(flw_file) write (nu_diag,*) trim(rain_file) @@ -2287,7 +2289,7 @@ subroutine LY_data use ice_grid, only: hm, tlon, tlat, tmask, umask use ice_state, only: aice - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & i, j , & ixm,ixx,ixp , & ! record numbers for neighboring months recnum , & ! record number @@ -2321,9 +2323,9 @@ subroutine LY_data file=__FILE__, line=__LINE__) !------------------------------------------------------------------- - ! monthly data + ! monthly data ! - ! Assume that monthly data values are located in the middle of the + ! Assume that monthly data values are located in the middle of the ! month. !------------------------------------------------------------------- @@ -2348,7 +2350,7 @@ subroutine LY_data ! Find interpolation coefficients call interp_coeff_monthly (recslot) - ! Read 2 monthly values + ! Read 2 monthly values readm = .false. if (istep==1 .or. (mday==midmonth .and. msec==0)) readm = .true. @@ -2362,7 +2364,7 @@ subroutine LY_data !------------------------------------------------------------------- ! 6-hourly data - ! + ! ! Assume that the 6-hourly value is located at the end of the ! 6-hour period. This is the convention for NCEP reanalysis data. ! E.g. record 1 gives conditions at 6 am GMT on 1 January. 
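! A sketch of the record arithmetic this convention implies, mirroring
! the recnum4X computation used for the ISPOL 4x-daily fields later in
! this file (secday, yday and msec as used elsewhere in ice_forcing;
! the local names here are illustrative only):
!   sec6hr = secday/c4                                       ! seconds in 6 hours
!   recnum = 4*int(yday) - 3 + int(real(msec,kind=dbl_kind)/sec6hr)
! so the first 6-hour period of 1 January maps to record 1, the value
! valid at 06Z, and interp_coeff then weights the neighboring records.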
@@ -2464,29 +2466,29 @@ subroutine LY_data if (debug_forcing) then if (my_task == master_task) write (nu_diag,*) 'LY_bulk_data' vmin = global_minval(fsw,distrb_info,tmask) - + vmax = global_maxval(fsw,distrb_info,tmask) if (my_task.eq.master_task) & - write (nu_diag,*) 'fsw',vmin,vmax + write (nu_diag,*) 'fsw',vmin,vmax vmin = global_minval(cldf,distrb_info,tmask) vmax = global_maxval(cldf,distrb_info,tmask) - if (my_task.eq.master_task) & + if (my_task.eq.master_task) & write (nu_diag,*) 'cldf',vmin,vmax vmin =global_minval(fsnow,distrb_info,tmask) vmax =global_maxval(fsnow,distrb_info,tmask) - if (my_task.eq.master_task) & + if (my_task.eq.master_task) & write (nu_diag,*) 'fsnow',vmin,vmax vmin = global_minval(Tair,distrb_info,tmask) vmax = global_maxval(Tair,distrb_info,tmask) - if (my_task.eq.master_task) & + if (my_task.eq.master_task) & write (nu_diag,*) 'Tair',vmin,vmax vmin = global_minval(uatm,distrb_info,umask) vmax = global_maxval(uatm,distrb_info,umask) - if (my_task.eq.master_task) & + if (my_task.eq.master_task) & write (nu_diag,*) 'uatm',vmin,vmax vmin = global_minval(vatm,distrb_info,umask) vmax = global_maxval(vatm,distrb_info,umask) - if (my_task.eq.master_task) & + if (my_task.eq.master_task) & write (nu_diag,*) 'vatm',vmin,vmax vmin = global_minval(Qa,distrb_info,tmask) vmax = global_maxval(Qa,distrb_info,tmask) @@ -2503,9 +2505,9 @@ subroutine JRA55_data use ice_blocks, only: block, get_block use ice_global_reductions, only: global_minval, global_maxval - use ice_domain, only: nblocks, distrb_info, blocks_ice + use ice_domain, only: nblocks, distrb_info use ice_flux, only: fsnow, Tair, uatm, vatm, Qa, fsw, flw - use ice_grid, only: hm, tlon, tlat, tmask, umask + use ice_grid, only: hm, tmask, umask use ice_state, only: aice use ice_calendar, only: days_per_year @@ -2782,7 +2784,7 @@ subroutine compute_shortwave(nx_block, ny_block, & secday , & pi , & lontmp , & - deg2rad + deg2rad integer (kind=int_kind) :: & i, j @@ -2823,7 +2825,7 @@ subroutine compute_shortwave(nx_block, ny_block, & sw0 = max(sw0,c0) ! total downward shortwave for cice - Fsw(i,j) = sw0*(c1-p6*cldf(i,j)**3) + Fsw(i,j) = sw0*(c1-p6*cldf(i,j)**3) Fsw(i,j) = Fsw(i,j)*hm(i,j) enddo enddo @@ -2865,7 +2867,7 @@ subroutine Qa_fixLY(nx_block, ny_block, Tair, Qa) /(c1 + 0.00412_dbl_kind*worka) & ! 2+ converts ea mb -> Pa + 0.00422_dbl_kind*worka ! for ice ! vapor pressure - worka = (c10**worka) ! saturated + worka = (c10**worka) ! saturated worka = max(worka,puny) ! puny over land to prevent division by zero ! specific humidity worka = 0.622_dbl_kind*worka/(1.e5_dbl_kind-0.378_dbl_kind*worka) @@ -2981,13 +2983,13 @@ subroutine hadgem_files (yr) endif ! calc_strair ! -------------------------------------------------------------- - ! Atmosphere properties. Even if these fields are not + ! Atmosphere properties. Even if these fields are not ! being used to force the ice (i.e. calc_Tsfc=.false.), they ! are still needed to generate forcing for mixed layer model or ! to calculate wind stress ! -------------------------------------------------------------- - if (calc_Tsfc .or. oceanmixed_ice .or. calc_strair) then + if (calc_Tsfc .or. oceanmixed_ice .or. calc_strair) then fsw_file = & trim(atm_data_dir)//'/MONTHLY/SW_incoming.1996.nc' @@ -3032,14 +3034,14 @@ subroutine hadgem_files (yr) trim(atm_data_dir)//'/MONTHLY/topmeltn',n,'.1996.nc' call file_year(topmelt_file(n),yr) - ! 'botmelt' = fcondtop. + ! 'botmelt' = fcondtop. 
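! (Zero-layer assumption: the conductive flux through the ice equals the
! flux at the top surface, so the UM 'botmelt' field maps directly onto
! fcondtop; hadgem_data below likewise forms the net surface flux as
! fsurf = topmelt + botmelt on the same basis.)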
write(botmelt_file(n), '(a,i1,a)') & trim(atm_data_dir)//'/MONTHLY/botmeltn',n,'.1996.nc' call file_year(botmelt_file(n),yr) enddo - ! 'sublim' = - flat / Lsub. + ! 'sublim' = - flat / Lsub. sublim_file = & trim(atm_data_dir)//'/MONTHLY/sublim.1996.nc' call file_year(sublim_file,yr) @@ -3085,7 +3087,7 @@ subroutine hadgem_data botmelt, & sublim - character (char_len) :: & + character (char_len) :: & fieldname ! field name in netcdf file real (kind=dbl_kind) :: & @@ -3212,15 +3214,15 @@ subroutine hadgem_data endif ! calc_strair ! ----------------------------------------------------------- - ! SW incoming, LW incoming, air temperature, density and - ! humidity at 10m. + ! SW incoming, LW incoming, air temperature, density and + ! humidity at 10m. ! - ! Even if these fields are not being used to force the ice - ! (i.e. calc_Tsfc=.false.), they are still needed to generate + ! Even if these fields are not being used to force the ice + ! (i.e. calc_Tsfc=.false.), they are still needed to generate ! forcing for mixed layer model or to calculate wind stress ! ----------------------------------------------------------- - if (calc_Tsfc .or. oceanmixed_ice .or. calc_strair) then + if (calc_Tsfc .or. oceanmixed_ice .or. calc_strair) then fieldname='SW_incoming' call read_data_nc (readm, 0, fyear, ixm, mmonth, ixp, & @@ -3287,7 +3289,7 @@ subroutine hadgem_data ! botmelt = fcondtop (as zero layer) ! ! Convert UM sublimation data into CICE LH flux - ! (sublim = - flatn / Lsub) and have same value for all + ! (sublim = - flatn / Lsub) and have same value for all ! categories !-------------------------------------------------------- @@ -3296,7 +3298,7 @@ subroutine hadgem_data do j = 1, ny_block do i = 1, nx_block fcondtopn_f(i,j,n,iblk) = botmelt(i,j,iblk) - fsurfn_f(i,j,n,iblk) = topmelt(i,j,iblk) & + fsurfn_f(i,j,n,iblk) = topmelt(i,j,iblk) & + botmelt(i,j,iblk) flatn_f(i,j,n,iblk) = - sublim(i,j,iblk)*Lsub enddo @@ -3306,12 +3308,12 @@ subroutine hadgem_data enddo ! ncat - endif ! .not. calc_Tsfc + endif ! .not. calc_Tsfc end subroutine hadgem_data !======================================================================= -! monthly forcing +! monthly forcing !======================================================================= subroutine monthly_files (yr) @@ -3359,7 +3361,7 @@ subroutine monthly_files (yr) if (my_task == master_task) then write (nu_diag,*) ' ' - write (nu_diag,*) 'Forcing data year = ', fyear + write (nu_diag,*) 'Forcing data year = ', fyear write (nu_diag,*) 'Atmospheric data files:' write (nu_diag,*) trim(flw_file) write (nu_diag,*) trim(rain_file) @@ -3382,7 +3384,7 @@ subroutine monthly_data use ice_flux, only: fsnow, Tair, Qa, wind, strax, stray, fsw use ice_grid, only: hm, tlon, tlat, tmask, umask - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & i, j , & ixm,ixp , & ! record numbers for neighboring months maxrec , & ! maximum record number @@ -3398,15 +3400,15 @@ subroutine monthly_data type (block) :: & this_block ! block information for current block - + character(len=*), parameter :: subname = '(monthly_data)' if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' !------------------------------------------------------------------- - ! monthly data + ! monthly data ! - ! Assume that monthly data values are located in the middle of the + ! Assume that monthly data values are located in the middle of the ! month. !------------------------------------------------------------------- @@ -3431,7 +3433,7 @@ subroutine monthly_data ! 
Find interpolation coefficients call interp_coeff_monthly (recslot) - ! Read 2 monthly values + ! Read 2 monthly values readm = .false. if (istep==1 .or. (mday==midmonth .and. msec==0)) readm = .true. @@ -3505,30 +3507,30 @@ subroutine monthly_data vmin = global_minval(fsw,distrb_info,tmask) vmax = global_maxval(fsw,distrb_info,tmask) if (my_task.eq.master_task) & - write (nu_diag,*) 'fsw',vmin,vmax + write (nu_diag,*) 'fsw',vmin,vmax vmin = global_minval(cldf,distrb_info,tmask) vmax = global_maxval(cldf,distrb_info,tmask) - if (my_task.eq.master_task) & + if (my_task.eq.master_task) & write (nu_diag,*) 'cldf',vmin,vmax vmin =global_minval(fsnow,distrb_info,tmask) vmax =global_maxval(fsnow,distrb_info,tmask) - if (my_task.eq.master_task) & + if (my_task.eq.master_task) & write (nu_diag,*) 'fsnow',vmin,vmax vmin = global_minval(Tair,distrb_info,tmask) vmax = global_maxval(Tair,distrb_info,tmask) - if (my_task.eq.master_task) & + if (my_task.eq.master_task) & write (nu_diag,*) 'Tair',vmin,vmax vmin = global_minval(wind,distrb_info,umask) vmax = global_maxval(wind,distrb_info,umask) - if (my_task.eq.master_task) & + if (my_task.eq.master_task) & write (nu_diag,*) 'wind',vmin,vmax vmin = global_minval(strax,distrb_info,umask) vmax = global_maxval(strax,distrb_info,umask) - if (my_task.eq.master_task) & + if (my_task.eq.master_task) & write (nu_diag,*) 'strax',vmin,vmax vmin = global_minval(stray,distrb_info,umask) vmax = global_maxval(stray,distrb_info,umask) - if (my_task.eq.master_task) & + if (my_task.eq.master_task) & write (nu_diag,*) 'stray',vmin,vmax vmin = global_minval(Qa,distrb_info,tmask) vmax = global_maxval(Qa,distrb_info,tmask) @@ -3549,7 +3551,7 @@ subroutine oned_data ! local parameters - character (char_len_long) :: & + character (char_len_long) :: & met_file, & ! netcdf filename fieldname ! field name in netcdf file @@ -3570,79 +3572,79 @@ subroutine oned_data Psat , & ! saturation vapour pressure (hPa) ws ! saturation mixing ratio - real (kind=dbl_kind), parameter :: & ! coefficients for Hyland-Wexler Qa - ps1 = 0.58002206e4_dbl_kind, & ! (K) + real (kind=dbl_kind), parameter :: & ! coefficients for Hyland-Wexler Qa + ps1 = 0.58002206e4_dbl_kind, & ! (K) ps2 = 1.3914993_dbl_kind, & ! - ps3 = 0.48640239e-1_dbl_kind, & ! (K^-1) + ps3 = 0.48640239e-1_dbl_kind, & ! (K^-1) ps4 = 0.41764768e-4_dbl_kind, & ! (K^-2) ps5 = 0.14452093e-7_dbl_kind, & ! (K^-3) ps6 = 6.5459673_dbl_kind, & ! - ws1 = 621.97_dbl_kind, & ! for saturation mixing ratio - Pair = 1020._dbl_kind ! Sea level pressure (hPa) - + ws1 = 621.97_dbl_kind, & ! for saturation mixing ratio + Pair = 1020._dbl_kind ! Sea level pressure (hPa) + character(len=*), parameter :: subname = '(oned_data)' if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' - diag = .false. ! write diagnostic information - + diag = .false. ! write diagnostic information + if (trim(atm_data_format) == 'nc') then ! read nc file - ! hourly data beginning Jan 1, 1989, 01:00 + ! hourly data beginning Jan 1, 1989, 01:00 ! HARDWIRED for dt = 1 hour! 
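! (Because the data are hourly and begin at 01:00 on 1 Jan 1989, the
! step counter istep1 is used directly as the netCDF record number in
! the reads below, with no temporal interpolation; the indexing is only
! valid when dt = 3600 s, which is what the HARDWIRED note flags.)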
met_file = uwind_file call ice_open_nc(met_file,fid) - fieldname='Uatm' - call ice_read_nc(fid,istep1,fieldname,work,diag) + fieldname='Uatm' + call ice_read_nc(fid,istep1,fieldname,work,diag) uatm(:,:,:) = work - fieldname='Vatm' - call ice_read_nc(fid,istep1,fieldname,work,diag) + fieldname='Vatm' + call ice_read_nc(fid,istep1,fieldname,work,diag) vatm(:,:,:) = work - fieldname='Tair' - call ice_read_nc(fid,istep1,fieldname,work,diag) + fieldname='Tair' + call ice_read_nc(fid,istep1,fieldname,work,diag) Temp = work - Tair(:,:,:) = Temp + Tair(:,:,:) = Temp call ice_close_nc(fid) - ! hourly solar data beginning Jan 1, 1989, 01:00 + ! hourly solar data beginning Jan 1, 1989, 01:00 met_file = fsw_file call ice_open_nc(met_file,fid) - fieldname='fsw' - call ice_read_nc(fid,istep1,fieldname,work,diag) + fieldname='fsw' + call ice_read_nc(fid,istep1,fieldname,work,diag) fsw(:,:,:) = work call ice_close_nc(fid) - ! hourly interpolated monthly data beginning Jan 1, 1989, 01:00 + ! hourly interpolated monthly data beginning Jan 1, 1989, 01:00 met_file = humid_file call ice_open_nc(met_file,fid) - fieldname='rh' - call ice_read_nc(fid,istep1,fieldname,work,diag) + fieldname='rh' + call ice_read_nc(fid,istep1,fieldname,work,diag) rh = work - - fieldname='fsnow' - call ice_read_nc(fid,istep1,fieldname,work,diag) + + fieldname='fsnow' + call ice_read_nc(fid,istep1,fieldname,work,diag) fsnow(:,:,:) = work call ice_close_nc(fid) !------------------------------------------------------------------- ! Find specific humidity using Hyland-Wexler formulation - ! Hyland, R.W. and A. Wexler, Formulations for the Thermodynamic - ! Properties of the saturated phases of H20 from 173.15K to 473.15K, + ! Hyland, R.W. and A. Wexler, Formulations for the Thermodynamic + ! Properties of the saturated phases of H20 from 173.15K to 473.15K, ! ASHRAE Trans, 89(2A), 500-519, 1983 !------------------------------------------------------------------- - - Psat = exp(-ps1/Temp + ps2 - ps3*Temp + ps4*Temp**2 - ps5 * Temp**3 & + + Psat = exp(-ps1/Temp + ps2 - ps3*Temp + ps4*Temp**2 - ps5 * Temp**3 & + ps6 * log(Temp))*p01 ! saturation vapour pressure ws = ws1 * Psat/(Pair - Psat) ! saturation mixing ratio - Qa(:,:,:) = rh * ws * p01/(c1 + rh * ws * p01) * p001 + Qa(:,:,:) = rh * ws * p01/(c1 + rh * ws * p01) * p001 ! specific humidity (kg/kg) endif ! atm_data_format @@ -3650,7 +3652,7 @@ subroutine oned_data rhoa (:,:,:) = 1.3_dbl_kind ! air density (kg/m^3) cldf (:,:,:) = p25 ! cloud fraction frain(:,:,:) = c0 ! this is available in hourlymet_rh file - + end subroutine oned_data !======================================================================= @@ -3831,19 +3833,19 @@ end subroutine ocn_data_clim subroutine ocn_data_ncar_init ! Reads NCAR pop ocean forcing data set 'pop_frc_gx1v3_010815.nc' -! +! ! List of ocean forcing fields: Note that order is important! ! (order is determined by field list in vname). -! -! For ocean mixed layer-----------------------------units -! -! 1 sst------temperature---------------------------(C) -! 2 sss------salinity------------------------------(ppt) -! 3 hbl------depth---------------------------------(m) -! 4 u--------surface u current---------------------(m/s) -! 5 v--------surface v current---------------------(m/s) -! 6 dhdx-----surface tilt x direction--------------(m/m) -! 7 dhdy-----surface tilt y direction--------------(m/m) +! +! For ocean mixed layer-----------------------------units +! +! 1 sst------temperature---------------------------(C) +! 
2 sss------salinity------------------------------(ppt) +! 3 hbl------depth---------------------------------(m) +! 4 u--------surface u current---------------------(m/s) +! 5 v--------surface v current---------------------(m/s) +! 6 dhdx-----surface tilt x direction--------------(m/m) +! 7 dhdy-----surface tilt y direction--------------(m/m) ! 8 qdp------ocean sub-mixed layer heat flux-------(W/m2) ! ! Fields 4, 5, 6, 7 are on the U-grid; 1, 2, 3, and 8 are @@ -3858,7 +3860,7 @@ subroutine ocn_data_ncar_init use netcdf #endif - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & n , & ! field index m , & ! month index nrec, & ! record number for direct access @@ -3870,12 +3872,10 @@ subroutine ocn_data_ncar_init 'T', 'S', 'hblt', 'U', 'V', & 'dhdx', 'dhdy', 'qdp' / - integer (kind=int_kind) :: & - fid , & ! file id - dimid ! dimension id - integer (kind=int_kind) :: & status , & ! status flag + fid , & ! file id + dimid , & ! dimension id nlat , & ! number of longitudes of data nlon ! number of latitudes of data @@ -3894,7 +3894,7 @@ subroutine ocn_data_ncar_init write (nu_diag,*) 'WARNING: Alter ice_dyn_evp.F90 if desired.' if (restore_ocn) write (nu_diag,*) & - 'SST restoring timescale = ',trestore,' days' + 'SST restoring timescale = ',trestore,' days' sst_file = trim(ocn_data_dir)//'/'//trim(oceanmixed_file) ! not just sst @@ -3914,7 +3914,7 @@ subroutine ocn_data_ncar_init ! status = nf90_inq_dimid(fid,'nlon',dimid) status = nf90_inq_dimid(fid,'ni',dimid) status = nf90_inquire_dimension(fid,dimid,len=nlon) - + ! status = nf90_inq_dimid(fid,'nlat',dimid) status = nf90_inq_dimid(fid,'nj',dimid) status = nf90_inquire_dimension(fid,dimid,len=nlat) @@ -3933,7 +3933,7 @@ subroutine ocn_data_ncar_init ! Read in ocean forcing data for all 12 months do n=1,nfld do m=1,12 - + ! Note: netCDF does single to double conversion if necessary ! if (n >= 4 .and. n <= 7) then ! call ice_read_nc(fid, m, vname(n), work1, debug_forcing, & @@ -3989,19 +3989,19 @@ end subroutine ocn_data_ncar_init subroutine ocn_data_ncar_init_3D ! Reads NCAR pop ocean forcing data set 'oceanmixed_ice_depth.nc' -! +! ! List of ocean forcing fields: Note that order is important! ! (order is determined by field list in vname). -! -! For ocean mixed layer-----------------------------units -! -! 1 sst------temperature---------------------------(C) -! 2 sss------salinity------------------------------(ppt) -! 3 hbl------depth---------------------------------(m) -! 4 u--------surface u current---------------------(m/s) -! 5 v--------surface v current---------------------(m/s) -! 6 dhdx-----surface tilt x direction--------------(m/m) -! 7 dhdy-----surface tilt y direction--------------(m/m) +! +! For ocean mixed layer-----------------------------units +! +! 1 sst------temperature---------------------------(C) +! 2 sss------salinity------------------------------(ppt) +! 3 hbl------depth---------------------------------(m) +! 4 u--------surface u current---------------------(m/s) +! 5 v--------surface v current---------------------(m/s) +! 6 dhdx-----surface tilt x direction--------------(m/m) +! 7 dhdy-----surface tilt y direction--------------(m/m) ! 8 qdp------ocean sub-mixed layer heat flux-------(W/m2) ! ! All fields are on the T-grid. @@ -4018,7 +4018,7 @@ subroutine ocn_data_ncar_init_3D #endif #ifdef USE_NETCDF - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & n , & ! field index m , & ! month index nzlev ! 
z level of currents @@ -4030,8 +4030,8 @@ subroutine ocn_data_ncar_init_3D 'dhdx', 'dhdy', 'qdp' / integer (kind=int_kind) :: & - fid , & ! file id - dimid ! dimension id + fid , & ! file id + dimid ! dimension id integer (kind=int_kind) :: & status , & ! status flag @@ -4054,7 +4054,7 @@ subroutine ocn_data_ncar_init_3D write (nu_diag,*) 'WARNING: Alter ice_dyn_evp.F if desired.' if (restore_ocn) write (nu_diag,*) & - 'SST restoring timescale = ',trestore,' days' + 'SST restoring timescale = ',trestore,' days' sst_file = trim(ocn_data_dir)//'/'//trim(oceanmixed_file) ! not just sst @@ -4075,7 +4075,7 @@ subroutine ocn_data_ncar_init_3D ! status = nf90_inq_dimid(fid,'nlon',dimid) status = nf90_inq_dimid(fid,'ni',dimid) status = nf90_inquire_dimension(fid,dimid,len=nlon) - + ! status = nf90_inq_dimid(fid,'nlat',dimid) status = nf90_inq_dimid(fid,'nj',dimid) status = nf90_inquire_dimension(fid,dimid,len=nlat) @@ -4094,7 +4094,7 @@ subroutine ocn_data_ncar_init_3D ! Read in ocean forcing data for all 12 months do n=1,nfld do m=1,12 - + ! Note: netCDF does single to double conversion if necessary if (n == 4 .or. n == 5) then ! 3D currents nzlev = 1 ! surface currents @@ -4105,7 +4105,7 @@ subroutine ocn_data_ncar_init_3D field_loc_center, field_type_scalar) endif - ! the land mask used in ocean_mixed_depth.nc does not + ! the land mask used in ocean_mixed_depth.nc does not ! match our gx1v3 mask (hm) where (work1(:,:,:) < -900.) work1(:,:,:) = c0 @@ -4168,7 +4168,7 @@ subroutine ocn_data_ncar(dt) real (kind=dbl_kind), intent(in) :: & dt ! time step - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & i, j, n, iblk , & ixm,ixp , & ! record numbers for neighboring months maxrec , & ! maximum record number @@ -4186,12 +4186,12 @@ subroutine ocn_data_ncar(dt) if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' !------------------------------------------------------------------- - ! monthly data + ! monthly data ! - ! Assume that monthly data values are located in the middle of the + ! Assume that monthly data values are located in the middle of the ! month. !------------------------------------------------------------------- - + midmonth = 15 ! data is given on 15th of every month ! midmonth = fix(p5 * real(daymo(mmonth),kind=dbl_kind)) ! exact middle @@ -4228,8 +4228,8 @@ subroutine ocn_data_ncar(dt) call interpolate_data (sst_data,work1) ! masking by hm is necessary due to NaNs in the data file - do j = 1, ny_block - do i = 1, nx_block + do j = 1, ny_block + do i = 1, nx_block if (n == 2) sss (i,j,:) = c0 if (n == 3) hmix (i,j,:) = c0 if (n == 4) uocn (i,j,:) = c0 @@ -4252,21 +4252,21 @@ subroutine ocn_data_ncar(dt) enddo enddo - do j = 1, ny_block - do i = 1, nx_block - sss (i,j,:) = max (sss(i,j,:), c0) - hmix(i,j,:) = max(hmix(i,j,:), c0) - enddo - enddo + do j = 1, ny_block + do i = 1, nx_block + sss (i,j,:) = max (sss(i,j,:), c0) + hmix(i,j,:) = max(hmix(i,j,:), c0) + enddo + enddo call ocn_freezing_temperature if (restore_ocn) then - do j = 1, ny_block - do i = 1, nx_block - sst(i,j,:) = sst(i,j,:) + (work1(i,j,:)-sst(i,j,:))*dt/trest - enddo - enddo + do j = 1, ny_block + do i = 1, nx_block + sst(i,j,:) = sst(i,j,:) + (work1(i,j,:)-sst(i,j,:))*dt/trest + enddo + enddo ! 
else sst is only updated in ice_ocean.F endif @@ -4275,16 +4275,16 @@ subroutine ocn_data_ncar(dt) call interpolate_data (sst_data,sst) !$OMP PARALLEL DO PRIVATE(iblk,i,j) do iblk = 1, nblocks - do j = 1, ny_block - do i = 1, nx_block + do j = 1, ny_block + do i = 1, nx_block if (hm(i,j,iblk) == c1) then - sst(i,j,iblk) = max (sst(i,j,iblk), Tf(i,j,iblk)) + sst(i,j,iblk) = max (sst(i,j,iblk), Tf(i,j,iblk)) else sst(i,j,iblk) = c0 endif - enddo - enddo - enddo + enddo + enddo + enddo !$OMP END PARALLEL DO endif @@ -4365,12 +4365,13 @@ subroutine ocn_data_hadgem(dt) ! Reads in HadGEM ocean forcing data as required from netCDF files ! Current options (selected by ocn_data_type) -! hadgem_sst: read in sst only +! hadgem_sst: read in sst only ! hadgem_sst_uvocn: read in sst plus uocn and vocn ! authors: Ann Keen, Met Office use ice_domain, only: nblocks + use ice_domain_size, only: max_blocks use ice_flux, only: sst, uocn, vocn use ice_grid, only: grid_average_X2Y, ANGLET @@ -4387,17 +4388,14 @@ subroutine ocn_data_hadgem(dt) real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks) :: & sstdat ! data value toward which SST is restored - real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & - work1 ! temporary array - real (kind=dbl_kind) :: workx, worky logical (kind=log_kind) :: readm - character (char_len) :: & + character (char_len) :: & fieldname ! field name in netcdf file - character (char_len_long) :: & + character (char_len_long) :: & filename ! name of netCDF file character(len=*), parameter :: subname = '(ocn_data_hadgem)' @@ -4458,7 +4456,7 @@ subroutine ocn_data_hadgem(dt) call read_data_nc (readm, 0, fyear, ixm, mmonth, ixp, & maxrec, sst_file, fieldname, sst_data, & field_loc_center, field_type_scalar) - + ! Interpolate to current time step call interpolate_data (sst_data, sstdat) @@ -4474,14 +4472,14 @@ subroutine ocn_data_hadgem(dt) enddo enddo !$OMP END PARALLEL DO - endif + endif ! ----------------------------------------------------------- ! Ocean currents ! -------------- - ! Values read in are on T grid and oriented geographically, hence + ! Values read in are on T grid and oriented geographically, hence ! vectors need to be rotated to model grid and then interpolated - ! to U grid. + ! to U grid. ! Also need to be converted from cm s-1 (UM) to m s-1 (CICE) ! ----------------------------------------------------------- @@ -4492,7 +4490,7 @@ subroutine ocn_data_hadgem(dt) call read_data_nc (readm, 0, fyear, ixm, mmonth, ixp, & maxrec, filename, fieldname, uocn_data, & field_loc_center, field_type_vector) - + ! Interpolate to current time step call interpolate_data (uocn_data, uocn) @@ -4501,25 +4499,25 @@ subroutine ocn_data_hadgem(dt) call read_data_nc (readm, 0, fyear, ixm, mmonth, ixp, & maxrec, filename, fieldname, vocn_data, & field_loc_center, field_type_vector) - + ! Interpolate to current time step call interpolate_data (vocn_data, vocn) - !----------------------------------------------------------------- - ! Rotate zonal/meridional vectors to local coordinates, + !----------------------------------------------------------------- + ! Rotate zonal/meridional vectors to local coordinates, ! 
and change units - !----------------------------------------------------------------- + !----------------------------------------------------------------- !$OMP PARALLEL DO PRIVATE(iblk,i,j,workx,worky) do iblk = 1, nblocks do j = 1, ny_block do i = 1, nx_block - workx = uocn(i,j,iblk) + workx = uocn(i,j,iblk) worky = vocn(i,j,iblk) - uocn(i,j,iblk) = workx*cos(ANGLET(i,j,iblk)) & - + worky*sin(ANGLET(i,j,iblk)) - vocn(i,j,iblk) = worky*cos(ANGLET(i,j,iblk)) & + uocn(i,j,iblk) = workx*cos(ANGLET(i,j,iblk)) & + + worky*sin(ANGLET(i,j,iblk)) + vocn(i,j,iblk) = worky*cos(ANGLET(i,j,iblk)) & - workx*sin(ANGLET(i,j,iblk)) uocn(i,j,iblk) = uocn(i,j,iblk) * cm_to_m @@ -4530,15 +4528,11 @@ subroutine ocn_data_hadgem(dt) enddo ! nblocks !$OMP END PARALLEL DO - !----------------------------------------------------------------- - ! Interpolate to U grid - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! Interpolate to U grid + !----------------------------------------------------------------- ! tcraig, this is now computed in dynamics for consistency - !work1 = uocn - !call grid_average_X2Y('F',work1,'T',uocn,'U') - !work1 = vocn - !call grid_average_X2Y('F',work1,'T',vocn,'U') endif ! ocn_data_type = hadgem_sst_uvocn @@ -4688,7 +4682,7 @@ subroutine hycom_atm_data call icepack_query_parameters(Tffresh_out=Tffresh) call icepack_query_parameters(secday_out=secday) - ! current time in HYCOM jday units (HYCOM ref year: 1900,12,31,000000) + ! current time in HYCOM jday units (HYCOM ref year: 1900,12,31,000000) hcdate = real(compute_days_between(1900,12,31,myear,mmonth,mday)) + msec/secday ! Init recnum try @@ -4897,13 +4891,13 @@ subroutine read_data_nc_point (flag, recd, yr, ixm, ixx, ixp, & ! write(nu_diag,*) 'ixm, ixx, ixp ', ixm, ixx, ixp ! write(nu_diag,*) 'maxrec ', maxrec ! write(nu_diag,*) 'fieldname ', fieldname - + call ice_open_nc (data_file, fid) arg = 1 nrec = recd + n2 - call ice_read_nc & + call ice_read_nc & (fid, nrec, fieldname, field_data(arg), debug_forcing, & field_loc, field_type) @@ -4918,7 +4912,7 @@ subroutine read_data_nc_point (flag, recd, yr, ixm, ixx, ixp, & arg = arg + 1 nrec = recd + ixx - call ice_read_nc & + call ice_read_nc & (fid, nrec, fieldname, field_data(arg), debug_forcing, & field_loc, field_type) @@ -4944,7 +4938,7 @@ subroutine read_data_nc_point (flag, recd, yr, ixm, ixx, ixp, & arg = arg + 1 nrec = recd + n4 - call ice_read_nc & + call ice_read_nc & (fid, nrec, fieldname, field_data(arg), debug_forcing, & field_loc, field_type) endif ! ixp /= -99 @@ -4966,7 +4960,7 @@ subroutine ISPOL_files if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' fsw_file = & - trim(atm_data_dir)//'/fsw_sfc_4Xdaily.nc' + trim(atm_data_dir)//'/fsw_sfc_4Xdaily.nc' flw_file = & trim(atm_data_dir)//'/flw_sfc_4Xdaily.nc' @@ -4978,10 +4972,10 @@ subroutine ISPOL_files trim(atm_data_dir)//'/uatm_10m_daily.nc' vwind_file = & - trim(atm_data_dir)//'/vatm_10m_daily.nc' + trim(atm_data_dir)//'/vatm_10m_daily.nc' tair_file = & - trim(atm_data_dir)//'/Tair_2m_daily.nc' + trim(atm_data_dir)//'/Tair_2m_daily.nc' humid_file = & trim(atm_data_dir)//'/Qa_2m_daily.nc' @@ -5004,7 +4998,7 @@ end subroutine ISPOL_files subroutine ISPOL_data -! Defines atmospheric data fields for Antarctic Weddell sea location +! Defines atmospheric data fields for Antarctic Weddell sea location ! authors: Nicole Jeffery, LANL ! 
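! Sketch of the rotation applied in ocn_data_hadgem above: a vector with
! geographic (eastward, northward) components (u_e, v_n) is expressed in
! local grid (x, y) components using the T-cell rotation angle ANGLET,
!   u_grid = u_e*cos(ANGLET) + v_n*sin(ANGLET)
!   v_grid = v_n*cos(ANGLET) - u_e*sin(ANGLET)
! after which the currents are scaled by cm_to_m to convert cm/s to m/s.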
@@ -5013,7 +5007,7 @@ subroutine ISPOL_data !local parameters - character (char_len_long) :: & + character (char_len_long) :: & met_file, & ! netcdf filename fieldname ! field name in netcdf file @@ -5022,19 +5016,19 @@ subroutine ISPOL_data Qa_data_p, fsnow_data_p, & fsw_data_p, flw_data_p, & uatm_data_p, vatm_data_p - - real (kind=dbl_kind), parameter :: & ! coefficients for Hyland-Wexler Qa - ps1 = 0.58002206e4_dbl_kind, & ! (K) + + real (kind=dbl_kind), parameter :: & ! coefficients for Hyland-Wexler Qa + ps1 = 0.58002206e4_dbl_kind, & ! (K) ps2 = 1.3914993_dbl_kind, & ! - ps3 = 0.48640239e-1_dbl_kind, & ! (K^-1) + ps3 = 0.48640239e-1_dbl_kind, & ! (K^-1) ps4 = 0.41764768e-4_dbl_kind, & ! (K^-2) ps5 = 0.14452093e-7_dbl_kind, & ! (K^-3) ps6 = 6.5459673_dbl_kind, & ! - ws1 = 621.97_dbl_kind, & ! for saturation mixing ratio - Pair = 1020._dbl_kind, & ! Sea level pressure (hPa) + ws1 = 621.97_dbl_kind, & ! for saturation mixing ratio + Pair = 1020._dbl_kind, & ! Sea level pressure (hPa) lapse_rate = 0.0065_dbl_kind ! (K/m) lapse rate over sea level - - ! for interpolation of hourly data + + ! for interpolation of hourly data integer (kind=int_kind) :: & ixm,ixx,ixp , & ! record numbers for neighboring months maxrec , & ! maximum record number @@ -5043,7 +5037,7 @@ subroutine ISPOL_data ! = 2 for date located at end of time interval real (kind=dbl_kind) :: & secday , & - Qa_pnt + Qa_pnt real (kind=dbl_kind) :: & sec1hr ! number of seconds in 1 hour @@ -5062,20 +5056,20 @@ subroutine ISPOL_data call icepack_warnings_flush(nu_diag) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) - + if (trim(atm_data_format) == 'nc') then ! read nc file - + !------------------------------------------------------------------- ! data from NCEP_DOE Reanalysis 2 and Bareiss et al 2008 - ! daily data located at the end of the 24-hour period. + ! daily data located at the end of the 24-hour period. !------------------------------------------------------------------- dataloc = 2 ! data located at end of interval sec1hr = secday ! seconds in day - maxrec = 366 ! + maxrec = 366 ! ! current record number - recnum = int(yday) + recnum = int(yday) ! Compute record numbers for surrounding data (2 on each side) ixm = mod(recnum+maxrec-2,maxrec) + 1 @@ -5092,11 +5086,11 @@ subroutine ISPOL_data read1 = .false. if (istep==1 .or. oldrecnum .ne. recnum) read1 = .true. - + ! 
Daily 2m Air temperature 1991 - - met_file = tair_file - fieldname='Tair' + + met_file = tair_file + fieldname='Tair' call read_data_nc_point(read1, 0, fyear, ixm, ixx, ixp, & maxrec, met_file, fieldname, Tair_data_p, & @@ -5106,7 +5100,7 @@ subroutine ISPOL_data + c2intp * Tair_data_p(2) & - lapse_rate*8.0_dbl_kind - met_file = humid_file + met_file = humid_file fieldname='Qa' call read_data_nc_point(read1, 0, fyear, ixm, ixx, ixp, & @@ -5114,7 +5108,7 @@ subroutine ISPOL_data field_loc_center, field_type_scalar) Qa_pnt= c1intp * Qa_data_p(1) & - + c2intp * Qa_data_p(2) + + c2intp * Qa_data_p(2) Qa(:,:,:) = Qa_pnt met_file = uwind_file @@ -5125,19 +5119,19 @@ subroutine ISPOL_data field_loc_center, field_type_scalar) uatm(:,:,:) = c1intp * uatm_data_p(1) & - + c2intp * uatm_data_p(2) + + c2intp * uatm_data_p(2) met_file = vwind_file fieldname='vatm' - + call read_data_nc_point(read1, 0, fyear, ixm, ixx, ixp, & maxrec, met_file, fieldname, vatm_data_p, & field_loc_center, field_type_scalar) vatm(:,:,:) = c1intp * vatm_data_p(1) & + c2intp * vatm_data_p(2) - - met_file = rain_file + + met_file = rain_file fieldname='fsnow' call read_data_nc_point(read1, 0, fyear, ixm, ixx, ixp, & @@ -5145,7 +5139,7 @@ subroutine ISPOL_data field_loc_center, field_type_scalar) fsnow(:,:,:) = (c1intp * fsnow_data_p(1) + & - c2intp * fsnow_data_p(2)) + c2intp * fsnow_data_p(2)) !----------------------------- !fsw and flw are every 6 hours @@ -5155,7 +5149,7 @@ subroutine ISPOL_data maxrec = 1460 ! 366*4 ! current record number - recnum4X = 4*int(yday) - 3 + int(real(msec,kind=dbl_kind)/sec1hr) + recnum4X = 4*int(yday) - 3 + int(real(msec,kind=dbl_kind)/sec1hr) ! Compute record numbers for surrounding data (2 on each side) ixm = mod(recnum4X+maxrec-2,maxrec) + 1 @@ -5183,14 +5177,14 @@ subroutine ISPOL_data + c2intp * fsw_data_p(2) met_file = flw_file - fieldname='flw' + fieldname='flw' call read_data_nc_point(read1, 0, fyear, ixm, ixx, ixp, & maxrec, met_file, fieldname, flw_data_p, & field_loc_center, field_type_scalar) flw(:,:,:) = c1intp * flw_data_p(1) & - + c2intp * flw_data_p(2) + + c2intp * flw_data_p(2) endif !nc !flw given cldf and Tair calculated in prepare_forcing @@ -5202,7 +5196,7 @@ subroutine ISPOL_data rhoa (:,:,:) = 1.3_dbl_kind ! air density (kg/m^3) cldf(:,:,:) = c1 !0.25_dbl_kind ! cloud fraction frain(:,:,:) = c0 ! this is available in hourlymet_rh file - + ! Save record number for next time step oldrecnum = recnum oldrecnum4X = recnum4X @@ -5211,20 +5205,20 @@ end subroutine ISPOL_data !======================================================================= - subroutine ocn_data_ispol_init + subroutine ocn_data_ispol_init ! Reads NCAR pop ocean forcing data set 'pop_frc_gx1v3_010815.nc' ! at the ISPOL location -67.4677N, 310.4375E ! -! For ocean mixed layer-----------------------------units -! -! 1 sst------temperature---------------------------(C) -! 2 sss------salinity------------------------------(ppt) -! 3 hbl------depth---------------------------------(m) -! 4 u--------surface u current---------------------(m/s) +! For ocean mixed layer-----------------------------units +! +! 1 sst------temperature---------------------------(C) +! 2 sss------salinity------------------------------(ppt) +! 3 hbl------depth---------------------------------(m) +! 4 u--------surface u current---------------------(m/s) ! 5 v--------surface v current---------------------(m/s) -! 6 dhdx-----surface tilt x direction--------------(m/m) -! 7 dhdy-----surface tilt y direction--------------(m/m) +! 
6 dhdx-----surface tilt x direction--------------(m/m) +! 7 dhdy-----surface tilt y direction--------------(m/m) ! 8 qdp------ocean sub-mixed layer heat flux-------(W/m2) ! ! Fields 4, 5, 6, 7 are on the U-grid; 1, 2, 3, and 8 are @@ -5235,7 +5229,7 @@ subroutine ocn_data_ispol_init use ice_gather_scatter use ice_read_write - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & n , & ! field index m ! month index @@ -5246,13 +5240,10 @@ subroutine ocn_data_ispol_init 'dhdx', 'dhdy', 'qdp' / real (kind=dbl_kind) :: & - work - - integer (kind=int_kind) :: & - fid ! file id + work integer (kind=int_kind) :: & - status ! status flag + fid ! file id character(len=*), parameter :: subname = '(ocn_data_ispol_init)' @@ -5261,7 +5252,7 @@ subroutine ocn_data_ispol_init if (my_task == master_task) then if (restore_ocn) write (nu_diag,*) & - 'SST restoring timescale = ',trestore,' days' + 'SST restoring timescale = ',trestore,' days' sst_file = trim(ocn_data_dir)//'/'//trim(oceanmixed_file) ! not just sst @@ -5280,14 +5271,14 @@ subroutine ocn_data_ispol_init ! Read in ocean forcing data for all 12 months do n=1,nfld - do m=1,12 + do m=1,12 ! Note: netCDF does single to double conversion if necessary if (n >= 4 .and. n <= 7) then call ice_read_nc(fid, m, vname(n), work, debug_forcing, & field_loc_NEcorner, field_type_vector) else call ice_read_nc(fid, m, vname(n), work, debug_forcing, & - field_loc_center, field_type_scalar) + field_loc_center, field_type_scalar) endif ocn_frc_m(:,:,:,n,m) = work enddo ! month loop @@ -5316,7 +5307,6 @@ subroutine box2001_data_atm ! authors: Elizabeth Hunke, LANL use ice_domain, only: nblocks, blocks_ice - use ice_domain_size, only: max_blocks use ice_calendar, only: timesecs use ice_blocks, only: block, get_block, nx_block, ny_block, nghost use ice_flux, only: uatm, vatm, wind, rhoa, strax, stray @@ -5347,8 +5337,8 @@ subroutine box2001_data_atm period = c4*secday do iblk = 1, nblocks - do j = 1, ny_block - do i = 1, nx_block + do j = 1, ny_block + do i = 1, nx_block this_block = get_block(blocks_ice(iblk),iblk) iglob = this_block%i_glob @@ -5396,8 +5386,8 @@ subroutine box2001_data_atm ! / real(ny_global,kind=dbl_kind) ! initialization test - enddo - enddo + enddo + enddo enddo ! nblocks end subroutine box2001_data_atm @@ -5411,8 +5401,6 @@ subroutine box2001_data_ocn ! authors: Elizabeth Hunke, LANL use ice_domain, only: nblocks, blocks_ice - use ice_domain_size, only: max_blocks - use ice_calendar, only: timesecs use ice_blocks, only: block, get_block, nx_block, ny_block, nghost use ice_flux, only: uocn, vocn use ice_grid, only: uvm @@ -5429,16 +5417,13 @@ subroutine box2001_data_ocn type (block) :: & this_block ! block information for current block - real (kind=dbl_kind) :: & - secday, pi , puny, period, pi2, tau - character(len=*), parameter :: subname = '(box2001_data_ocn)' if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' do iblk = 1, nblocks - do j = 1, ny_block - do i = 1, nx_block + do j = 1, ny_block + do i = 1, nx_block this_block = get_block(blocks_ice(iblk),iblk) iglob = this_block%i_glob @@ -5454,8 +5439,8 @@ subroutine box2001_data_ocn uocn(i,j,iblk) = uocn(i,j,iblk) * uvm(i,j,iblk) vocn(i,j,iblk) = vocn(i,j,iblk) * uvm(i,j,iblk) - enddo - enddo + enddo + enddo enddo ! nblocks end subroutine box2001_data_ocn @@ -5466,7 +5451,6 @@ subroutine uniform_data_atm(dir,spd) ! 
uniform wind fields in some direction use ice_domain, only: nblocks - use ice_domain_size, only: max_blocks use ice_blocks, only: nx_block, ny_block, nghost use ice_flux, only: uatm, vatm, wind, rhoa, strax, stray use ice_state, only: aice @@ -5516,17 +5500,17 @@ subroutine uniform_data_atm(dir,spd) endif do iblk = 1, nblocks - do j = 1, ny_block - do i = 1, nx_block + do j = 1, ny_block + do i = 1, nx_block ! wind stress wind(i,j,iblk) = sqrt(uatm(i,j,iblk)**2 + vatm(i,j,iblk)**2) tau = rhoa(i,j,iblk) * 0.0012_dbl_kind * wind(i,j,iblk) strax(i,j,iblk) = aice(i,j,iblk) * tau * uatm(i,j,iblk) stray(i,j,iblk) = aice(i,j,iblk) * tau * vatm(i,j,iblk) - + + enddo enddo - enddo enddo ! nblocks end subroutine uniform_data_atm @@ -5537,25 +5521,19 @@ subroutine uniform_data_ocn(dir,spd) ! uniform current fields in some direction - use ice_domain, only: nblocks - use ice_domain_size, only: max_blocks - use ice_blocks, only: nx_block, ny_block, nghost use ice_flux, only: uocn, vocn character(len=*), intent(in) :: dir - real(kind=dbl_kind), intent(in), optional :: spd ! velocity + real(kind=dbl_kind), intent(in), optional :: spd ! velocity ! local parameters - integer (kind=int_kind) :: & - iblk, i,j ! loop indices - real(kind=dbl_kind) :: & ocn_val ! value to use for ocean currents character(len=*), parameter :: subname = '(uniform_data_ocn)' - + if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' if (present(spd)) then @@ -5583,9 +5561,9 @@ end subroutine uniform_data_ocn !======================================================================= subroutine get_wave_spec - + use ice_read_write, only: ice_read_nc_xyf - use ice_arrays_column, only: wave_spectrum, wave_sig_ht, & + use ice_arrays_column, only: wave_spectrum, & dwavefreq, wavefreq use ice_constants, only: c0 use ice_domain_size, only: nfreq @@ -5593,8 +5571,7 @@ subroutine get_wave_spec ! local variables integer (kind=int_kind) :: & - fid, & ! file id for netCDF routines - k + fid ! file id for netCDF routines real(kind=dbl_kind), dimension(nfreq) :: & wave_spectrum_profile ! wave spectrum @@ -5686,9 +5663,6 @@ subroutine init_snowtable snw_aging_table, & ! aging table setting fieldname ! field name in netcdf file - integer (kind=int_kind) :: & - j, k ! 
indices - character(len=*), parameter :: subname = '(init_snowtable)' !----------------------------------------------------------------- @@ -5816,7 +5790,8 @@ subroutine init_snowtable write(nu_diag,*) subname,' snoage_tau (1,1,1) = ',snowage_tau (1,1,1) write(nu_diag,*) subname,' snoage_kappa (1,1,1) = ',snowage_kappa(1,1,1) write(nu_diag,*) subname,' snoage_drdt0 (1,1,1) = ',snowage_drdt0(1,1,1) - write(nu_diag,*) subname,' Data at rhos, Tgrd, T = ',snowage_rhos(idx_rhos_max),snowage_Tgrd(idx_Tgrd_max),snowage_T(idx_T_max) + write(nu_diag,*) subname,' Data at rhos, Tgrd, T = ', & + snowage_rhos(idx_rhos_max),snowage_Tgrd(idx_Tgrd_max),snowage_T(idx_T_max) write(nu_diag,*) subname,' snoage_tau (max,max,max) = ',snowage_tau (idx_rhos_max, idx_Tgrd_max, idx_T_max) write(nu_diag,*) subname,' snoage_kappa (max,max,max) = ',snowage_kappa(idx_rhos_max, idx_Tgrd_max, idx_T_max) write(nu_diag,*) subname,' snoage_drdt0 (max,max,max) = ',snowage_drdt0(idx_rhos_max, idx_Tgrd_max, idx_T_max) diff --git a/cicecore/cicedynB/general/ice_forcing_bgc.F90 b/cicecore/cicedynB/general/ice_forcing_bgc.F90 index 383d388de..fc440834c 100644 --- a/cicecore/cicedynB/general/ice_forcing_bgc.F90 +++ b/cicecore/cicedynB/general/ice_forcing_bgc.F90 @@ -192,7 +192,7 @@ subroutine get_forcing_bgc ! Read two monthly silicate values and interpolate. ! Restore toward interpolated value. !------------------------------------------------------------------- - + if (trim(bgc_data_type)=='clim' .AND. tr_bgc_Sil) then ! call read_clim_data (readm, 0, ixm, mmonth, ixp, & ! sil_file, sil_data, & @@ -202,7 +202,7 @@ subroutine get_forcing_bgc sil_file, fieldname, sil_data, & field_loc_center, field_type_scalar) call interpolate_data (sil_data, sildat) - + if (istep == 1 .or. .NOT. restore_bgc) then !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block) @@ -275,7 +275,7 @@ subroutine get_forcing_bgc ! Restore toward interpolated value. !------------------------------------------------------------------- - if (trim(bgc_data_type)=='clim' .AND. tr_bgc_Nit) then + if (trim(bgc_data_type)=='clim' .AND. tr_bgc_Nit) then ! call read_clim_data (readm, 0, ixm, mmonth, ixp, & ! nit_file, nit_data, & ! field_loc_center, field_type_scalar) @@ -321,7 +321,7 @@ subroutine get_forcing_bgc do i = ilo, ihi nit(i,j,iblk) = nit(i,j,iblk) & - + (nitdat(i,j,iblk)-nit(i,j,iblk))*dt/trest + + (nitdat(i,j,iblk)-nit(i,j,iblk))*dt/trest ks = icepack_max_algae + 1 ocean_bio_all(i,j,ks,iblk) = nit(i,j,iblk) !nit ks = 2*icepack_max_algae + icepack_max_doc + 7 + icepack_max_dic @@ -332,7 +332,7 @@ subroutine get_forcing_bgc !$OMP END PARALLEL DO endif !restore_bgc -! elseif (trim(nit_data_type) == 'sss' .AND. tr_bgc_Nit) then +! elseif (trim(nit_data_type) == 'sss' .AND. tr_bgc_Nit) then ! !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block) ! do iblk = 1, nblocks @@ -345,11 +345,11 @@ subroutine get_forcing_bgc ! do j = jlo, jhi ! do i = ilo, ihi -! nit(i,j,iblk) = sss(i,j,iblk) +! nit(i,j,iblk) = sss(i,j,iblk) ! ks = icepack_max_algae + 1 -! ocean_bio_all(i,j,ks,iblk) = nit(i,j,iblk) !nit +! ocean_bio_all(i,j,ks,iblk) = nit(i,j,iblk) !nit ! ks = 2*icepack_max_algae + icepack_max_doc + 7 + icepack_max_dic -! ocean_bio_all(i,j,ks,iblk) = nit(i,j,iblk) !PON +! ocean_bio_all(i,j,ks,iblk) = nit(i,j,iblk) !PON ! enddo ! enddo ! 
enddo @@ -367,12 +367,12 @@ subroutine get_forcing_bgc do j = jlo, jhi do i = ilo, ihi - + nit(i,j,iblk) = 12.0_dbl_kind ks = icepack_max_algae + 1 - ocean_bio_all(i,j,ks,iblk) = nit(i,j,iblk) !nit + ocean_bio_all(i,j,ks,iblk) = nit(i,j,iblk) !nit ks = 2*icepack_max_algae + icepack_max_doc + 7 + icepack_max_dic - ocean_bio_all(i,j,ks,iblk) = nit(i,j,iblk) !PON + ocean_bio_all(i,j,ks,iblk) = nit(i,j,iblk) !PON enddo enddo enddo @@ -381,15 +381,15 @@ subroutine get_forcing_bgc endif !tr_bgc_Nit !------------------------------------------------------------------- - ! Data from Papdimitrious et al., 2007, Limnol. Oceanogr. - ! and WOA at 68oS, 304.5oE : - ! daily data located at the end of the 24-hour period. + ! Data from Papadimitriou et al., 2007, Limnol. Oceanogr. + ! and WOA at 68oS, 304.5oE : + ! daily data located at the end of the 24-hour period. !------------------------------------------------------------------- if (trim(bgc_data_type) == 'ISPOL') then nit_file = trim(bgc_data_dir)//'nutrients_daily_ISPOL_WOA_field3.nc' - sil_file = trim(bgc_data_dir)//'nutrients_daily_ISPOL_WOA_field3.nc' + sil_file = trim(bgc_data_dir)//'nutrients_daily_ISPOL_WOA_field3.nc' if (my_task == master_task .and. istep == 1) then if (tr_bgc_Sil) then @@ -408,45 +408,45 @@ subroutine get_forcing_bgc dataloc = 2 ! data located at end of interval sec1hr = secday ! seconds in day - maxrec = 365 ! + maxrec = 365 ! ! current record number - recnum = int(yday) + recnum = int(yday) ! Compute record numbers for surrounding data (2 on each side) ixm = mod(recnum+maxrec-2,maxrec) + 1 ixx = mod(recnum-1, maxrec) + 1 - + recslot = 2 ixp = -99 call interp_coeff (recnum, recslot, sec1hr, dataloc) read1 = .false. if (istep==1 .or. bgcrecnum .ne. recnum) read1 = .true. - - + + if (tr_bgc_Sil) then met_file = sil_file - fieldname= 'silicate' + fieldname= 'silicate' call read_data_nc_point(read1, 0, fyear, ixm, ixx, ixp, & maxrec, met_file, fieldname, sil_data_p, & field_loc_center, field_type_scalar) - + sil(:,:,:) = c1intp * sil_data_p(1) & + c2intp * sil_data_p(2) endif if (tr_bgc_Nit) then met_file = nit_file - fieldname= 'nitrate' + fieldname= 'nitrate' call read_data_nc_point(read1, 0, fyear, ixm, ixx, ixp, & maxrec, met_file, fieldname, nit_data_p, & field_loc_center, field_type_scalar) - + nit(:,:,:) = c1intp * nit_data_p(1) & + c2intp * nit_data_p(2) endif - + !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block) do iblk = 1, nblocks @@ -458,13 +458,13 @@ subroutine get_forcing_bgc do j = jlo, jhi do i = ilo, ihi - + ks = 2*icepack_max_algae + icepack_max_doc + 3 + icepack_max_dic - ocean_bio_all(i,j,ks,iblk) = sil(i,j,iblk) !Sil + ocean_bio_all(i,j,ks,iblk) = sil(i,j,iblk) !Sil ks = icepack_max_algae + 1 ocean_bio_all(i,j,ks,iblk) = nit(i,j,iblk) !nit ks = 2*icepack_max_algae + icepack_max_doc + 7 + icepack_max_dic - ocean_bio_all(i,j,ks,iblk) = nit(i,j,iblk) !PON + ocean_bio_all(i,j,ks,iblk) = nit(i,j,iblk) !PON enddo enddo enddo @@ -480,11 +480,11 @@ end subroutine get_forcing_bgc ! ! author: Nicole Jeffery, LANL - subroutine get_atm_bgc + subroutine get_atm_bgc use ice_blocks, only: block, get_block use ice_domain, only: nblocks, blocks_ice - use ice_domain_size, only: n_zaero + use ice_domain_size, only: n_zaero use ice_flux_bgc, only: flux_bio_atm, faero_atm ! local variables integer (kind=int_kind) :: & i, j, nn , & ! horizontal indices ilo,ihi,jlo,jhi, & ! beginning and end of physical domain - iblk ! 
block index logical (kind=log_kind) :: & tr_zaero @@ -520,15 +520,15 @@ subroutine get_atm_bgc !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,nn) do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi - - do nn = 1, n_zaero + + do nn = 1, n_zaero do j = jlo, jhi - do i = ilo, ihi + do i = ilo, ihi flux_bio_atm(i,j,nlt_zaero(nn),iblk) = faero_atm(i,j,nn,iblk) enddo enddo @@ -569,10 +569,10 @@ subroutine faero_default faero_atm(:,:,1,:) = 1.e-12_dbl_kind ! kg/m^2 s faero_atm(:,:,2,:) = 1.e-13_dbl_kind - faero_atm(:,:,3,:) = 1.e-14_dbl_kind - faero_atm(:,:,4,:) = 1.e-14_dbl_kind - faero_atm(:,:,5,:) = 1.e-14_dbl_kind - faero_atm(:,:,6,:) = 1.e-14_dbl_kind + faero_atm(:,:,3,:) = 1.e-14_dbl_kind + faero_atm(:,:,4,:) = 1.e-14_dbl_kind + faero_atm(:,:,5,:) = 1.e-14_dbl_kind + faero_atm(:,:,6,:) = 1.e-14_dbl_kind end subroutine faero_default @@ -598,11 +598,11 @@ subroutine faero_data aero2_data , & ! field values at 2 temporal data points aero3_data ! field values at 2 temporal data points - character (char_len_long) :: & + character (char_len_long) :: & aero_file, & ! netcdf filename fieldname ! field name in netcdf file - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & ixm,ixp , & ! record numbers for neighboring months maxrec , & ! maximum record number recslot , & ! spline slot for current record @@ -618,9 +618,9 @@ subroutine faero_data !------------------------------------------------------------------- - ! monthly data + ! monthly data ! - ! Assume that monthly data values are located in the middle of the + ! Assume that monthly data values are located in the middle of the ! month. !------------------------------------------------------------------- @@ -645,12 +645,12 @@ subroutine faero_data ! Find interpolation coefficients call interp_coeff_monthly (recslot) - ! Read 2 monthly values + ! Read 2 monthly values readm = .false. if (istep==1 .or. (mday==midmonth .and. msec==0)) readm = .true. -! aero_file = trim(atm_data_dir)//'faero.nc' - aero_file = '/usr/projects/climate/eclare/DATA/gx1v3/faero.nc' +! aero_file = trim(atm_data_dir)//'faero.nc' + aero_file = '/usr/projects/climate/eclare/DATA/gx1v3/faero.nc' fieldname='faero_atm001' call read_clim_data_nc (readm, 0, ixm, mmonth, ixp, & @@ -695,11 +695,11 @@ subroutine fzaero_data save :: & aero_data ! field values at 2 temporal data points - character (char_len_long) :: & + character (char_len_long) :: & aero_file, & ! netcdf filename fieldname ! field name in netcdf file - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & ixm,ixp , & ! record numbers for neighboring months maxrec , & ! maximum record number recslot , & ! spline slot for current record @@ -720,9 +720,9 @@ subroutine fzaero_data allocate( aero_data(nx_block,ny_block,2,max_blocks) ) !------------------------------------------------------------------- - ! monthly data + ! monthly data ! - ! Assume that monthly data values are located in the middle of the + ! Assume that monthly data values are located in the middle of the ! month. !------------------------------------------------------------------- @@ -747,13 +747,13 @@ subroutine fzaero_data ! Find interpolation coefficients call interp_coeff_monthly (recslot) - ! Read 2 monthly values + ! Read 2 monthly values readm = .false. if (istep==1 .or. (mday==midmonth .and. msec==0)) readm = .true. -! aero_file = trim(atm_data_dir)//'faero.nc' +! 
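faero_data (and fzaero_data below) assume monthly records valid at mid-month and re-read the two bracketing records only when the run crosses a month midpoint (the readm flag above). The calendar-aware weights come from interp_coeff_monthly; the sketch below shows the idea with a fixed 30-day month, which is a simplification of the real routine:

    program midmonth_weights
      implicit none
      real :: mday, midmonth, c1intp, c2intp
      midmonth = 15.0                     ! data assumed valid mid-month
      mday = 20.0                         ! current day of month (invented)
      ! past mid-month: interpolate between this month's and next month's records
      c2intp = (mday - midmonth) / 30.0   ! weight on the later record
      c1intp = 1.0 - c2intp               ! weight on the earlier record
      write(*,*) c1intp, c2intp
    end program midmonth_weights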
aero_file = trim(atm_data_dir)//'faero.nc' ! Cam5 monthly total black carbon deposition on the gx1 grid" - aero_file = '/usr/projects/climate/njeffery/DATA/CAM/Hailong_Wang/Cam5_bc_monthly_popgrid.nc' + aero_file = '/usr/projects/climate/njeffery/DATA/CAM/Hailong_Wang/Cam5_bc_monthly_popgrid.nc' fieldname='bcd' call read_clim_data_nc (readm, 0, ixm, mmonth, ixp, & @@ -786,11 +786,11 @@ subroutine init_bgc_data (fed1,fep1) ! local parameters integer (kind=int_kind) :: & - fid ! file id for netCDF file + fid ! file id for netCDF file logical (kind=log_kind) :: diag - character (char_len_long) :: & + character (char_len_long) :: & iron_file, & ! netcdf filename fieldname ! field name in netcdf file @@ -802,7 +802,7 @@ subroutine init_bgc_data (fed1,fep1) !------------------------------------------------------------------- if (trim(fe_data_type) == 'clim') then - diag = .true. ! write diagnostic information + diag = .true. ! write diagnostic information iron_file = trim(bgc_data_dir)//'dFe_50m_annual_Tagliabue_gx1.nc' if (my_task == master_task) then @@ -814,12 +814,12 @@ subroutine init_bgc_data (fed1,fep1) fieldname='dFe' ! Currently only first fed value is read - call ice_read_nc(fid,1,fieldname,fed1,diag) - where ( fed1(:,:,:) > 1.e20) fed1(:,:,:) = p1 + call ice_read_nc(fid,1,fieldname,fed1,diag) + where ( fed1(:,:,:) > 1.e20) fed1(:,:,:) = p1 - if (my_task == master_task) call ice_close_nc(fid) + if (my_task == master_task) call ice_close_nc(fid) - diag = .true. ! write diagnostic information + diag = .true. ! write diagnostic information iron_file = trim(bgc_data_dir)//'pFe_bathy_gx1.nc' if (my_task == master_task) then @@ -831,13 +831,13 @@ subroutine init_bgc_data (fed1,fep1) fieldname='pFe' ! Currently only first fep value is read - call ice_read_nc(fid,1,fieldname,fep1,diag) - where ( fep1(:,:,:) > 1.e20) fep1(:,:,:) = p1 + call ice_read_nc(fid,1,fieldname,fep1,diag) + where ( fep1(:,:,:) > 1.e20) fep1(:,:,:) = p1 + + if (my_task == master_task) call ice_close_nc(fid) - if (my_task == master_task) call ice_close_nc(fid) - endif - + end subroutine init_bgc_data !======================================================================= @@ -871,7 +871,7 @@ subroutine faero_optics logical (kind=log_kind) :: modal_aero - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & varid , & ! variable id status , & ! status output from netcdf routines n, k ! index @@ -880,9 +880,9 @@ subroutine faero_optics amin, amax, asum ! min, max values and sum of input array integer (kind=int_kind) :: & - fid ! file id for netCDF file + fid ! file id for netCDF file - character (char_len_long) :: & + character (char_len_long) :: & fieldname ! 
field name in netcdf file character(len=*), parameter :: subname = '(faero_optics)' @@ -972,12 +972,12 @@ subroutine faero_optics fieldname=optics_file_fieldname status = nf90_inq_varid(fid, trim(fieldname), varid) - + if (status /= nf90_noerr) then call abort_ice (subname//'ERROR: Cannot find variable '//trim(fieldname)) endif status = nf90_get_var( fid, varid, bcenh, & - start=(/1,1,1,1/), & + start=(/1,1,1,1/), & count=(/3,10,8,1/) ) do n=1,10 amin = minval(bcenh(:,n,:)) @@ -985,13 +985,13 @@ subroutine faero_optics asum = sum (bcenh(:,n,:)) write(nu_diag,*) ' min, max, sum =', amin, amax, asum enddo - call ice_close_nc(fid) + call ice_close_nc(fid) endif !master_task do n=1,3 do k=1,8 call broadcast_array(bcenh(n,:,k), master_task) enddo - enddo + enddo #else call abort_ice(subname//'ERROR: USE_NETCDF cpp not defined', & file=__FILE__, line=__LINE__) diff --git a/cicecore/cicedynB/general/ice_init.F90 b/cicecore/cicedynB/general/ice_init.F90 index 0c368a413..c2cc986f8 100644 --- a/cicecore/cicedynB/general/ice_init.F90 +++ b/cicecore/cicedynB/general/ice_init.F90 @@ -5,7 +5,7 @@ ! authors Elizabeth C. Hunke and William H. Lipscomb, LANL ! C. M. Bitz, UW ! -! 2004 WHL: Block structure added +! 2004 WHL: Block structure added ! 2006 ECH: Added namelist variables, warnings. ! Replaced old default initial ice conditions with 3.14 version. ! Converted to free source form (F90). @@ -97,7 +97,7 @@ subroutine input_data atm_data_format, ocn_data_format, & bgc_data_type, & ocn_data_type, ocn_data_dir, wave_spec_file, & - oceanmixed_file, restore_ocn, trestore, & + oceanmixed_file, restore_ocn, trestore, & ice_data_type, ice_data_conc, ice_data_dist, & snw_filename, & snw_tau_fname, snw_kappa_fname, snw_drdt0_fname, & @@ -108,7 +108,7 @@ subroutine input_data bathymetry_format, kmt_type, & grid_type, grid_format, & grid_ice, grid_ice_thrm, grid_ice_dynu, grid_ice_dynv, & - grid_ocn, grid_ocn_thrm, grid_ocn_dynu, grid_ocn_dynv, & + grid_ocn, grid_ocn_thrm, grid_ocn_dynu, grid_ocn_dynv, & grid_atm, grid_atm_thrm, grid_atm_dynu, grid_atm_dynv, & dxrect, dyrect, & pgl_global_ext @@ -140,7 +140,9 @@ subroutine input_data nml_error, & ! namelist i/o error flag n ! loop index +#ifdef CESMCOUPLED logical :: exists +#endif real (kind=dbl_kind) :: ustar_min, albicev, albicei, albsnowv, albsnowi, & ahmax, R_ice, R_pnd, R_snw, dT_mlt, rsnw_mlt, emissivity, & @@ -171,13 +173,15 @@ subroutine input_data integer (kind=int_kind) :: numin, numax ! unit number limits #ifdef UNDEPRECATE_CESMPONDS - integer (kind=int_kind) :: rpcesm, rplvl, rptopo + integer (kind=int_kind) :: rpcesm, rplvl, rptopo #else - integer (kind=int_kind) :: rplvl, rptopo + integer (kind=int_kind) :: rplvl, rptopo #endif real (kind=dbl_kind) :: Cf, ksno, puny character (len=char_len) :: abort_list +#ifdef CESMCOUPLED character (len=64) :: tmpstr +#endif character (len=128) :: tmpstr2 character(len=*), parameter :: subname='(input_data)' @@ -249,7 +253,7 @@ subroutine input_data k1, k2, alphab, threshold_hw, & deltaminEVP, deltaminVP, capping_method, & Cf, Pstar, Cstar, Ktens - + namelist /shortwave_nml/ & shortwave, albedo_type, & albicev, albicei, albsnowv, albsnowi, & @@ -304,11 +308,11 @@ subroutine input_data istep0 = 0 ! no. of steps taken in previous integrations, ! real (dumped) or imagined (to set calendar) #ifndef CESMCOUPLED - dt = 3600.0_dbl_kind ! time step, s + dt = 3600.0_dbl_kind ! time step, s #endif numin = 11 ! min allowed unit number numax = 99 ! max allowed unit number - npt = 99999 ! 
total number of time steps (dt) + npt = 99999 ! total number of time steps (dt) npt_unit = '1' ! units of npt 'y', 'm', 'd', 's', '1' diagfreq = 24 ! how often diag output is written debug_model = .false. ! debug output @@ -329,7 +333,7 @@ subroutine input_data histfreq(3) = 'd' ! output frequency option for different streams histfreq(4) = 'm' ! output frequency option for different streams histfreq(5) = 'y' ! output frequency option for different streams - histfreq_n(:) = 1 ! output frequency + histfreq_n(:) = 1 ! output frequency histfreq_base = 'zero' ! output frequency reference date hist_avg = .true. ! if true, write time-averages (not snapshots) history_format = 'default' ! history file format @@ -377,20 +381,20 @@ subroutine input_data kdyn = 1 ! type of dynamics (-1, 0 = off, 1 = evp, 2 = eap, 3 = vp) ndtd = 1 ! dynamic time steps per thermodynamic time step ndte = 120 ! subcycles per dynamics timestep: ndte=dt_dyn/dte - evp_algorithm = 'standard_2d' ! EVP kernel (=standard_2d: standard cice evp; =shared_mem_1d: 1d shared memory and no mpi. if more mpi processors then executed on master + evp_algorithm = 'standard_2d' ! EVP kernel (standard_2d=standard cice evp; shared_mem_1d=1d shared memory and no mpi elasticDamp = 0.36_dbl_kind ! coefficient for calculating the parameter E pgl_global_ext = .false. ! if true, init primary grid lengths (global ext.) brlx = 300.0_dbl_kind ! revised_evp values. Otherwise overwritten in ice_dyn_shared arlx = 300.0_dbl_kind ! revised_evp values. Otherwise overwritten in ice_dyn_shared revised_evp = .false. ! if true, use revised procedure for evp dynamics - yield_curve = 'ellipse' ! yield curve + yield_curve = 'ellipse' ! yield curve kstrength = 1 ! 1 = Rothrock 75 strength, 0 = Hibler 79 Pstar = 2.75e4_dbl_kind ! constant in Hibler strength formula (kstrength = 0) Cstar = 20._dbl_kind ! constant in Hibler strength formula (kstrength = 0) krdg_partic = 1 ! 1 = new participation, 0 = Thorndike et al 75 krdg_redist = 1 ! 1 = new redistribution, 0 = Hibler 80 mu_rdg = 3 ! e-folding scale of ridged ice, krdg_partic=1 (m^0.5) - Cf = 17.0_dbl_kind ! ratio of ridging work to PE change in ridging + Cf = 17.0_dbl_kind ! ratio of ridging work to PE change in ridging ksno = 0.3_dbl_kind ! snow thermal conductivity dxrect = 0.0_dbl_kind ! user defined grid spacing in cm in x direction dyrect = 0.0_dbl_kind ! user defined grid spacing in cm in y direction @@ -402,14 +406,15 @@ subroutine input_data alphab = 20.0_dbl_kind ! alphab=Cb factor in Lemieux et al 2015 threshold_hw = 30.0_dbl_kind ! max water depth for grounding Ktens = 0.0_dbl_kind ! T=Ktens*P (tensile strength: see Konig and Holland, 2010) - e_yieldcurve = 2.0_dbl_kind ! VP aspect ratio of elliptical yield curve + e_yieldcurve = 2.0_dbl_kind ! VP aspect ratio of elliptical yield curve e_plasticpot = 2.0_dbl_kind ! VP aspect ratio of elliptical plastic potential visc_method = 'avg_zeta' ! calc viscosities at U point: avg_strength, avg_zeta deltaminEVP = 1e-11_dbl_kind ! minimum delta for viscosities (EVP, Hunke 2001) deltaminVP = 2e-9_dbl_kind ! minimum delta for viscosities (VP, Hibler 1979) capping_method = 'max' ! method for capping of viscosities (max=Hibler 1979,sum=Kreyscher2000) maxits_nonlin = 4 ! max nb of iteration for nonlinear solver - precond = 'pgmres' ! preconditioner for fgmres: 'ident' (identity), 'diag' (diagonal), 'pgmres' (Jacobi-preconditioned GMRES) + precond = 'pgmres' ! preconditioner for fgmres: 'ident' (identity), 'diag' (diagonal), + ! 
'pgmres' (Jacobi-preconditioned GMRES) dim_fgmres = 50 ! size of fgmres Krylov subspace dim_pgmres = 5 ! size of pgmres Krylov subspace maxits_fgmres = 50 ! max nb of iteration for fgmres @@ -422,7 +427,8 @@ subroutine input_data reltol_fgmres = 1e-2_dbl_kind ! fgmres stopping criterion: reltol_fgmres*res(k) reltol_pgmres = 1e-6_dbl_kind ! pgmres stopping criterion: reltol_pgmres*res(k) algo_nonlin = 'picard' ! nonlinear algorithm: 'picard' (Picard iteration), 'anderson' (Anderson acceleration) - fpfunc_andacc = 1 ! fixed point function for Anderson acceleration: 1: g(x) = FMGRES(A(x),b(x)), 2: g(x) = x - A(x)x + b(x) + fpfunc_andacc = 1 ! fixed point function for Anderson acceleration: + ! 1: g(x) = FMGRES(A(x),b(x)), 2: g(x) = x - A(x)x + b(x) dim_andacc = 5 ! size of Anderson minimization matrix (number of saved previous residuals) reltol_andacc = 1e-6_dbl_kind ! relative tolerance for Anderson acceleration damping_andacc = 0 ! damping factor for Anderson acceleration @@ -463,7 +469,7 @@ subroutine input_data hp1 = 0.01_dbl_kind ! critical pond lid thickness for topo ponds hs0 = 0.03_dbl_kind ! snow depth for transition to bare sea ice (m) hs1 = 0.03_dbl_kind ! snow depth for transition to bare pond ice (m) - dpscale = c1 ! alter e-folding time scale for flushing + dpscale = c1 ! alter e-folding time scale for flushing frzpnd = 'cesm' ! melt pond refreezing parameterization rfracmin = 0.15_dbl_kind ! minimum retained fraction of meltwater rfracmax = 0.85_dbl_kind ! maximum retained fraction of meltwater @@ -544,7 +550,7 @@ subroutine input_data restart_age = .false. ! ice age restart tr_FY = .false. ! ice age restart_FY = .false. ! ice age restart - tr_lvl = .false. ! level ice + tr_lvl = .false. ! level ice restart_lvl = .false. ! level ice restart #ifdef UNDEPRECATE_CESMPONDS tr_pond_cesm = .false. ! CESM melt ponds @@ -818,7 +824,7 @@ subroutine input_data call broadcast_scalar(diag_file, master_task) do n = 1, max_nstrm call broadcast_scalar(histfreq(n), master_task) - enddo + enddo call broadcast_array(histfreq_n, master_task) call broadcast_scalar(histfreq_base, master_task) call broadcast_scalar(hist_avg, master_task) @@ -1229,7 +1235,7 @@ subroutine input_data abort_list = trim(abort_list)//":45" endif endif - + #ifdef UNDEPRECATE_CESMPONDS rpcesm = 0 #endif @@ -1493,7 +1499,7 @@ subroutine input_data if (my_task == master_task) write(nu_diag,*) subname//' ERROR: formdrag=F and fbot_xfer_type=Cdn_ocn' abort_list = trim(abort_list)//":19" endif - + if(history_precision .ne. 4 .and. history_precision .ne. 8) then write (nu_diag,*) subname//' ERROR: bad value for history_precision, allowed values: 4, 8' abort_list = trim(abort_list)//":22" @@ -1530,12 +1536,12 @@ subroutine input_data endif abort_list = trim(abort_list)//":60" endif - + if (trim(algo_nonlin) == 'picard') then ! Picard solver is implemented in the Anderson solver; reset number of saved residuals to zero dim_andacc = 0 endif - + if (.not. (trim(precond) == 'ident' .or. trim(precond) == 'diag' .or. trim(precond) == 'pgmres')) then if (my_task == master_task) then write(nu_diag,*) subname//' ERROR: unknown precond: '//precond @@ -1543,7 +1549,7 @@ subroutine input_data endif abort_list = trim(abort_list)//":61" endif - + if (.not. (trim(ortho_type) == 'cgs' .or. 
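The algo_nonlin and fpfunc_andacc settings above define a fixed-point map g whose fixed point u* solves the momentum system A(u*)u* = b(u*): option 1 applies one FGMRES solve per iteration, option 2 a Richardson-style residual update. Picard iteration simply takes u_{k+1} = g(u_k), which is why dim_andacc is reset to zero for the Picard case below, while Anderson acceleration blends several previous residuals. A scalar toy of the Picard case, with an invented map standing in for the FGMRES solve:

    program picard_sketch
      implicit none
      integer, parameter :: dbl_kind = selected_real_kind(13)
      real (kind=dbl_kind) :: u
      integer :: k
      u = 0.0_dbl_kind
      do k = 1, 10
         ! scalar stand-in for g(u) = FGMRES(A(u), b(u)) with A(u) = 2+u, b = 4
         u = 4.0_dbl_kind/(2.0_dbl_kind + u)   ! Picard: take the map output directly
      end do
      write(*,*) 'fixed point ~', u            ! converges to sqrt(5)-1 ~= 1.236
    end program picard_sketch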
trim(ortho_type) == 'mgs')) then if (my_task == master_task) then write(nu_diag,*) subname//' ERROR: unknown ortho_type: '//ortho_type @@ -1738,7 +1744,7 @@ subroutine input_data tmpstr2 = ' : revised EVP formulation not used' endif write(nu_diag,1010) ' revised_evp = ', revised_evp,trim(tmpstr2) - + if (evp_algorithm == 'standard_2d') then tmpstr2 = ' : standard 2d EVP solver' elseif (evp_algorithm == 'shared_mem_1d') then @@ -1807,7 +1813,7 @@ subroutine input_data tmpstr2 = ' : no seabed stress parameterization' endif write(nu_diag,1010) ' seabed_stress = ', seabed_stress,trim(tmpstr2) - if (seabed_stress) then + if (seabed_stress) then write(nu_diag,1030) ' seabed method = ',trim(seabed_stress_method) if (seabed_stress_method == 'LKD') then write(nu_diag,1002) ' k1 = ', k1, ' : free parameter for landfast ice' @@ -1821,7 +1827,7 @@ subroutine input_data if (grid_ice == 'C' .or. grid_ice == 'CD') then write(nu_diag,1030) ' visc_method= ', trim(visc_method),' : viscosities method (U point)' endif - + write(nu_diag,1002) ' Ktens = ', Ktens, ' : tensile strength factor' if (kdyn == 3) then @@ -2316,11 +2322,11 @@ subroutine input_data trim(ocn_data_type) /= 'default') then write(nu_diag,1031) ' ocn_data_dir = ', trim(ocn_data_dir) write(nu_diag,1011) ' restore_ocn = ', restore_ocn - endif + endif write(nu_diag,1011) ' restore_ice = ', restore_ice if (restore_ice .or. restore_ocn) & write(nu_diag,1021) ' trestore = ', trestore - + write(nu_diag,*) ' ' write(nu_diag,'(a31,2f8.2)') 'Diagnostic point 1: lat, lon =', & latpnt(1), lonpnt(1) @@ -2392,9 +2398,9 @@ subroutine input_data if (kdyn == 1 .and. & evp_algorithm /= 'standard_2d' .and. & - evp_algorithm /= 'shared_mem_1d') then + evp_algorithm /= 'shared_mem_1d') then if (my_task == master_task) write(nu_diag,*) subname//' ERROR: unknown evp_algorithm=',trim(evp_algorithm) - abort_list = trim(abort_list)//":21" + abort_list = trim(abort_list)//":21" endif if (abort_list /= "") then @@ -2548,7 +2554,7 @@ subroutine init_state !----------------------------------------------------------------- if (my_task == master_task) then - + if (nilyr < 1) then write(nu_diag,*) subname//' ERROR: Must have at least one ice layer' write(nu_diag,*) subname//' ERROR: nilyr =', nilyr @@ -2649,11 +2655,11 @@ subroutine init_state do it = 1, ntrcr ! mask for base quantity on which tracers are carried if (trcr_depend(it) == 0) then ! area - trcr_base(it,1) = c1 + trcr_base(it,1) = c1 elseif (trcr_depend(it) == 1) then ! ice volume - trcr_base(it,2) = c1 + trcr_base(it,2) = c1 elseif (trcr_depend(it) == 2) then ! snow volume - trcr_base(it,3) = c1 + trcr_base(it,3) = c1 else trcr_base(it,1) = c1 ! default: ice area trcr_base(it,2) = c0 @@ -2698,7 +2704,7 @@ subroutine init_state !$OMP iglob,jglob) do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo @@ -2736,13 +2742,13 @@ subroutine init_state call grid_average_X2Y('S',vvel,'U',vvelN,'N') call grid_average_X2Y('S',uvel,'U',uvelE,'E') call grid_average_X2Y('S',vvel,'U',vvelE,'E') - + ! 
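The trcr_base settings above record which extensive field carries each tracer when aggregating over thickness categories: trcr_depend=0 puts the weight on aice, 1 on vice, 2 on vsno. The actual aggregation is done by icepack_aggregate; the sketch below, with invented numbers, only illustrates what an area-carried tracer (trcr_base(it,1) = c1) implies:

    program aggregate_sketch
      implicit none
      integer, parameter :: dbl_kind = selected_real_kind(13)
      real (kind=dbl_kind) :: trcrn(3), aicen(3)
      trcrn = (/ 270.0_dbl_kind, 265.0_dbl_kind, 260.0_dbl_kind /) ! tracer per category
      aicen = (/ 0.2_dbl_kind, 0.3_dbl_kind, 0.1_dbl_kind /)       ! category areas
      ! area-carried tracer: cell mean is the area-weighted category mean
      write(*,*) 'aggregate =', sum(trcrn*aicen)/sum(aicen)
    end program aggregate_sketch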
Halo update on North, East faces call ice_HaloUpdate(uvelN, halo_info, & field_loc_Nface, field_type_scalar) call ice_HaloUpdate(vvelN, halo_info, & field_loc_Nface, field_type_scalar) - + call ice_HaloUpdate(uvelE, halo_info, & field_loc_Eface, field_type_scalar) call ice_HaloUpdate(vvelE, halo_info, & @@ -2821,7 +2827,7 @@ subroutine set_state_var (nx_block, ny_block, & use ice_arrays_column, only: hin_max use ice_domain_size, only: nilyr, nslyr, nx_global, ny_global, ncat - use ice_grid, only: grid_type, dxrect, dyrect + use ice_grid, only: dxrect, dyrect use ice_forcing, only: ice_data_type, ice_data_conc, ice_data_dist integer (kind=int_kind), intent(in) :: & @@ -2831,7 +2837,7 @@ subroutine set_state_var (nx_block, ny_block, & iglob(nx_block) , & ! global indices jglob(ny_block) ! - character(len=char_len_long), intent(in) :: & + character(len=char_len_long), intent(in) :: & ice_ic ! method of ice cover initialization logical (kind=log_kind), dimension (nx_block,ny_block), intent(in) :: & @@ -2843,8 +2849,8 @@ subroutine set_state_var (nx_block, ny_block, & real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & Tair , & ! air temperature (K) - Tf , & ! freezing temperature (C) - sst ! sea surface temperature (C) + Tf , & ! freezing temperature (C) + sst ! sea surface temperature (C) real (kind=dbl_kind), dimension (nx_block,ny_block,nilyr), intent(in) :: & salinz , & ! initial salinity profile @@ -2861,7 +2867,7 @@ subroutine set_state_var (nx_block, ny_block, & real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out) :: & uvel , & ! ice velocity B grid - vvel ! + vvel ! ! local variables integer (kind=int_kind) :: & @@ -2902,7 +2908,7 @@ subroutine set_state_var (nx_block, ny_block, & real (kind=dbl_kind), parameter :: & hsno_init = 0.20_dbl_kind , & ! initial snow thickness (m) - edge_init_nh = 70._dbl_kind, & ! initial ice edge, N.Hem. (deg) + edge_init_nh = 70._dbl_kind, & ! initial ice edge, N.Hem. (deg) edge_init_sh = -60._dbl_kind ! initial ice edge, S.Hem. (deg) real (kind=dbl_kind) :: & ! boxslotcyl @@ -2950,7 +2956,7 @@ subroutine set_state_var (nx_block, ny_block, & vicen(i,j,n) = c0 vsnon(i,j,n) = c0 if (tmask(i,j)) then - trcrn(i,j,nt_Tsfc,n) = Tf(i,j) ! surface temperature + trcrn(i,j,nt_Tsfc,n) = Tf(i,j) ! surface temperature else trcrn(i,j,nt_Tsfc,n) = c0 ! at land grid cells (for clean history/restart files) endif @@ -3022,9 +3028,9 @@ subroutine set_state_var (nx_block, ny_block, & ! initial category areas in cells with ice hbar = c3 ! initial ice thickness with greatest area - ! Note: the resulting average ice thickness + ! Note: the resulting average ice thickness ! tends to be less than hbar due to the - ! nonlinear distribution of ice thicknesses + ! nonlinear distribution of ice thicknesses sum = c0 do n = 1, ncat if (n < ncat) then @@ -3083,7 +3089,7 @@ subroutine set_state_var (nx_block, ny_block, & if (tmask(i,j)) then ! check if grid point is inside slotted cylinder in_slot = (dxrect*real(iglob(i)-1, kind=dbl_kind) >= center_x - width/c2) .and. & - (dxrect*real(iglob(i)-1, kind=dbl_kind) <= center_x + width/c2) .and. & + (dxrect*real(iglob(i)-1, kind=dbl_kind) <= center_x + width/c2) .and. & (dyrect*real(jglob(j)-1, kind=dbl_kind) >= center_y - radius) .and. & (dyrect*real(jglob(j)-1, kind=dbl_kind) <= center_y + (length - radius)) @@ -3254,7 +3260,7 @@ subroutine set_state_var (nx_block, ny_block, & ! surface temperature trcrn(i,j,nt_Tsfc,n) = Tsfc ! deg C - ! ice enthalpy, salinity + ! 
ice enthalpy, salinity do k = 1, nilyr trcrn(i,j,nt_qice+k-1,n) = qin(k) trcrn(i,j,nt_sice+k-1,n) = salinz(i,j,k) @@ -3268,7 +3274,7 @@ subroutine set_state_var (nx_block, ny_block, & enddo ! ij enddo ! ncat - + !--------------------------------------------------------- ! ice velocity ! these velocites are defined on B-grid diff --git a/cicecore/cicedynB/general/ice_state.F90 b/cicecore/cicedynB/general/ice_state.F90 index e07eca209..d5c115a0c 100644 --- a/cicecore/cicedynB/general/ice_state.F90 +++ b/cicecore/cicedynB/general/ice_state.F90 @@ -10,7 +10,7 @@ ! aicen(i,j,n) aice(i,j) --- ! vicen(i,j,n) vice(i,j) m ! vsnon(i,j,n) vsno(i,j) m -! trcrn(i,j,it,n) trcr(i,j,it) +! trcrn(i,j,it,n) trcr(i,j,it) ! ! Area is dimensionless because aice is the fractional area ! (normalized so that the sum over all categories, including open @@ -118,7 +118,7 @@ module ice_state strength ! ice strength (N/m) !----------------------------------------------------------------- - ! ice state at start of time step, saved for later in the step + ! ice state at start of time step, saved for later in the step !----------------------------------------------------------------- real (kind=dbl_kind), dimension(:,:,:), allocatable, & @@ -129,7 +129,7 @@ module ice_state dimension(:,:,:,:), allocatable, public :: & aicen_init , & ! initial ice concentration, for linear ITD vicen_init , & ! initial ice volume (m), for linear ITD - vsnon_init ! initial snow volume (m), for aerosol + vsnon_init ! initial snow volume (m), for aerosol !======================================================================= @@ -137,7 +137,7 @@ module ice_state !======================================================================= ! -! Allocate space for all state variables +! Allocate space for all state variables ! subroutine alloc_state integer (int_kind) :: ntrcr, ierr @@ -168,7 +168,7 @@ subroutine alloc_state vsnon (nx_block,ny_block,ncat,max_blocks) , & ! volume per unit area of snow (m) aicen_init(nx_block,ny_block,ncat,max_blocks) , & ! initial ice concentration, for linear ITD vicen_init(nx_block,ny_block,ncat,max_blocks) , & ! initial ice volume (m), for linear ITD - vsnon_init(nx_block,ny_block,ncat,max_blocks) , & ! initial snow volume (m), for aerosol + vsnon_init(nx_block,ny_block,ncat,max_blocks) , & ! initial snow volume (m), for aerosol trcr (nx_block,ny_block,ntrcr,max_blocks) , & ! ice tracers: 1: surface temperature of ice/snow (C) trcrn (nx_block,ny_block,ntrcr,ncat,max_blocks) , & ! tracers: 1: surface temperature of ice/snow (C) stat=ierr) @@ -181,7 +181,7 @@ subroutine alloc_state trcr_base(ntrcr,3) , & ! 
= 0 or 1 depending on tracer dependency, (1) aice, (2) vice, (3) vsno stat=ierr) if (ierr/=0) call abort_ice('(alloc_state): Out of memory2') - + trcr_depend = 0 n_trcr_strata = 0 nt_strata = 0 diff --git a/cicecore/cicedynB/general/ice_step_mod.F90 b/cicecore/cicedynB/general/ice_step_mod.F90 index 3f9b9abeb..b6f8741c0 100644 --- a/cicecore/cicedynB/general/ice_step_mod.F90 +++ b/cicecore/cicedynB/general/ice_step_mod.F90 @@ -128,7 +128,7 @@ subroutine prep_radiation (iblk) alidr_init(:,:,iblk) = c0 alidf_init(:,:,iblk) = c0 - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo @@ -188,7 +188,10 @@ subroutine step_therm1 (dt, iblk) hfreebd, hdraft, hridge, distrdg, hkeel, dkeel, lfloe, dfloe, & fswsfcn, fswintn, Sswabsn, Iswabsn, meltsliqn, meltsliq, & fswthrun, fswthrun_vdr, fswthrun_vdf, fswthrun_idr, fswthrun_idf - use ice_blocks, only: block, get_block, nx_block, ny_block + use ice_blocks, only: block, get_block +#ifdef CICE_IN_NEMO + use ice_blocks, only: nx_block, ny_block +#endif use ice_calendar, only: yday use ice_domain, only: blocks_ice use ice_domain_size, only: ncat, nilyr, nslyr, n_iso, n_aero @@ -205,13 +208,16 @@ subroutine step_therm1 (dt, iblk) use ice_flux_bgc, only: dsnown, faero_atm, faero_ocn, fiso_atm, fiso_ocn, & Qa_iso, Qref_iso, fiso_evap, HDO_ocn, H2_16O_ocn, H2_18O_ocn use ice_grid, only: lmask_n, lmask_s, tmask - use ice_state, only: aice, aicen, aice_init, aicen_init, vicen_init, & + use ice_state, only: aice, aicen, aicen_init, vicen_init, & vice, vicen, vsno, vsnon, trcrn, uvel, vvel, vsnon_init +#ifdef CICE_IN_NEMO + use ice_state, only: aice_init +#endif #ifdef CESMCOUPLED use ice_prescribed_mod, only: prescribed_ice #else - logical (kind=log_kind) :: & + logical (kind=log_kind) :: & prescribed_ice ! if .true., use prescribed ice instead of computed #endif real (kind=dbl_kind), intent(in) :: & @@ -222,7 +228,7 @@ subroutine step_therm1 (dt, iblk) ! local variables #ifdef CICE_IN_NEMO - real (kind=dbl_kind) :: & + real (kind=dbl_kind) :: & raice ! reciprocal of ice concentration #endif integer (kind=int_kind) :: & @@ -321,12 +327,12 @@ subroutine step_therm1 (dt, iblk) enddo ! 
j #endif - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi - + do j = jlo, jhi do i = ilo, ihi @@ -388,16 +394,16 @@ subroutine step_therm1 (dt, iblk) uvel = uvel_center , & vvel = vvel_center , & Tsfc = trcrn (i,j,nt_Tsfc,:,iblk), & - zqsn = trcrn (i,j,nt_qsno:nt_qsno+nslyr-1,:,iblk), & - zqin = trcrn (i,j,nt_qice:nt_qice+nilyr-1,:,iblk), & - zSin = trcrn (i,j,nt_sice:nt_sice+nilyr-1,:,iblk), & - alvl = trcrn (i,j,nt_alvl,:,iblk), & - vlvl = trcrn (i,j,nt_vlvl,:,iblk), & - apnd = trcrn (i,j,nt_apnd,:,iblk), & - hpnd = trcrn (i,j,nt_hpnd,:,iblk), & - ipnd = trcrn (i,j,nt_ipnd,:,iblk), & + zqsn = trcrn (i,j,nt_qsno:nt_qsno+nslyr-1,:,iblk), & + zqin = trcrn (i,j,nt_qice:nt_qice+nilyr-1,:,iblk), & + zSin = trcrn (i,j,nt_sice:nt_sice+nilyr-1,:,iblk), & + alvl = trcrn (i,j,nt_alvl,:,iblk), & + vlvl = trcrn (i,j,nt_vlvl,:,iblk), & + apnd = trcrn (i,j,nt_apnd,:,iblk), & + hpnd = trcrn (i,j,nt_hpnd,:,iblk), & + ipnd = trcrn (i,j,nt_ipnd,:,iblk), & iage = trcrn (i,j,nt_iage,:,iblk), & - FY = trcrn (i,j,nt_FY ,:,iblk), & + FY = trcrn (i,j,nt_FY ,:,iblk), & rsnwn = rsnwn (:,:), & smicen = smicen (:,:), & smliqn = smliqn (:,:), & @@ -601,7 +607,7 @@ subroutine step_therm2 (dt, iblk) use ice_blocks, only: block, get_block use ice_calendar, only: yday use ice_domain, only: blocks_ice - use ice_domain_size, only: ncat, nilyr, nslyr, n_aero, nblyr, nfsd + use ice_domain_size, only: ncat, nilyr, nslyr, nblyr, nfsd use ice_flux, only: fresh, frain, fpond, frzmlt, frazil, frz_onset, & update_ocn_f, fsalt, Tf, sss, salinz, fhocn, rside, fside, & meltl, frazil_diag @@ -651,7 +657,7 @@ subroutine step_therm2 (dt, iblk) nltrcr = 0 endif - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo @@ -668,7 +674,7 @@ subroutine step_therm2 (dt, iblk) call icepack_step_therm2(dt=dt, ncat=ncat, & nltrcr=nltrcr, nilyr=nilyr, nslyr=nslyr, nblyr=nblyr, & - hin_max = hin_max (:), & + hin_max = hin_max (:), & aicen = aicen (i,j,:,iblk), & vicen = vicen (i,j,:,iblk), & vsnon = vsnon (i,j,:,iblk), & @@ -760,8 +766,8 @@ subroutine update_state (dt, daidt, dvidt, dagedt, offset) real (kind=dbl_kind), intent(in), optional :: & offset ! d(age)/dt time offset = dt for thermo, 0 for dyn - integer (kind=int_kind) :: & - iblk, & ! block index + integer (kind=int_kind) :: & + iblk, & ! block index i,j, & ! horizontal indices ntrcr, & ! nt_iage ! @@ -795,9 +801,9 @@ subroutine update_state (dt, daidt, dvidt, dagedt, offset) do i = 1, nx_block !----------------------------------------------------------------- - ! Aggregate the updated state variables (includes ghost cells). - !----------------------------------------------------------------- - + ! Aggregate the updated state variables (includes ghost cells). + !----------------------------------------------------------------- + ! if (tmask(i,j,iblk)) & call icepack_aggregate(ncat = ncat, & aicen = aicen(i,j,:,iblk), & @@ -856,7 +862,7 @@ end subroutine update_state subroutine step_dyn_wave (dt) - use ice_arrays_column, only: wave_spectrum, wave_sig_ht, & + use ice_arrays_column, only: wave_spectrum, & d_afsd_wave, floe_rad_l, floe_rad_c, wavefreq, dwavefreq use ice_blocks, only: block, get_block use ice_domain, only: blocks_ice, nblocks @@ -876,9 +882,7 @@ subroutine step_dyn_wave (dt) integer (kind=int_kind) :: & ilo,ihi,jlo,jhi, & ! 
beginning and end of physical domain iblk, & ! block index - i, j, & ! horizontal indices - ntrcr, & ! - nbtrcr ! + i, j ! horizontal indices character (len=char_len) :: wave_spec_type @@ -1000,14 +1004,14 @@ subroutine step_dyn_ridge (dt, ndtd, iblk) integer (kind=int_kind), intent(in) :: & ndtd, & ! number of dynamics subcycles - iblk ! block index + iblk ! block index ! local variables type (block) :: & this_block ! block information for current block - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & ilo,ihi,jlo,jhi, & ! beginning and end of physical domain i, j, & ! horizontal indices ntrcr, & ! @@ -1127,9 +1131,7 @@ subroutine step_snow (dt, iblk) integer (kind=int_kind) :: & ilo,ihi,jlo,jhi, & ! beginning and end of physical domain i, j, & ! horizontal indices - n, & ! category index - ns, & ! history streams index - ipoint ! index for print diagnostic + ns ! history streams index real (kind=dbl_kind) :: & puny @@ -1142,7 +1144,7 @@ subroutine step_snow (dt, iblk) type (block) :: & this_block ! block information for current block - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo @@ -1182,7 +1184,7 @@ subroutine step_snow (dt, iblk) trcrn(i,j,nt_qsno:nt_qsno+nslyr-1,:,iblk), & trcrn(i,j,nt_alvl,:,iblk), & trcrn(i,j,nt_vlvl,:,iblk), & - trcrn(i,j,nt_smice:nt_smice+nslyr-1,:,iblk), & + trcrn(i,j,nt_smice:nt_smice+nslyr-1,:,iblk), & trcrn(i,j,nt_smliq:nt_smliq+nslyr-1,:,iblk), & trcrn(i,j,nt_rsnw:nt_rsnw+nslyr-1,:,iblk), & trcrn(i,j,nt_rhos:nt_rhos+nslyr-1,:,iblk), & @@ -1297,7 +1299,7 @@ subroutine step_radiation (dt, iblk) allocate(ztrcr_sw(nbtrcr_sw,ncat)) allocate(rsnow(nslyr,ncat)) - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo @@ -1377,7 +1379,7 @@ subroutine step_radiation (dt, iblk) dhsn =dhsn (i,j,: ,iblk), ffracn =ffracn(i,j,:,iblk), & rsnow =rsnow (:,:), l_print_point=l_print_point) endif - + if (dEdd_algae .and. (tr_zaero .or. tr_bgc_N)) then do n = 1, ncat do k = 1, nbtrcr_sw @@ -1495,24 +1497,24 @@ subroutine ocean_mixed_layer (dt, iblk) j = indxj(ij) call icepack_atm_boundary(sfctype = 'ocn', & - Tsf = sst (i,j,iblk), & + Tsf = sst (i,j,iblk), & potT = potT (i,j,iblk), & uatm = uatmT (i,j,iblk), & vatm = vatmT (i,j,iblk), & - wind = wind (i,j,iblk), & - zlvl = zlvl (i,j,iblk), & - Qa = Qa (i,j,iblk), & + wind = wind (i,j,iblk), & + zlvl = zlvl (i,j,iblk), & + Qa = Qa (i,j,iblk), & rhoa = rhoa (i,j,iblk), & - strx = strairx_ocn(i,j,iblk), & - stry = strairy_ocn(i,j,iblk), & - Tref = Tref_ocn (i,j,iblk), & - Qref = Qref_ocn (i,j,iblk), & - delt = delt (i,j), & + strx = strairx_ocn(i,j,iblk), & + stry = strairy_ocn(i,j,iblk), & + Tref = Tref_ocn (i,j,iblk), & + Qref = Qref_ocn (i,j,iblk), & + delt = delt (i,j), & delq = delq (i,j), & lhcoef = lhcoef (i,j), & shcoef = shcoef (i,j), & - Cdn_atm = Cdn_atm (i,j,iblk), & - Cdn_atm_ratio_n = Cdn_atm_ratio(i,j,iblk)) + Cdn_atm = Cdn_atm (i,j,iblk), & + Cdn_atm_ratio_n = Cdn_atm_ratio(i,j,iblk)) enddo ! 
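Nearly every routine in ice_step_mod above repeats the same skeleton: fetch the block metadata with get_block, then sweep only the physical cells ilo:ihi, jlo:jhi, leaving ghost cells to a later ice_HaloUpdate. A self-contained toy of that pattern, with a stand-in block type (block_t is invented here; the real type lives in ice_blocks):

    program block_loop_sketch
      implicit none
      type :: block_t                       ! stand-in for ice_blocks' block type
         integer :: ilo, ihi, jlo, jhi
      end type
      type(block_t) :: this_block
      integer :: i, j, iblk, nblocks
      real :: work(10,10)
      work = 0.0
      nblocks = 1
      do iblk = 1, nblocks
         this_block = block_t(2, 9, 2, 9)   ! interior cells; rows/cols 1 and 10 are ghosts
         do j = this_block%jlo, this_block%jhi
         do i = this_block%ilo, this_block%ihi
            work(i,j) = real(i+j)           ! per-cell physics stand-in
         enddo
         enddo
      enddo
      write(*,*) 'corner ghost untouched:', work(1,1)
    end program block_loop_sketch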
ij call icepack_warnings_flush(nu_diag) @@ -1576,10 +1578,10 @@ subroutine biogeochemistry (dt, iblk) n_doc, n_dic, n_don, n_fed, n_fep use ice_flux, only: meltbn, melttn, congeln, snoicen, & sst, sss, fsnow, meltsn - use ice_flux_bgc, only: hin_old, flux_bio, flux_bio_atm, faero_atm, & + use ice_flux_bgc, only: hin_old, flux_bio, flux_bio_atm, faero_atm, & nit, amm, sil, dmsp, dms, algalN, doc, don, dic, fed, fep, zaeros, hum use ice_state, only: aicen_init, vicen_init, aicen, vicen, vsnon, & - trcrn, vsnon_init, aice0 + trcrn, vsnon_init, aice0 use ice_timers, only: timer_bgc, ice_timer_start, ice_timer_stop real (kind=dbl_kind), intent(in) :: & @@ -1626,7 +1628,7 @@ subroutine biogeochemistry (dt, iblk) call ice_timer_start(timer_bgc,iblk) ! biogeochemistry - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo @@ -1634,7 +1636,7 @@ subroutine biogeochemistry (dt, iblk) ! Define ocean concentrations for tracers used in simulation do j = jlo, jhi - do i = ilo, ihi + do i = ilo, ihi call icepack_load_ocean_bio_array(max_nbtrcr = icepack_max_nbtrcr, & max_algae = icepack_max_algae, max_don = icepack_max_don, & @@ -1650,8 +1652,8 @@ subroutine biogeochemistry (dt, iblk) ocean_bio_all = ocean_bio_all(i,j,:,iblk)) do mm = 1,nbtrcr - ocean_bio(i,j,mm,iblk) = ocean_bio_all(i,j,bio_index_o(mm),iblk) - enddo ! mm + ocean_bio(i,j,mm,iblk) = ocean_bio_all(i,j,bio_index_o(mm),iblk) + enddo ! mm if (tr_zaero) then do mm = 1, n_zaero ! update aerosols flux_bio_atm(i,j,nlt_zaero(mm),iblk) = faero_atm(i,j,mm,iblk) @@ -1686,13 +1688,13 @@ subroutine biogeochemistry (dt, iblk) snow_bio_net = snow_bio_net(i,j,1:nbtrcr, iblk), & fswthrun = fswthrun (i,j,:, iblk), & sice_rho = sice_rho (i,j,:, iblk), & - fzsal = fzsal (i,j, iblk), & + fzsal = fzsal (i,j, iblk), & fzsal_g = fzsal_g (i,j, iblk), & meltbn = meltbn (i,j,:, iblk), & melttn = melttn (i,j,:, iblk), & congeln = congeln (i,j,:, iblk), & - snoicen = snoicen (i,j,:, iblk), & - sst = sst (i,j, iblk), & + snoicen = snoicen (i,j,:, iblk), & + sst = sst (i,j, iblk), & sss = sss (i,j, iblk), & fsnow = fsnow (i,j, iblk), & meltsn = meltsn (i,j,:, iblk), & diff --git a/cicecore/cicedynB/infrastructure/comm/mpi/ice_boundary.F90 b/cicecore/cicedynB/infrastructure/comm/mpi/ice_boundary.F90 index 76a7659a6..2b64f8932 100644 --- a/cicecore/cicedynB/infrastructure/comm/mpi/ice_boundary.F90 +++ b/cicecore/cicedynB/infrastructure/comm/mpi/ice_boundary.F90 @@ -7,8 +7,8 @@ module ice_boundary ! ! 2007-07-19: Phil Jones, Yoshi Yoshida, John Dennis ! new naming conventions, optimizations during -! initialization, true multi-dimensional updates -! (rather than serial call to two-dimensional updates), +! initialization, true multi-dimensional updates +! (rather than serial call to two-dimensional updates), ! fixes for non-existent blocks ! 2008-01-28: Elizabeth Hunke replaced old routines with new POP ! infrastructure @@ -98,7 +98,7 @@ module ice_boundary !----------------------------------------------------------------------- ! ! to prevent frequent allocate-deallocate for 2d halo updates, create -! a static 2d buffer to be allocated once at creation. if future +! a static 2d buffer to be allocated once at creation. if future ! creation needs larger buffer, resize during the creation. ! !----------------------------------------------------------------------- @@ -177,9 +177,9 @@ function ice_HaloCreate(dist, nsBoundaryType, ewBoundaryType, & seBlock, swBlock, &! 
block id southeast, southwest nbrs srcProc, dstProc, &! source, dest processor locations srcLocalID, dstLocalID, &! local block index of src,dst blocks - maxTmp, &! temp for global maxval - blockSizeX, &! size of default physical domain in X - blockSizeY, &! size of default physical domain in Y + maxTmp, &! temp for global maxval + blockSizeX, &! size of default physical domain in X + blockSizeY, &! size of default physical domain in Y maxSizeSend, maxSizeRecv, &! max buffer sizes numMsgSend, numMsgRecv, &! number of messages for this halo eastMsgSize, westMsgSize, &! nominal sizes for e-w msgs @@ -305,7 +305,7 @@ function ice_HaloCreate(dist, nsBoundaryType, ewBoundaryType, & srcProc, dstProc, msgSize) !*** if a tripole boundary block, also create a local - !*** message into and out of tripole buffer + !*** message into and out of tripole buffer if (tripoleBlock) then !*** copy out of tripole buffer - includes halo @@ -316,7 +316,7 @@ function ice_HaloCreate(dist, nsBoundaryType, ewBoundaryType, & !*** copy in only required if dstProc not same as srcProc if (dstProc /= srcProc) then call ice_HaloIncrementMsgCount(sendCount, recvCount, & - srcProc, srcProc, & + srcProc, srcProc, & msgSize) endif endif @@ -393,7 +393,7 @@ function ice_HaloCreate(dist, nsBoundaryType, ewBoundaryType, & ewBoundaryType, nsBoundaryType) if (neBlock > 0) then - msgSize = cornerMsgSize ! normal corner message + msgSize = cornerMsgSize ! normal corner message call ice_distributionGetBlockLoc(dist, neBlock, dstProc, & dstLocalID) @@ -557,7 +557,7 @@ function ice_HaloCreate(dist, nsBoundaryType, ewBoundaryType, & !----------------------------------------------------------------------- ! -! if messages are received from the same processor, the message is +! if messages are received from the same processor, the message is ! actually a local copy - count them and reset to zero ! !----------------------------------------------------------------------- @@ -585,7 +585,7 @@ function ice_HaloCreate(dist, nsBoundaryType, ewBoundaryType, & ! check to see if they need to be re-sized ! !----------------------------------------------------------------------- - + maxTmp = maxval(sendCount) maxSizeSend = global_maxval(maxTmp, dist) maxTmp = maxval(recvCount) @@ -733,7 +733,7 @@ function ice_HaloCreate(dist, nsBoundaryType, ewBoundaryType, & 'north') !*** if a tripole boundary block, also create a local - !*** message into and out of tripole buffer + !*** message into and out of tripole buffer if (tripoleBlock) then !*** copy out of tripole buffer - includes halo @@ -1102,7 +1102,7 @@ subroutine ice_HaloMask(halo, basehalo, mask) elseif (mask(icel,jcel,abs(nblock)) /= 0) then tmpflag = .true. endif - + if (tmpflag) then scnt = scnt + 1 if (scnt == 1) then @@ -1117,7 +1117,7 @@ subroutine ice_HaloMask(halo, basehalo, mask) endif enddo enddo - halo%numMsgSend = numMsgSend + halo%numMsgSend = numMsgSend numMsgRecv = 0 do nmsg=1,basehalo%numMsgRecv @@ -1134,7 +1134,7 @@ subroutine ice_HaloMask(halo, basehalo, mask) elseif (mask(icel,jcel,abs(nblock)) /= 0) then tmpflag = .true. endif - + if (tmpflag) then scnt = scnt + 1 if (scnt == 1) then @@ -1149,7 +1149,7 @@ subroutine ice_HaloMask(halo, basehalo, mask) endif enddo enddo - halo%numMsgRecv = numMsgRecv + halo%numMsgRecv = numMsgRecv !----------------------------------------------------------------------- @@ -1312,7 +1312,7 @@ subroutine ice_HaloUpdate2DR8(array, halo, & !----------------------------------------------------------------------- ! ! 
while messages are being communicated, fill out halo region -! needed for masked halos to ensure halo values are filled for +! needed for masked halos to ensure halo values are filled for ! halo grid cells that are not updated ! !----------------------------------------------------------------------- @@ -1339,7 +1339,7 @@ subroutine ice_HaloUpdate2DR8(array, halo, & !----------------------------------------------------------------------- ! ! do local copies while waiting for messages to complete -! if srcBlock is zero, that denotes an eliminated land block or a +! if srcBlock is zero, that denotes an eliminated land block or a ! closed boundary where ghost cell values are undefined ! if srcBlock is less than zero, the message is a copy out of the ! tripole buffer and will be treated later @@ -1409,7 +1409,7 @@ subroutine ice_HaloUpdate2DR8(array, halo, & ! ! take care of northern boundary in tripole case ! bufTripole array contains the top nghost+1 rows (u-fold) or nghost+2 rows -! (T-fold) of physical domain for entire (global) top row +! (T-fold) of physical domain for entire (global) top row ! !----------------------------------------------------------------------- @@ -1430,13 +1430,13 @@ subroutine ice_HaloUpdate2DR8(array, halo, & select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = -1 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do i = 2,nxGlobal/2 iDst = nxGlobal - i + 2 x1 = bufTripoleR8(i ,halo%tripoleRows) @@ -1445,20 +1445,20 @@ subroutine ice_HaloUpdate2DR8(array, halo, & bufTripoleR8(i ,halo%tripoleRows) = xavg bufTripoleR8(iDst,halo%tripoleRows) = isign*xavg end do - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 0 joffset = 1 - + case (field_loc_Eface) ! cell center location - + ioffset = 0 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do i = 1,nxGlobal/2 iDst = nxGlobal + 1 - i x1 = bufTripoleR8(i ,halo%tripoleRows) @@ -1467,12 +1467,12 @@ subroutine ice_HaloUpdate2DR8(array, halo, & bufTripoleR8(i ,halo%tripoleRows) = xavg bufTripoleR8(iDst,halo%tripoleRows) = isign*xavg end do - + case (field_loc_Nface) ! cell corner (velocity) location - + ioffset = -1 joffset = 1 - + case default call abort_ice(subname//'ERROR: Unknown field location') end select @@ -1481,18 +1481,18 @@ subroutine ice_HaloUpdate2DR8(array, halo, & select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = 0 joffset = 0 - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 1 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do i = 1,nxGlobal/2 - 1 iDst = nxGlobal - i x1 = bufTripoleR8(i ,halo%tripoleRows) @@ -1501,20 +1501,20 @@ subroutine ice_HaloUpdate2DR8(array, halo, & bufTripoleR8(i ,halo%tripoleRows) = xavg bufTripoleR8(iDst,halo%tripoleRows) = isign*xavg end do - + case (field_loc_Eface) ! cell center location - + ioffset = 1 joffset = 0 - + case (field_loc_Nface) ! 
cell corner (velocity) location - + ioffset = 0 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do i = 1,nxGlobal/2 iDst = nxGlobal + 1 - i x1 = bufTripoleR8(i ,halo%tripoleRows) @@ -1523,7 +1523,7 @@ subroutine ice_HaloUpdate2DR8(array, halo, & bufTripoleR8(i ,halo%tripoleRows) = xavg bufTripoleR8(iDst,halo%tripoleRows) = isign*xavg end do - + case default call abort_ice(subname//'ERROR: Unknown field location') end select @@ -1730,7 +1730,7 @@ subroutine ice_HaloUpdate2DR4(array, halo, & !----------------------------------------------------------------------- ! ! while messages are being communicated, fill out halo region -! needed for masked halos to ensure halo values are filled for +! needed for masked halos to ensure halo values are filled for ! halo grid cells that are not updated ! !----------------------------------------------------------------------- @@ -1752,7 +1752,7 @@ subroutine ice_HaloUpdate2DR4(array, halo, & !----------------------------------------------------------------------- ! ! do local copies while waiting for messages to complete -! if srcBlock is zero, that denotes an eliminated land block or a +! if srcBlock is zero, that denotes an eliminated land block or a ! closed boundary where ghost cell values are undefined ! if srcBlock is less than zero, the message is a copy out of the ! tripole buffer and will be treated later @@ -1828,13 +1828,13 @@ subroutine ice_HaloUpdate2DR4(array, halo, & select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = -1 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do i = 2,nxGlobal/2 iDst = nxGlobal - i + 2 x1 = bufTripoleR4(i ,halo%tripoleRows) @@ -1843,20 +1843,20 @@ subroutine ice_HaloUpdate2DR4(array, halo, & bufTripoleR4(i ,halo%tripoleRows) = xavg bufTripoleR4(iDst,halo%tripoleRows) = isign*xavg end do - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 0 joffset = 1 - + case (field_loc_Eface) ! cell center location - + ioffset = 0 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do i = 1,nxGlobal/2 iDst = nxGlobal + 1 - i x1 = bufTripoleR4(i ,halo%tripoleRows) @@ -1865,32 +1865,32 @@ subroutine ice_HaloUpdate2DR4(array, halo, & bufTripoleR4(i ,halo%tripoleRows) = xavg bufTripoleR4(iDst,halo%tripoleRows) = isign*xavg end do - + case (field_loc_Nface) ! cell corner (velocity) location - + ioffset = -1 joffset = 1 - + case default call abort_ice(subname//'ERROR: Unknown field location') - end select + end select else ! tripole u-fold - + select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = 0 joffset = 0 - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 1 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do i = 1,nxGlobal/2 - 1 iDst = nxGlobal - i x1 = bufTripoleR4(i ,halo%tripoleRows) @@ -1899,20 +1899,20 @@ subroutine ice_HaloUpdate2DR4(array, halo, & bufTripoleR4(i ,halo%tripoleRows) = xavg bufTripoleR4(iDst,halo%tripoleRows) = isign*xavg end do - + case (field_loc_Eface) ! cell center location - + ioffset = 1 joffset = 0 - + case (field_loc_Nface) ! 
cell corner (velocity) location - + ioffset = 0 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do i = 1,nxGlobal/2 iDst = nxGlobal + 1 - i x1 = bufTripoleR4(i ,halo%tripoleRows) @@ -1921,7 +1921,7 @@ subroutine ice_HaloUpdate2DR4(array, halo, & bufTripoleR4(i ,halo%tripoleRows) = xavg bufTripoleR4(iDst,halo%tripoleRows) = isign*xavg end do - + case default call abort_ice(subname//'ERROR: Unknown field location') end select @@ -2128,7 +2128,7 @@ subroutine ice_HaloUpdate2DI4(array, halo, & !----------------------------------------------------------------------- ! ! while messages are being communicated, fill out halo region -! needed for masked halos to ensure halo values are filled for +! needed for masked halos to ensure halo values are filled for ! halo grid cells that are not updated ! !----------------------------------------------------------------------- @@ -2150,7 +2150,7 @@ subroutine ice_HaloUpdate2DI4(array, halo, & !----------------------------------------------------------------------- ! ! do local copies while waiting for messages to complete -! if srcBlock is zero, that denotes an eliminated land block or a +! if srcBlock is zero, that denotes an eliminated land block or a ! closed boundary where ghost cell values are undefined ! if srcBlock is less than zero, the message is a copy out of the ! tripole buffer and will be treated later @@ -2226,13 +2226,13 @@ subroutine ice_HaloUpdate2DI4(array, halo, & select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = -1 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do i = 2,nxGlobal/2 iDst = nxGlobal - i + 2 x1 = bufTripoleI4(i ,halo%tripoleRows) @@ -2241,20 +2241,20 @@ subroutine ice_HaloUpdate2DI4(array, halo, & bufTripoleI4(i ,halo%tripoleRows) = xavg bufTripoleI4(iDst,halo%tripoleRows) = isign*xavg end do - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 0 joffset = 1 - + case (field_loc_Eface) ! cell center location - + ioffset = 0 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do i = 1,nxGlobal/2 iDst = nxGlobal + 1 - i x1 = bufTripoleI4(i ,halo%tripoleRows) @@ -2263,32 +2263,32 @@ subroutine ice_HaloUpdate2DI4(array, halo, & bufTripoleI4(i ,halo%tripoleRows) = xavg bufTripoleI4(iDst,halo%tripoleRows) = isign*xavg end do - + case (field_loc_Nface) ! cell corner (velocity) location - + ioffset = -1 joffset = 1 - + case default call abort_ice(subname//'ERROR: Unknown field location') end select - else ! tripole u-fold - + else ! tripole u-fold + select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = 0 joffset = 0 - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 1 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do i = 1,nxGlobal/2 - 1 iDst = nxGlobal - i x1 = bufTripoleI4(i ,halo%tripoleRows) @@ -2297,20 +2297,20 @@ subroutine ice_HaloUpdate2DI4(array, halo, & bufTripoleI4(i ,halo%tripoleRows) = xavg bufTripoleI4(iDst,halo%tripoleRows) = isign*xavg end do - + case (field_loc_Eface) ! cell center location - + ioffset = 1 joffset = 0 - + case (field_loc_Nface) ! 
cell corner (velocity) location - + ioffset = 0 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do i = 1,nxGlobal/2 iDst = nxGlobal + 1 - i x1 = bufTripoleI4(i ,halo%tripoleRows) @@ -2319,7 +2319,7 @@ subroutine ice_HaloUpdate2DI4(array, halo, & bufTripoleI4(i ,halo%tripoleRows) = xavg bufTripoleI4(iDst,halo%tripoleRows) = isign*xavg end do - + case default call abort_ice(subname//'ERROR: Unknown field location') end select @@ -2451,7 +2451,7 @@ subroutine ice_HaloUpdate3DR8(array, halo, & real (dbl_kind), dimension(:,:,:), allocatable :: & bufTripole ! 3d tripole buffer - integer (int_kind) :: len ! length of message + integer (int_kind) :: len ! length of message character(len=*), parameter :: subname = '(ice_HaloUpdate3DR8)' @@ -2554,7 +2554,7 @@ subroutine ice_HaloUpdate3DR8(array, halo, & !----------------------------------------------------------------------- ! ! while messages are being communicated, fill out halo region -! needed for masked halos to ensure halo values are filled for +! needed for masked halos to ensure halo values are filled for ! halo grid cells that are not updated ! !----------------------------------------------------------------------- @@ -2576,7 +2576,7 @@ subroutine ice_HaloUpdate3DR8(array, halo, & !----------------------------------------------------------------------- ! ! do local copies while waiting for messages to complete -! if srcBlock is zero, that denotes an eliminated land block or a +! if srcBlock is zero, that denotes an eliminated land block or a ! closed boundary where ghost cell values are undefined ! if srcBlock is less than zero, the message is a copy out of the ! tripole buffer and will be treated later @@ -2665,10 +2665,10 @@ subroutine ice_HaloUpdate3DR8(array, halo, & select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = -1 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value @@ -2682,20 +2682,20 @@ subroutine ice_HaloUpdate3DR8(array, halo, & bufTripole(iDst,halo%tripoleRows,k) = isign*xavg end do end do - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 0 joffset = 1 - + case (field_loc_Eface) ! cell center location - + ioffset = 0 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do k=1,nz do i = 1,nxGlobal/2 iDst = nxGlobal + 1 - i @@ -2706,32 +2706,32 @@ subroutine ice_HaloUpdate3DR8(array, halo, & bufTripole(iDst,halo%tripoleRows,k) = isign*xavg end do end do - + case (field_loc_Nface) ! cell corner (velocity) location - + ioffset = -1 joffset = 1 - + case default call abort_ice(subname//'ERROR: Unknown field location') - end select - + end select + else ! tripole u-fold - + select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = 0 joffset = 0 - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 1 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do k=1,nz do i = 1,nxGlobal/2 - 1 iDst = nxGlobal - i @@ -2742,20 +2742,20 @@ subroutine ice_HaloUpdate3DR8(array, halo, & bufTripole(iDst,halo%tripoleRows,k) = isign*xavg end do end do - + case (field_loc_Eface) ! cell center location - + ioffset = 1 joffset = 0 - + case (field_loc_Nface) ! 
cell corner (velocity) location - + ioffset = 0 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do k=1,nz do i = 1,nxGlobal/2 iDst = nxGlobal + 1 - i @@ -2766,7 +2766,7 @@ subroutine ice_HaloUpdate3DR8(array, halo, & bufTripole(iDst,halo%tripoleRows,k) = isign*xavg end do end do - + case default call abort_ice(subname//'ERROR: Unknown field location') end select @@ -2908,7 +2908,7 @@ subroutine ice_HaloUpdate3DR4(array, halo, & real (real_kind), dimension(:,:,:), allocatable :: & bufTripole ! 3d tripole buffer - integer (int_kind) :: len ! length of message + integer (int_kind) :: len ! length of message character(len=*), parameter :: subname = '(ice_HaloUpdate3DR4)' @@ -3011,7 +3011,7 @@ subroutine ice_HaloUpdate3DR4(array, halo, & !----------------------------------------------------------------------- ! ! while messages are being communicated, fill out halo region -! needed for masked halos to ensure halo values are filled for +! needed for masked halos to ensure halo values are filled for ! halo grid cells that are not updated ! !----------------------------------------------------------------------- @@ -3033,7 +3033,7 @@ subroutine ice_HaloUpdate3DR4(array, halo, & !----------------------------------------------------------------------- ! ! do local copies while waiting for messages to complete -! if srcBlock is zero, that denotes an eliminated land block or a +! if srcBlock is zero, that denotes an eliminated land block or a ! closed boundary where ghost cell values are undefined ! if srcBlock is less than zero, the message is a copy out of the ! tripole buffer and will be treated later @@ -3122,10 +3122,10 @@ subroutine ice_HaloUpdate3DR4(array, halo, & select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = -1 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value @@ -3139,20 +3139,20 @@ subroutine ice_HaloUpdate3DR4(array, halo, & bufTripole(iDst,halo%tripoleRows,k) = isign*xavg end do end do - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 0 joffset = 1 - + case (field_loc_Eface) ! cell center location - + ioffset = 0 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do k=1,nz do i = 1,nxGlobal/2 iDst = nxGlobal + 1 - i @@ -3163,32 +3163,32 @@ subroutine ice_HaloUpdate3DR4(array, halo, & bufTripole(iDst,halo%tripoleRows,k) = isign*xavg end do end do - + case (field_loc_Nface) ! cell corner (velocity) location - + ioffset = -1 joffset = 1 - + case default call abort_ice(subname//'ERROR: Unknown field location') - end select - - else ! tripole u-fold - + end select + + else ! tripole u-fold + select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = 0 joffset = 0 - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 1 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do k=1,nz do i = 1,nxGlobal/2 - 1 iDst = nxGlobal - i @@ -3199,20 +3199,20 @@ subroutine ice_HaloUpdate3DR4(array, halo, & bufTripole(iDst,halo%tripoleRows,k) = isign*xavg end do end do - + case (field_loc_Eface) ! cell center location - + ioffset = 1 joffset = 0 - + case (field_loc_Nface) ! 
cell corner (velocity) location - + ioffset = 0 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do k=1,nz do i = 1,nxGlobal/2 iDst = nxGlobal + 1 - i @@ -3223,7 +3223,7 @@ subroutine ice_HaloUpdate3DR4(array, halo, & bufTripole(iDst,halo%tripoleRows,k) = isign*xavg end do end do - + case default call abort_ice(subname//'ERROR: Unknown field location') end select @@ -3468,7 +3468,7 @@ subroutine ice_HaloUpdate3DI4(array, halo, & !----------------------------------------------------------------------- ! ! while messages are being communicated, fill out halo region -! needed for masked halos to ensure halo values are filled for +! needed for masked halos to ensure halo values are filled for ! halo grid cells that are not updated ! !----------------------------------------------------------------------- @@ -3490,7 +3490,7 @@ subroutine ice_HaloUpdate3DI4(array, halo, & !----------------------------------------------------------------------- ! ! do local copies while waiting for messages to complete -! if srcBlock is zero, that denotes an eliminated land block or a +! if srcBlock is zero, that denotes an eliminated land block or a ! closed boundary where ghost cell values are undefined ! if srcBlock is less than zero, the message is a copy out of the ! tripole buffer and will be treated later @@ -3563,7 +3563,7 @@ subroutine ice_HaloUpdate3DI4(array, halo, & !----------------------------------------------------------------------- if (nxGlobal > 0) then - + select case (fieldKind) case (field_type_scalar) isign = 1 @@ -3579,10 +3579,10 @@ subroutine ice_HaloUpdate3DI4(array, halo, & select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = -1 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value @@ -3596,20 +3596,20 @@ subroutine ice_HaloUpdate3DI4(array, halo, & bufTripole(iDst,halo%tripoleRows,k) = isign*xavg end do end do - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 0 joffset = 1 - + case (field_loc_Eface) ! cell center location - + ioffset = 0 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do k=1,nz do i = 1,nxGlobal/2 iDst = nxGlobal + 1 - i @@ -3620,32 +3620,32 @@ subroutine ice_HaloUpdate3DI4(array, halo, & bufTripole(iDst,halo%tripoleRows,k) = isign*xavg end do end do - + case (field_loc_Nface) ! cell corner (velocity) location - + ioffset = -1 joffset = 1 - + case default call abort_ice(subname//'ERROR: Unknown field location') end select - - else ! tripole u-fold + + else ! tripole u-fold select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = 0 joffset = 0 - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 1 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do k=1,nz do i = 1,nxGlobal/2 - 1 iDst = nxGlobal - i @@ -3656,20 +3656,20 @@ subroutine ice_HaloUpdate3DI4(array, halo, & bufTripole(iDst,halo%tripoleRows,k) = isign*xavg end do end do - + case (field_loc_Eface) ! cell center location - + ioffset = 1 joffset = 0 - + case (field_loc_Nface) ! 
cell corner (velocity) location - + ioffset = 0 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do k=1,nz do i = 1,nxGlobal/2 iDst = nxGlobal + 1 - i @@ -3680,11 +3680,11 @@ subroutine ice_HaloUpdate3DI4(array, halo, & bufTripole(iDst,halo%tripoleRows,k) = isign*xavg end do end do - + case default call abort_ice(subname//'ERROR: Unknown field location') end select - + endif !*** copy out of global tripole buffer into local @@ -3929,7 +3929,7 @@ subroutine ice_HaloUpdate4DR8(array, halo, & !----------------------------------------------------------------------- ! ! while messages are being communicated, fill out halo region -! needed for masked halos to ensure halo values are filled for +! needed for masked halos to ensure halo values are filled for ! halo grid cells that are not updated ! !----------------------------------------------------------------------- @@ -3951,7 +3951,7 @@ subroutine ice_HaloUpdate4DR8(array, halo, & !----------------------------------------------------------------------- ! ! do local copies while waiting for messages to complete -! if srcBlock is zero, that denotes an eliminated land block or a +! if srcBlock is zero, that denotes an eliminated land block or a ! closed boundary where ghost cell values are undefined ! if srcBlock is less than zero, the message is a copy out of the ! tripole buffer and will be treated later @@ -4034,7 +4034,7 @@ subroutine ice_HaloUpdate4DR8(array, halo, & !----------------------------------------------------------------------- if (nxGlobal > 0) then - + select case (fieldKind) case (field_type_scalar) isign = 1 @@ -4050,10 +4050,10 @@ subroutine ice_HaloUpdate4DR8(array, halo, & select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = -1 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value @@ -4069,17 +4069,17 @@ subroutine ice_HaloUpdate4DR8(array, halo, & end do end do end do - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 0 joffset = 1 - + case (field_loc_Eface) ! cell center location - + ioffset = 0 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value @@ -4095,32 +4095,32 @@ subroutine ice_HaloUpdate4DR8(array, halo, & end do end do end do - + case (field_loc_Nface) ! cell corner (velocity) location - + ioffset = -1 joffset = 1 - + case default call abort_ice(subname//'ERROR: Unknown field location') - end select - - else ! tripole u-fold + end select + + else ! tripole u-fold select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = 0 joffset = 0 - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 1 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do l=1,nt do k=1,nz do i = 1,nxGlobal/2 - 1 @@ -4133,20 +4133,20 @@ subroutine ice_HaloUpdate4DR8(array, halo, & end do end do end do - + case (field_loc_Eface) ! cell center location - + ioffset = 1 joffset = 0 - + case (field_loc_Nface) ! 
cell corner (velocity) location - + ioffset = 0 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do l=1,nt do k=1,nz do i = 1,nxGlobal/2 @@ -4159,11 +4159,11 @@ subroutine ice_HaloUpdate4DR8(array, halo, & end do end do end do - + case default call abort_ice(subname//'ERROR: Unknown field location') end select - + endif !*** copy out of global tripole buffer into local @@ -4410,7 +4410,7 @@ subroutine ice_HaloUpdate4DR4(array, halo, & !----------------------------------------------------------------------- ! ! while messages are being communicated, fill out halo region -! needed for masked halos to ensure halo values are filled for +! needed for masked halos to ensure halo values are filled for ! halo grid cells that are not updated ! !----------------------------------------------------------------------- @@ -4432,7 +4432,7 @@ subroutine ice_HaloUpdate4DR4(array, halo, & !----------------------------------------------------------------------- ! ! do local copies while waiting for messages to complete -! if srcBlock is zero, that denotes an eliminated land block or a +! if srcBlock is zero, that denotes an eliminated land block or a ! closed boundary where ghost cell values are undefined ! if srcBlock is less than zero, the message is a copy out of the ! tripole buffer and will be treated later @@ -4515,7 +4515,7 @@ subroutine ice_HaloUpdate4DR4(array, halo, & !----------------------------------------------------------------------- if (nxGlobal > 0) then - + select case (fieldKind) case (field_type_scalar) isign = 1 @@ -4531,10 +4531,10 @@ subroutine ice_HaloUpdate4DR4(array, halo, & select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = -1 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value @@ -4550,17 +4550,17 @@ subroutine ice_HaloUpdate4DR4(array, halo, & end do end do end do - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 0 joffset = 1 - + case (field_loc_Eface) ! cell center location - + ioffset = 0 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value @@ -4576,32 +4576,32 @@ subroutine ice_HaloUpdate4DR4(array, halo, & end do end do end do - + case (field_loc_Nface) ! cell corner (velocity) location - + ioffset = -1 joffset = 1 - + case default call abort_ice(subname//'ERROR: Unknown field location') - end select - - else ! tripole u-fold + end select + + else ! tripole u-fold select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = 0 joffset = 0 - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 1 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do l=1,nt do k=1,nz do i = 1,nxGlobal/2 - 1 @@ -4614,20 +4614,20 @@ subroutine ice_HaloUpdate4DR4(array, halo, & end do end do end do - + case (field_loc_Eface) ! cell center location - + ioffset = 1 joffset = 0 - + case (field_loc_Nface) ! 
cell corner (velocity) location - + ioffset = 0 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do l=1,nt do k=1,nz do i = 1,nxGlobal/2 @@ -4640,11 +4640,11 @@ subroutine ice_HaloUpdate4DR4(array, halo, & end do end do end do - + case default call abort_ice(subname//'ERROR: Unknown field location') end select - + endif !*** copy out of global tripole buffer into local @@ -4891,7 +4891,7 @@ subroutine ice_HaloUpdate4DI4(array, halo, & !----------------------------------------------------------------------- ! ! while messages are being communicated, fill out halo region -! needed for masked halos to ensure halo values are filled for +! needed for masked halos to ensure halo values are filled for ! halo grid cells that are not updated ! !----------------------------------------------------------------------- @@ -4913,7 +4913,7 @@ subroutine ice_HaloUpdate4DI4(array, halo, & !----------------------------------------------------------------------- ! ! do local copies while waiting for messages to complete -! if srcBlock is zero, that denotes an eliminated land block or a +! if srcBlock is zero, that denotes an eliminated land block or a ! closed boundary where ghost cell values are undefined ! if srcBlock is less than zero, the message is a copy out of the ! tripole buffer and will be treated later @@ -5012,10 +5012,10 @@ subroutine ice_HaloUpdate4DI4(array, halo, & select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = -1 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value @@ -5031,17 +5031,17 @@ subroutine ice_HaloUpdate4DI4(array, halo, & end do end do end do - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 0 joffset = 1 - + case (field_loc_Eface) ! cell center location - + ioffset = 0 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value @@ -5057,32 +5057,32 @@ subroutine ice_HaloUpdate4DI4(array, halo, & end do end do end do - + case (field_loc_Nface) ! cell corner (velocity) location - + ioffset = -1 joffset = 1 - + case default call abort_ice(subname//'ERROR: Unknown field location') - end select - - else ! tripole u-fold + end select + + else ! tripole u-fold select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = 0 joffset = 0 - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 1 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do l=1,nt do k=1,nz do i = 1,nxGlobal/2 - 1 @@ -5095,20 +5095,20 @@ subroutine ice_HaloUpdate4DI4(array, halo, & end do end do end do - + case (field_loc_Eface) ! cell center location - + ioffset = 1 joffset = 0 - + case (field_loc_Nface) ! cell corner (velocity) location - + ioffset = 0 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do l=1,nt do k=1,nz do i = 1,nxGlobal/2 @@ -5121,11 +5121,11 @@ subroutine ice_HaloUpdate4DI4(array, halo, & end do end do end do - + case default call abort_ice(subname//'ERROR: Unknown field location') end select - + endif !*** copy out of global tripole buffer into local @@ -5354,7 +5354,7 @@ subroutine ice_HaloUpdate_stress(array1, array2, halo, & !----------------------------------------------------------------------- ! ! do local copies while waiting for messages to complete -! 
if srcBlock is zero, that denotes an eliminated land block or a +! if srcBlock is zero, that denotes an eliminated land block or a ! closed boundary where ghost cell values are undefined ! if srcBlock is less than zero, the message is a copy out of the ! tripole buffer and will be treated later @@ -5404,7 +5404,7 @@ subroutine ice_HaloUpdate_stress(array1, array2, halo, & ! ! take care of northern boundary in tripole case ! bufTripole array contains the top haloWidth+1 rows of physical -! domain for entire (global) top row +! domain for entire (global) top row ! !----------------------------------------------------------------------- @@ -5432,12 +5432,12 @@ subroutine ice_HaloUpdate_stress(array1, array2, halo, & ioffset = 1 joffset = 1 - case (field_loc_Eface) + case (field_loc_Eface) ioffset = 1 joffset = 0 - case (field_loc_Nface) + case (field_loc_Nface) ioffset = 0 joffset = 1 @@ -5468,7 +5468,7 @@ subroutine ice_HaloUpdate_stress(array1, array2, halo, & iSrc = iSrc - ioffset jSrc = jSrc - joffset if (iSrc == 0) iSrc = nxGlobal - + !*** for center and Eface, do not need to replace !*** top row of physical domain, so jSrc should be !*** out of range and skipped @@ -5537,7 +5537,7 @@ subroutine ice_HaloIncrementMsgCount(sndCounter, rcvCounter, & !----------------------------------------------------------------------- ! -! if destination all land or outside closed boundary (dstProc = 0), +! if destination all land or outside closed boundary (dstProc = 0), ! then no send is necessary, so do the rest only for dstProc /= 0 ! !----------------------------------------------------------------------- @@ -5546,7 +5546,7 @@ subroutine ice_HaloIncrementMsgCount(sndCounter, rcvCounter, & !----------------------------------------------------------------------- ! -! if the current processor is the source, must send data +! if the current processor is the source, must send data ! local copy if dstProc = srcProc ! !----------------------------------------------------------------------- @@ -5556,14 +5556,14 @@ subroutine ice_HaloIncrementMsgCount(sndCounter, rcvCounter, & !----------------------------------------------------------------------- ! -! if the current processor is the destination, must receive data +! if the current processor is the destination, must receive data ! local copy if dstProc = srcProc ! !----------------------------------------------------------------------- if (dstProc == my_task + 1) then - if (srcProc > 0) then + if (srcProc > 0) then !*** the source block has ocean points !*** count as a receive from srcProc @@ -5631,7 +5631,7 @@ subroutine ice_HaloMsgCreate(halo, srcBlock, srcProc, srcLocalID, & !----------------------------------------------------------------------- ! -! if destination all land or outside closed boundary (dstProc = 0), +! if destination all land or outside closed boundary (dstProc = 0), ! then no send is necessary, so do the rest only for dstProc /= 0 ! !----------------------------------------------------------------------- @@ -5673,7 +5673,7 @@ subroutine ice_HaloMsgCreate(halo, srcBlock, srcProc, srcLocalID, & !----------------------------------------------------------------------- if (srcProc == my_task+1 .and. 
& - dstProc == my_task+1) then + dstProc == my_task+1) then !*** compute addresses based on direction @@ -5764,7 +5764,7 @@ subroutine ice_HaloMsgCreate(halo, srcBlock, srcProc, srcLocalID, & if (jeSrc - jbSrc + 1 < halo%tripoleRows) then call abort_ice(subname//'ERROR: not enough points in block for tripole') return - endif + endif do j=1,halo%tripoleRows do i=1,ieSrc-ibSrc+1 @@ -5784,7 +5784,7 @@ subroutine ice_HaloMsgCreate(halo, srcBlock, srcProc, srcLocalID, & else if (srcBlock < 0 .and. dstBlock > 0) then - !*** tripole grid - set up for copying out of + !*** tripole grid - set up for copying out of !*** tripole buffer into ghost cell domains !*** include e-w ghost cells @@ -5950,12 +5950,12 @@ subroutine ice_HaloMsgCreate(halo, srcBlock, srcProc, srcLocalID, & !----------------------------------------------------------------------- ! -! if dest block is local and source block does not exist, create a +! if dest block is local and source block does not exist, create a ! local copy to fill halo with a fill value ! !----------------------------------------------------------------------- - else if (srcProc == 0 .and. dstProc == my_task+1) then + else if (srcProc == 0 .and. dstProc == my_task+1) then msgIndx = halo%numLocalCopies @@ -6184,7 +6184,7 @@ subroutine ice_HaloMsgCreate(halo, srcBlock, srcProc, srcLocalID, & bufSize = halo%sizeSend(n) exit srchSend endif - end do srchSend + end do srchSend if (msgIndx == 0) then msgIndx = halo%numMsgSend + 1 @@ -6255,7 +6255,7 @@ subroutine ice_HaloMsgCreate(halo, srcBlock, srcProc, srcLocalID, & halo%sizeSend(msgIndx) = bufSize - else + else !*** tripole block - send top halo%tripoleRows rows of phys domain @@ -6318,7 +6318,7 @@ subroutine ice_HaloMsgCreate(halo, srcBlock, srcProc, srcLocalID, & halo%sizeSend(msgIndx) = bufSize - else + else !*** tripole block - send top halo%tripoleRows rows of phys domain @@ -6361,7 +6361,7 @@ subroutine ice_HaloMsgCreate(halo, srcBlock, srcProc, srcLocalID, & halo%sizeSend(msgIndx) = bufSize - else + else !*** tripole block - send top halo%tripoleRows rows of phys domain @@ -6447,7 +6447,7 @@ subroutine ice_HaloMsgCreate(halo, srcBlock, srcProc, srcLocalID, & bufSize = halo%sizeRecv(n) exit srchRecv endif - end do srchRecv + end do srchRecv if (msgIndx == 0) then msgIndx = halo%numMsgRecv + 1 @@ -6705,14 +6705,14 @@ end subroutine ice_HaloMsgCreate subroutine ice_HaloExtrapolate2DR8(ARRAY,dist,ew_bndy_type,ns_bndy_type) -! This subroutine extrapolates ARRAY values into the first row or column -! of ghost cells, and is intended for grid variables whose ghost cells -! would otherwise be set using the default boundary conditions (Dirichlet +! This subroutine extrapolates ARRAY values into the first row or column +! of ghost cells, and is intended for grid variables whose ghost cells +! would otherwise be set using the default boundary conditions (Dirichlet ! or Neumann). ! Note: This routine will need to be modified for nghost > 1. ! We assume padding occurs only on east and north edges. ! -! This is the specific interface for double precision arrays +! This is the specific interface for double precision arrays ! 
corresponding to the generic interface ice_HaloExtrapolate use ice_blocks, only: block, nblocks_x, nblocks_y, get_block diff --git a/cicecore/cicedynB/infrastructure/comm/mpi/ice_communicate.F90 b/cicecore/cicedynB/infrastructure/comm/mpi/ice_communicate.F90 index 00f427144..fab0c9218 100644 --- a/cicecore/cicedynB/infrastructure/comm/mpi/ice_communicate.F90 +++ b/cicecore/cicedynB/infrastructure/comm/mpi/ice_communicate.F90 @@ -72,7 +72,7 @@ subroutine init_communicate(mpicom) if (present(mpicom)) then ice_comm = mpicom else - ice_comm = MPI_COMM_WORLD ! Global communicator + ice_comm = MPI_COMM_WORLD ! Global communicator endif call MPI_INITIALIZED(flag,ierr) diff --git a/cicecore/cicedynB/infrastructure/comm/mpi/ice_exit.F90 b/cicecore/cicedynB/infrastructure/comm/mpi/ice_exit.F90 index 061fd63c5..eafb3228f 100644 --- a/cicecore/cicedynB/infrastructure/comm/mpi/ice_exit.F90 +++ b/cicecore/cicedynB/infrastructure/comm/mpi/ice_exit.F90 @@ -1,6 +1,6 @@ !======================================================================= ! -! Exit the model. +! Exit the model. ! authors William H. Lipscomb (LANL) ! Elizabeth C. Hunke (LANL) ! 2006 ECH: separated serial and mpi functionality diff --git a/cicecore/cicedynB/infrastructure/comm/mpi/ice_gather_scatter.F90 b/cicecore/cicedynB/infrastructure/comm/mpi/ice_gather_scatter.F90 index 0a58769db..030deabca 100644 --- a/cicecore/cicedynB/infrastructure/comm/mpi/ice_gather_scatter.F90 +++ b/cicecore/cicedynB/infrastructure/comm/mpi/ice_gather_scatter.F90 @@ -55,13 +55,13 @@ module ice_gather_scatter module procedure gather_global_dbl, & gather_global_real, & gather_global_int - end interface + end interface interface scatter_global module procedure scatter_global_dbl, & scatter_global_real, & scatter_global_int - end interface + end interface !----------------------------------------------------------------------- ! @@ -80,7 +80,7 @@ subroutine gather_global_dbl(ARRAY_G, ARRAY, dst_task, src_dist, spc_val) ! This subroutine gathers a distributed array to a global-sized ! array on the processor dst_task. ! -! This is the specific inteface for double precision arrays +! This is the specific interface for double precision arrays ! corresponding to the generic interface gather_global. It is shown ! to provide information on the generic interface (the generic ! interface is identical, but chooses a specific inteface based @@ -141,7 +141,7 @@ subroutine gather_global_dbl(ARRAY_G, ARRAY, dst_task, src_dist, spc_val) !----------------------------------------------------------------------- ! -! if this task is the dst_task, copy local blocks into the global +! if this task is the dst_task, copy local blocks into the global ! array and post receives for non-local blocks. ! !----------------------------------------------------------------------- @@ -308,7 +308,7 @@ subroutine gather_global_real(ARRAY_G, ARRAY, dst_task, src_dist) !----------------------------------------------------------------------- ! -! if this task is the dst_task, copy local blocks into the global +! if this task is the dst_task, copy local blocks into the global ! array and post receives for non-local blocks. ! !----------------------------------------------------------------------- @@ -475,7 +475,7 @@ subroutine gather_global_int(ARRAY_G, ARRAY, dst_task, src_dist) !----------------------------------------------------------------------- ! -! if this task is the dst_task, copy local blocks into the global +! if this task is the dst_task, copy local blocks into the global ! 
array and post receives for non-local blocks. ! !----------------------------------------------------------------------- @@ -597,7 +597,7 @@ subroutine gather_global_ext_dbl(ARRAY_G, ARRAY, dst_task, src_dist, spc_val) real (dbl_kind), intent(in), optional :: & spc_val - + !----------------------------------------------------------------------- ! ! local variables @@ -643,7 +643,7 @@ subroutine gather_global_ext_dbl(ARRAY_G, ARRAY, dst_task, src_dist, spc_val) !----------------------------------------------------------------------- ! -! if this task is the dst_task, copy local blocks into the global +! if this task is the dst_task, copy local blocks into the global ! array and post receives for non-local blocks. ! !----------------------------------------------------------------------- @@ -907,7 +907,7 @@ subroutine gather_global_ext_int(ARRAY_G, ARRAY, dst_task, src_dist, spc_val) integer (int_kind), intent(in), optional :: & spc_val - + !----------------------------------------------------------------------- ! ! local variables @@ -953,7 +953,7 @@ subroutine gather_global_ext_int(ARRAY_G, ARRAY, dst_task, src_dist, spc_val) !----------------------------------------------------------------------- ! -! if this task is the dst_task, copy local blocks into the global +! if this task is the dst_task, copy local blocks into the global ! array and post receives for non-local blocks. ! !----------------------------------------------------------------------- @@ -1217,7 +1217,7 @@ subroutine gather_global_ext_log(ARRAY_G, ARRAY, dst_task, src_dist, spc_val) logical (log_kind), intent(in), optional :: & spc_val - + !----------------------------------------------------------------------- ! ! local variables @@ -1263,7 +1263,7 @@ subroutine gather_global_ext_log(ARRAY_G, ARRAY, dst_task, src_dist, spc_val) !----------------------------------------------------------------------- ! -! if this task is the dst_task, copy local blocks into the global +! if this task is the dst_task, copy local blocks into the global ! array and post receives for non-local blocks. ! !----------------------------------------------------------------------- @@ -1513,7 +1513,7 @@ subroutine scatter_global_dbl(ARRAY, ARRAY_G, src_task, dst_dist, & ! This subroutine scatters a global-sized array to a distributed array. ! -! This is the specific interface for double precision arrays +! This is the specific interface for double precision arrays ! corresponding to the generic interface scatter_global. integer (int_kind), intent(in) :: & @@ -1552,9 +1552,6 @@ subroutine scatter_global_dbl(ARRAY, ARRAY_G, src_task, dst_dist, & type (block) :: & this_block ! block info for current block - integer (int_kind), dimension(MPI_STATUS_SIZE) :: & - status - integer (int_kind), dimension(:), allocatable :: & rcv_request ! request array for receives @@ -1628,7 +1625,7 @@ subroutine scatter_global_dbl(ARRAY, ARRAY_G, src_task, dst_dist, & !----------------------------------------------------------------------- ! -! if this task is the src_task, copy blocks of global array into +! if this task is the src_task, copy blocks of global array into ! message buffer and send to other processors. also copy local blocks ! !----------------------------------------------------------------------- @@ -1941,9 +1938,6 @@ subroutine scatter_global_real(ARRAY, ARRAY_G, src_task, dst_dist, & type (block) :: & this_block ! 
block info for current block - integer (int_kind), dimension(MPI_STATUS_SIZE) :: & - status - integer (int_kind), dimension(:), allocatable :: & rcv_request ! request array for receives @@ -2017,7 +2011,7 @@ subroutine scatter_global_real(ARRAY, ARRAY_G, src_task, dst_dist, & !----------------------------------------------------------------------- ! -! if this task is the src_task, copy blocks of global array into +! if this task is the src_task, copy blocks of global array into ! message buffer and send to other processors. also copy local blocks ! !----------------------------------------------------------------------- @@ -2330,9 +2324,6 @@ subroutine scatter_global_int(ARRAY, ARRAY_G, src_task, dst_dist, & type (block) :: & this_block ! block info for current block - integer (int_kind), dimension(MPI_STATUS_SIZE) :: & - status - integer (int_kind), dimension(:), allocatable :: & rcv_request ! request array for receives @@ -2406,7 +2397,7 @@ subroutine scatter_global_int(ARRAY, ARRAY_G, src_task, dst_dist, & !----------------------------------------------------------------------- ! -! if this task is the src_task, copy blocks of global array into +! if this task is the src_task, copy blocks of global array into ! message buffer and send to other processors. also copy local blocks ! !----------------------------------------------------------------------- @@ -2666,7 +2657,7 @@ subroutine scatter_global_ext(ARRAY, ARRAY_G, src_task, dst_dist) ! This subroutine scatters a global-sized array to a distributed array. ! -! This is the specific interface for double precision arrays +! This is the specific interface for double precision arrays ! corresponding to the generic interface scatter_global. integer (int_kind), intent(in) :: & @@ -2698,9 +2689,6 @@ subroutine scatter_global_ext(ARRAY, ARRAY_G, src_task, dst_dist) type (block) :: & this_block ! block info for current block - integer (int_kind), dimension(MPI_STATUS_SIZE) :: & - status - integer (int_kind), dimension(:), allocatable :: & rcv_request ! request array for receives @@ -2722,7 +2710,7 @@ subroutine scatter_global_ext(ARRAY, ARRAY_G, src_task, dst_dist) !----------------------------------------------------------------------- ! -! if this task is the src_task, copy blocks of global array into +! if this task is the src_task, copy blocks of global array into ! message buffer and send to other processors. also copy local blocks ! !----------------------------------------------------------------------- @@ -3034,9 +3022,6 @@ subroutine scatter_global_stress(ARRAY, ARRAY_G1, ARRAY_G2, & type (block) :: & this_block ! block info for current block - integer (int_kind), dimension(MPI_STATUS_SIZE) :: & - status - integer (int_kind), dimension(:), allocatable :: & rcv_request ! request array for receives @@ -3058,17 +3043,17 @@ subroutine scatter_global_stress(ARRAY, ARRAY_G1, ARRAY_G2, & this_block = get_block(1,1) ! for the tripoleTflag - all blocks have it if (this_block%tripoleTFlag) then - xoffset = 2 ! treat stresses as cell-centered scalars (they are not + xoffset = 2 ! treat stresses as cell-centered scalars (they are not yoffset = 0 ! shared with neighboring grid cells) else - xoffset = 1 ! treat stresses as cell-centered scalars (they are not + xoffset = 1 ! treat stresses as cell-centered scalars (they are not yoffset = 1 ! shared with neighboring grid cells) endif isign = 1 !----------------------------------------------------------------------- ! -! if this task is the src_task, copy blocks of global array into +! 
if this task is the src_task, copy blocks of global array into ! message buffer and send to other processors. also copy local blocks ! !----------------------------------------------------------------------- diff --git a/cicecore/cicedynB/infrastructure/comm/mpi/ice_global_reductions.F90 b/cicecore/cicedynB/infrastructure/comm/mpi/ice_global_reductions.F90 index 0728ac105..a5fed760b 100644 --- a/cicecore/cicedynB/infrastructure/comm/mpi/ice_global_reductions.F90 +++ b/cicecore/cicedynB/infrastructure/comm/mpi/ice_global_reductions.F90 @@ -533,7 +533,6 @@ function global_sum_scalar_dbl(scalar, dist) & !----------------------------------------------------------------------- integer (int_kind) :: & - ierr, &! mpi error flag numProcs, &! number of processor participating numBlocks, &! number of local blocks communicator ! communicator for this distribution @@ -603,7 +602,6 @@ function global_sum_scalar_real(scalar, dist) & !----------------------------------------------------------------------- integer (int_kind) :: & - ierr, &! mpi error flag numProcs, &! number of processor participating numBlocks, &! number of local blocks communicator ! communicator for this distribution @@ -714,7 +712,7 @@ end function global_sum_scalar_int function global_allreduce_sum_vector_dbl(vector, dist) & result(globalSums) -! Computes the global sums of sets of scalars (elements of 'vector') +! Computes the global sums of sets of scalars (elements of 'vector') ! distributed across a parallel machine. ! ! This is actually the specific interface for the generic global_allreduce_sum @@ -737,7 +735,6 @@ function global_allreduce_sum_vector_dbl(vector, dist) & !----------------------------------------------------------------------- integer (int_kind) :: & - ierr, &! mpi error flag numProcs, &! number of processor participating numBlocks, &! number of local blocks communicator, &! communicator for this distribution @@ -781,9 +778,9 @@ function global_sum_prod_dbl (array1, array2, dist, field_loc, & ! Computes the global sum of the physical domain of a product of ! two 2-d arrays. ! -! This is actually the specific interface for the generic +! This is actually the specific interface for the generic ! global_sum_prod function corresponding to double precision arrays. -! The generic interface is identical but will handle real and integer +! The generic interface is identical but will handle real and integer ! 2-d slabs. real (dbl_kind), dimension(:,:,:), intent(in) :: & @@ -919,9 +916,9 @@ function global_sum_prod_real (array1, array2, dist, field_loc, & ! Computes the global sum of the physical domain of a product of ! two 2-d arrays. ! -! This is actually the specific interface for the generic +! This is actually the specific interface for the generic ! global_sum_prod function corresponding to single precision arrays. -! The generic interface is identical but will handle real and integer +! The generic interface is identical but will handle real and integer ! 2-d slabs. real (real_kind), dimension(:,:,:), intent(in) :: & @@ -1057,9 +1054,9 @@ function global_sum_prod_int (array1, array2, dist, field_loc, & ! Computes the global sum of the physical domain of a product of ! two 2-d arrays. ! -! This is actually the specific interface for the generic +! This is actually the specific interface for the generic ! global_sum_prod function corresponding to integer arrays. -! The generic interface is identical but will handle real and integer +! The generic interface is identical but will handle real and integer ! 2-d slabs. 
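The comments above describe the same contract for each global_sum_prod specialization: multiply the two fields pointwise over the owned (physical) cells of each local block, skip ghost cells, and reduce the partial sums across the distribution. A minimal sketch of that core pattern follows (hypothetical names, one block per task, no masks, no tripole handling, and a plain floating-point reduction rather than the reproducible options discussed further below):

   subroutine toy_sum_prod(array1, array2, ilo, ihi, jlo, jhi, comm, gsum)
      ! Illustrative only -- not the CICE interface.
      use mpi
      implicit none
      integer, parameter :: r8 = selected_real_kind(13)
      real(r8), dimension(:,:), intent(in) :: array1, array2
      integer,  intent(in)  :: ilo, ihi, jlo, jhi  ! bounds of owned cells
      integer,  intent(in)  :: comm                ! MPI communicator
      real(r8), intent(out) :: gsum
      real(r8) :: lsum
      integer  :: i, j, ierr
      lsum = 0.0_r8
      do j = jlo, jhi        ! physical domain only: halo points are
         do i = ilo, ihi     ! owned by a neighbor and must not be counted
            lsum = lsum + array1(i,j)*array2(i,j)
         end do
      end do
      call MPI_Allreduce(lsum, gsum, 1, MPI_REAL8, MPI_SUM, comm, ierr)
   end subroutine toy_sum_prod

Note that the floating-point MPI_Allreduce at the end is exactly the order-dependent step that the ice_reprosum machinery below replaces when bit-for-bit reproducible sums are requested.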
integer (int_kind), dimension(:,:,:), intent(in) :: & @@ -1198,7 +1195,7 @@ function global_maxval_dbl (array, dist, lMask) & ! Computes the global maximum value of the physical domain of a 2-d field ! ! This is actually the specific interface for the generic global_maxval -! function corresponding to double precision arrays. +! function corresponding to double precision arrays. real (dbl_kind), dimension(:,:,:), intent(in) :: & array ! array for which max value needed @@ -1305,7 +1302,7 @@ function global_maxval_real (array, dist, lMask) & ! Computes the global maximum value of the physical domain of a 2-d field ! ! This is actually the specific interface for the generic global_maxval -! function corresponding to single precision arrays. +! function corresponding to single precision arrays. real (real_kind), dimension(:,:,:), intent(in) :: & array ! array for which max value needed @@ -1412,7 +1409,7 @@ function global_maxval_int (array, dist, lMask) & ! Computes the global maximum value of the physical domain of a 2-d field ! ! This is actually the specific interface for the generic global_maxval -! function corresponding to integer arrays. +! function corresponding to integer arrays. integer (int_kind), dimension(:,:,:), intent(in) :: & array ! array for which max value needed @@ -1520,7 +1517,7 @@ function global_maxval_scalar_dbl (scalar, dist) & ! a distributed machine. ! ! This is actually the specific interface for the generic global_maxval -! function corresponding to double precision scalars. +! function corresponding to double precision scalars. real (dbl_kind), intent(in) :: & scalar ! scalar for which max value needed @@ -1578,7 +1575,7 @@ function global_maxval_scalar_real (scalar, dist) & ! a distributed machine. ! ! This is actually the specific interface for the generic global_maxval -! function corresponding to single precision scalars. +! function corresponding to single precision scalars. real (real_kind), intent(in) :: & scalar ! scalar for which max value needed @@ -1636,7 +1633,7 @@ function global_maxval_scalar_int (scalar, dist) & ! a distributed machine. ! ! This is actually the specific interface for the generic global_maxval -! function corresponding to single precision scalars. +! function corresponding to integer scalars. integer (int_kind), intent(in) :: & scalar ! scalar for which max value needed @@ -1694,7 +1691,7 @@ function global_maxval_scalar_int_nodist (scalar, communicator) & ! a communicator. This method supports testing. ! ! This is actually the specific interface for the generic global_maxval -! function corresponding to single precision scalars. +! function corresponding to integer scalars. integer (int_kind), intent(in) :: & scalar ! scalar for which max value needed @@ -1743,7 +1740,7 @@ function global_minval_dbl (array, dist, lMask) & ! Computes the global minimum value of the physical domain of a 2-d field ! ! This is actually the specific interface for the generic global_minval -! function corresponding to double precision arrays. +! function corresponding to double precision arrays. real (dbl_kind), dimension(:,:,:), intent(in) :: & array ! array for which min value needed @@ -1850,7 +1847,7 @@ function global_minval_real (array, dist, lMask) & ! Computes the global minimum value of the physical domain of a 2-d field ! ! This is actually the specific interface for the generic global_minval -! function corresponding to single precision arrays. +! function corresponding to single precision arrays. 
real (real_kind), dimension(:,:,:), intent(in) :: & array ! array for which min value needed @@ -1957,7 +1954,7 @@ function global_minval_int (array, dist, lMask) & ! Computes the global minimum value of the physical domain of a 2-d field ! ! This is actually the specific interface for the generic global_minval -! function corresponding to integer arrays. +! function corresponding to integer arrays. integer (int_kind), dimension(:,:,:), intent(in) :: & array ! array for which min value needed @@ -2065,7 +2062,7 @@ function global_minval_scalar_dbl (scalar, dist) & ! a distributed machine. ! ! This is actually the specific interface for the generic global_minval -! function corresponding to double precision scalars. +! function corresponding to double precision scalars. real (dbl_kind), intent(in) :: & scalar ! scalar for which min value needed @@ -2123,7 +2120,7 @@ function global_minval_scalar_real (scalar, dist) & ! a distributed machine. ! ! This is actually the specific interface for the generic global_minval -! function corresponding to single precision scalars. +! function corresponding to single precision scalars. real (real_kind), intent(in) :: & scalar ! scalar for which min value needed @@ -2181,7 +2178,7 @@ function global_minval_scalar_int (scalar, dist) & ! a distributed machine. ! ! This is actually the specific interface for the generic global_minval -! function corresponding to single precision scalars. +! function corresponding to integer scalars. integer (int_kind), intent(in) :: & scalar ! scalar for which min value needed @@ -2239,7 +2236,7 @@ function global_minval_scalar_int_nodist (scalar, communicator) & ! a communicator. This method supports testing. ! ! This is actually the specific interface for the generic global_minval -! function corresponding to single precision scalars. +! function corresponding to integer scalars. integer (int_kind), intent(in) :: & scalar ! scalar for which min value needed @@ -2299,7 +2296,7 @@ subroutine compute_sums_dbl(array2,sums8,mpicomm,numprocs) ! reprosum = fixed point method based on ordered double integer sums. ! that requires two scalar reductions per global sum. ! This is extremely likely to be bfb. -! (See Mirin and Worley, 2012, IJHPCA, 26, 1730, +! (See Mirin and Worley, 2012, IJHPCA, 26, 1730, ! https://journals.sagepub.com/doi/10.1177/1094342011412630) ! ddpdd = parallel double-double algorithm using single scalar reduction. ! This is very likely to be bfb. diff --git a/cicecore/cicedynB/infrastructure/comm/mpi/ice_reprosum.F90 b/cicecore/cicedynB/infrastructure/comm/mpi/ice_reprosum.F90 index 27f66f712..8c6f90363 100644 --- a/cicecore/cicedynB/infrastructure/comm/mpi/ice_reprosum.F90 +++ b/cicecore/cicedynB/infrastructure/comm/mpi/ice_reprosum.F90 @@ -3,34 +3,34 @@ MODULE ice_reprosum -!----------------------------------------------------------------------- -! -! Purpose: -!> Compute reproducible global sums of a set of arrays across an MPI +!----------------------------------------------------------------------- +! +! Purpose: +!> Compute reproducible global sums of a set of arrays across an MPI !> subcommunicator ! -! Methods: +! 
Methods: !> Compute using either or both a scalable, reproducible algorithm and a !> scalable, nonreproducible algorithm: -!> * Reproducible (scalable): +!> * Reproducible (scalable): !> Convert to fixed point (integer vector representation) to enable !> reproducibility when using MPI_Allreduce !> * Alternative usually reproducible (scalable): -!> Use parallel double-double algorithm due to Helen He and +!> Use parallel double-double algorithm due to Helen He and !> Chris Ding, based on David Bailey's/Don Knuth's DDPDD algorithm -!> * Nonreproducible (scalable): +!> * Nonreproducible (scalable): !> Floating point and MPI_Allreduce based. -!> If computing both reproducible and nonreproducible sums, compare +!> If computing both reproducible and nonreproducible sums, compare !> these and report relative difference (if absolute difference !> less than sum) or absolute difference back to calling routine. ! -! Author: P. Worley (based on suggestions from J. White for fixed -! point algorithm and on He/Ding paper for ddpdd +! Author: P. Worley (based on suggestions from J. White for fixed +! point algorithm and on He/Ding paper for ddpdd ! algorithm) ! ! Modified by T.Craig for CICE, March 2019 based on the public version in ! Oasis3-MCT_4.0. -! +! !----------------------------------------------------------------------- !----------------------------------------------------------------------- @@ -65,7 +65,7 @@ MODULE ice_reprosum public :: & ice_reprosum_setopts, &! set runtime options ice_reprosum_calc, &! calculate distributed sum - ice_reprosum_tolExceeded ! utility function to check relative + ice_reprosum_tolExceeded ! utility function to check relative ! differences against the tolerance !----------------------------------------------------------------------- @@ -87,14 +87,14 @@ MODULE ice_reprosum !----------------------------------------------------------------------- logical :: repro_sum_use_ddpdd = .false. - logical :: detailed_timing = .false. +! logical :: detailed_timing = .false. character(len=char_len_long) :: tmpstr CONTAINS !======================================================================== -!----------------------------------------------------------------------- -! Purpose: +!----------------------------------------------------------------------- +! Purpose: !> Set runtime options ! Author: P. Worley !----------------------------------------------------------------------- @@ -109,11 +109,11 @@ subroutine ice_reprosum_setopts(repro_sum_use_ddpdd_in, & logical, intent(in), optional :: repro_sum_use_ddpdd_in !< Use DDPDD algorithm instead of fixed precision algorithm real(r8), intent(in), optional :: repro_sum_rel_diff_max_in - !< maximum permissible difference between reproducible and + !< maximum permissible difference between reproducible and !< nonreproducible sums logical, intent(in), optional :: repro_sum_recompute_in - !< recompute using different algorithm when difference between - !< reproducible and nonreproducible sums is too great + !< recompute using different algorithm when difference between + !< reproducible and nonreproducible sums is too great logical, intent(in), optional :: repro_sum_master !< flag indicating whether this process should output !< log messages @@ -187,10 +187,10 @@ end subroutine ice_reprosum_setopts !======================================================================== -! -! Purpose: -!> Compute the global sum of each field in "arr" using the indicated -!> communicator with a reproducible yet scalable implementation based +! +! 
Purpose: +!> Compute the global sum of each field in "arr" using the indicated +!> communicator with a reproducible yet scalable implementation based !> on a fixed point algorithm. An alternative is to use an "almost !> always reproducible" floating point algorithm. ! @@ -198,65 +198,65 @@ end subroutine ice_reprosum_setopts ! number of "levels" of integer expansion. The algorithm will calculate ! the number of levels that is required for the sum to be essentially ! exact. The optional parameter arr_max_levels can be used to override -! the calculated value. The optional parameter arr_max_levels_out can be +! the calculated value. The optional parameter arr_max_levels_out can be ! used to return the values used. ! -! The algorithm also requires an upper bound on -! the maximum summand (in absolute value) for each field, and will +! The algorithm also requires an upper bound on +! the maximum summand (in absolute value) for each field, and will ! calculate this internally. However, if the optional parameters ! arr_max_levels and arr_gbl_max are both set, then the algorithm will ! use the values in arr_gbl_max for the upper bounds instead. If these ! are not upper bounds, or if the upper bounds are not tight enough ! to achieve the requisite accuracy, and if the optional parameter ! repro_sum_validate is NOT set to .false., the algorithm will repeat the -! computation with appropriate upper bounds. If only arr_gbl_max is present, +! computation with appropriate upper bounds. If only arr_gbl_max is present, ! then the maxima are computed internally (and the specified values are -! ignored). The optional parameter arr_gbl_max_out can be +! ignored). The optional parameter arr_gbl_max_out can be ! used to return the values used. ! ! Finally, the algorithm requires an upper bound on the number of -! local summands across all processes. This will be calculated internally, -! using an MPI collective, but the value in the optional argument +! local summands across all processes. This will be calculated internally, +! using an MPI collective, but the value in the optional argument ! gbl_max_nsummands will be used instead if (1) it is present, (2) -! it is > 0, and (3) the maximum value and required number of levels -! are also specified. (If the maximum value is calculated, the same -! MPI collective is used to determine the maximum number of local -! summands.) The accuracy of the user-specified value is not checked. -! However, if set to < 1, the value will instead be calculated. If the -! optional parameter gbl_max_nsummands_out is present, then the value -! used (gbl_max_nsummands if >= 1; calculated otherwise) will be +! it is > 0, and (3) the maximum value and required number of levels +! are also specified. (If the maximum value is calculated, the same +! MPI collective is used to determine the maximum number of local +! summands.) The accuracy of the user-specified value is not checked. +! However, if set to < 1, the value will instead be calculated. If the +! optional parameter gbl_max_nsummands_out is present, then the value +! used (gbl_max_nsummands if >= 1; calculated otherwise) will be ! returned. ! ! If requested (by setting ice_reprosum_reldiffmax >= 0.0 and passing in -! the optional rel_diff parameter), results are compared with a -! nonreproducible floating point algorithm. +! the optional rel_diff parameter), results are compared with a +! nonreproducible floating point algorithm. ! -! Note that the cost of the algorithm is not strongly correlated with +! 
Note that the cost of the algorithm is not strongly correlated with ! the number of levels, which primarily shows up as a (modest) increase -! in cost of the MPI_Allreduce as a function of vector length. Rather the -! cost is more a function of (a) the number of integers required to +! in cost of the MPI_Allreduce as a function of vector length. Rather the +! cost is more a function of (a) the number of integers required to ! represent an individual summand and (b) the number of MPI_Allreduce -! calls. The number of integers required to represent an individual -! summand is 1 or 2 when using 8-byte integers for 8-byte real summands -! when the number of local summands is not too large. As the number of -! local summands increases, the number of integers required increases. +! calls. The number of integers required to represent an individual +! summand is 1 or 2 when using 8-byte integers for 8-byte real summands +! when the number of local summands is not too large. As the number of +! local summands increases, the number of integers required increases. ! The number of MPI_Allreduce calls is either 2 (specifying nothing) or -! 1 (specifying gbl_max_nsummands, arr_max_levels, and arr_gbl_max -! correctly). When specifying arr_max_levels and arr_gbl_max +! 1 (specifying gbl_max_nsummands, arr_max_levels, and arr_gbl_max +! correctly). When specifying arr_max_levels and arr_gbl_max ! incorrectly, 3 or 4 MPI_Allreduce calls will be required. ! ! The alternative algorithm is a minor modification of a parallel ! implementation of David Bailey's routine DDPDD by Helen He ! and Chris Ding. Bailey uses the Knuth trick to implement quadruple -! precision summation of double precision values with 10 double -! precision operations. The advantage of this algorithm is that +! precision summation of double precision values with 10 double +! precision operations. The advantage of this algorithm is that ! it requires a single MPI_Allreduce and is less expensive per summand -! than is the fixed precision algorithm. The disadvantage is that it -! is not guaranteed to be reproducible (though it is reproducible -! much more often than is the standard algorithm). This alternative -! is used when the optional parameter ddpdd_sum is set to .true. It is -! also used if the fixed precision algorithm radix assumption does not -! hold. +! than is the fixed precision algorithm. The disadvantage is that it +! is not guaranteed to be reproducible (though it is reproducible +! much more often than is the standard algorithm). This alternative +! is used when the optional parameter ddpdd_sum is set to .true. It is +! also used if the fixed precision algorithm radix assumption does not +! hold. 
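The DDPDD alternative described above carries the running sum as a double-double (hi, lo) pair and uses Knuth's two-sum to recover the rounding error of every addition. A minimal standalone illustration of that accumulation step (a toy program, not the CICE routine; it must be compiled without value-unsafe optimizations such as -ffast-math, which would reassociate away the error terms):

   program ddpdd_sketch
      implicit none
      integer, parameter :: r8 = selected_real_kind(13)
      real(r8) :: hi, lo, naive, x, t1, t2, e
      integer  :: i
      hi = 1.0_r8 ; lo = 0.0_r8 ; naive = 1.0_r8
      do i = 1, 1000000
         x  = 1.0e-16_r8    ! individually below the rounding level of 1.0
         t1 = hi + x        ! rounded sum
         e  = t1 - hi       ! the part of x that actually entered t1
         t2 = ((x - e) + (hi - (t1 - e))) + lo  ! exact residual, kept in lo
         hi = t1 + t2       ! renormalize the (hi,lo) pair
         lo = t2 - (hi - t1)
         naive = naive + x  ! ordinary accumulation loses every contribution
      end do
      print *, 'naive sum         = ', naive  ! still 1.0
      print *, 'double-double sum = ', hi     ! approximately 1.0 + 1.0e-10
   end program ddpdd_sketch

In the parallel algorithm it is these (hi, lo) pairs that are combined across tasks, which is why a single MPI_Allreduce (with a custom reduction operator) suffices.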
!---------------------------------------------------------------------- subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & @@ -273,10 +273,10 @@ subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & integer, intent(in) :: nsummands !< number of local summands integer, intent(in) :: dsummands !< declared first dimension integer, intent(in) :: nflds !< number of fields - real(r8), intent(in) :: arr(dsummands,nflds) + real(r8), intent(in) :: arr(dsummands,nflds) !< input array - real(r8), intent(out):: arr_gsum(nflds) + real(r8), intent(out):: arr_gsum(nflds) !< global means logical, intent(in), optional :: ddpdd_sum @@ -287,7 +287,7 @@ subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & !< upper bound on max(abs(arr)) real(r8), intent(out), optional :: arr_gbl_max_out(nflds) - !< calculated upper bound on + !< calculated upper bound on !< max(abs(arr)) integer, intent(in), optional :: arr_max_levels(nflds) @@ -306,13 +306,13 @@ subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & !< calculated maximum nsummands !< over all processes - integer, intent(in), optional :: gbl_count + integer, intent(in), optional :: gbl_count !< was total number of summands; - !< now is ignored; use + !< now is ignored; use !< gbl_max_nsummands instead logical, intent(in), optional :: repro_sum_validate - !< flag enabling/disabling testing that gmax and max_levels are + !< flag enabling/disabling testing that gmax and max_levels are !< accurate/sufficient. Default is enabled. integer, intent(inout), optional :: repro_sum_stats(5) @@ -325,21 +325,21 @@ subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & real(r8), intent(out), optional :: rel_diff(2,nflds) !< relative and absolute - !< differences between fixed + !< differences between fixed !< and floating point sums - integer, intent(in), optional :: commid + integer, intent(in), optional :: commid !< MPI communicator ! Local workspace logical :: use_ddpdd_sum ! flag indicating whether to ! use ice_reprosum_ddpdd or not - logical :: recompute ! flag indicating need to - ! determine gmax/gmin before + logical :: recompute ! flag indicating need to + ! determine gmax/gmin before ! computing sum - logical :: validate ! flag indicating need to - ! verify gmax and max_levels + logical :: validate ! flag indicating need to + ! verify gmax and max_levels ! are accurate/sufficient integer :: omp_nthreads ! number of OpenMP threads integer :: mpi_comm ! MPI subcommunicator @@ -364,12 +364,12 @@ subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & integer :: arr_lextremes(0:nflds,2)! local exponent extrema integer :: arr_gextremes(0:nflds,2)! global exponent extrema - integer :: arr_gmax_exp(nflds) ! global exponents maxima + integer :: arr_gmax_exp(nflds) ! global exponents maxima integer :: arr_gmin_exp(nflds) ! global exponents minima - integer :: arr_max_shift ! maximum safe exponent for + integer :: arr_max_shift ! maximum safe exponent for ! value < 1 (so that sum does ! not overflow) - integer :: max_levels(nflds) ! maximum number of levels of + integer :: max_levels(nflds) ! maximum number of levels of ! integer expansion to use integer :: max_level ! maximum value in max_levels integer :: gbl_max_red ! global max local sum reduction? (0/1) @@ -380,16 +380,16 @@ subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & real(r8) :: xmax_nsummands ! dble of max_nsummands real(r8) :: arr_lsum(nflds) ! local sums - real(r8) :: arr_gsum_fast(nflds) ! 
global sum calculated using - ! fast, nonreproducible, + real(r8) :: arr_gsum_fast(nflds) ! global sum calculated using + ! fast, nonreproducible, ! floating point alg. - real(r8) :: abs_diff ! absolute difference between - ! fixed and floating point + real(r8) :: abs_diff ! absolute difference between + ! fixed and floating point ! sums #ifdef _OPENMP integer omp_get_max_threads external omp_get_max_threads -#endif +#endif character(len=*),parameter :: subname = '(ice_reprosum_calc)' !----------------------------------------------------------------------- @@ -483,7 +483,7 @@ subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & if (.not. recompute) then -! determine maximum number of summands in local phases of the +! determine maximum number of summands in local phases of the ! algorithm ! if (detailed_timing) call xicex_timer_start("repro_sum_allr_max") if ( present(gbl_max_nsummands) ) then @@ -509,7 +509,7 @@ subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & endif ! if (detailed_timing) call xicex_timer_stop("repro_sum_allr_max") -! determine maximum shift. Shift needs to be small enough that summation +! determine maximum shift. Shift needs to be small enough that summation ! does not exceed maximum number of digits in i8. ! if requested, return max_nsummands before it is redefined @@ -544,7 +544,7 @@ subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & repro_sum_fast = 1 if (recompute) then repro_sum_both = 1 - else + else ! if requested, return specified levels and upper bounds on maxima if ( present(arr_max_levels_out) ) then do ifld=1,nflds @@ -560,7 +560,7 @@ subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & endif endif -! do not have sufficient information; calculate global max/min and +! do not have sufficient information; calculate global max/min and ! use to compute required number of levels if (recompute) then @@ -622,13 +622,13 @@ subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & arr_gmin_exp(:) = arr_gextremes(1:nflds,2) ! if a field is identically zero, arr_gmin_exp still equals MAXEXPONENT -! and arr_gmax_exp still equals MINEXPONENT. In this case, set +! and arr_gmax_exp still equals MINEXPONENT. In this case, set ! arr_gmin_exp = arr_gmax_exp = MINEXPONENT do ifld=1,nflds arr_gmin_exp(ifld) = min(arr_gmax_exp(ifld),arr_gmin_exp(ifld)) enddo -! if requested, return upper bounds on observed maxima +! if requested, return upper bounds on observed maxima if ( present(arr_gbl_max_out) ) then do ifld=1,nflds arr_gbl_max_out(ifld) = scale(1.0_r8,arr_gmax_exp(ifld)) @@ -641,7 +641,7 @@ subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & endif ! determine maximum shift (same as in previous branch, but with calculated -! max_nsummands). Shift needs to be small enough that summation does not +! max_nsummands). Shift needs to be small enough that summation does not ! exceed maximum number of digits in i8. ! summing within each thread first @@ -763,14 +763,14 @@ subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & end subroutine ice_reprosum_calc !======================================================================== -! -! Purpose: -!> Compute the global sum of each field in "arr" using the indicated -!> communicator with a reproducible yet scalable implementation based +! +! Purpose: +!> Compute the global sum of each field in "arr" using the indicated +!> communicator with a reproducible yet scalable implementation based !> on a fixed point algorithm. 
The accuracy of the fixed point algorithm !> is controlled by the number of "levels" of integer expansion, the -!> maximum value of which is specified by max_level. -! +!> maximum value of which is specified by max_level. +! !---------------------------------------------------------------------- subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & @@ -785,29 +785,29 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & integer, intent(in) :: nsummands !< number of local summands integer, intent(in) :: dsummands !< declared first dimension integer, intent(in) :: nflds !< number of fields - integer, intent(in) :: arr_max_shift !< maximum safe exponent for - !< value < 1 (so that sum + integer, intent(in) :: arr_max_shift !< maximum safe exponent for + !< value < 1 (so that sum !< does not overflow) - integer, intent(in) :: arr_gmax_exp(nflds) + integer, intent(in) :: arr_gmax_exp(nflds) !< exponents of global maxima - integer, intent(in) :: max_levels(nflds) - !< maximum number of levels + integer, intent(in) :: max_levels(nflds) + !< maximum number of levels !< of integer expansion - integer, intent(in) :: max_level !< maximum value in + integer, intent(in) :: max_level !< maximum value in !< max_levels integer, intent(in) :: omp_nthreads !< number of OpenMP threads integer, intent(in) :: mpi_comm !< MPI subcommunicator - real(r8), intent(in) :: arr(dsummands,nflds) + real(r8), intent(in) :: arr(dsummands,nflds) !< input array logical, intent(in):: validate !< flag indicating that accuracy of solution generated from !< arr_gmax_exp and max_levels should be tested - logical, intent(out):: recompute + logical, intent(out):: recompute !< flag indicating that either the upper bounds are inaccurate, - !< or max_levels and arr_gmax_exp do not generate accurate + !< or max_levels and arr_gmax_exp do not generate accurate !< enough sums real(r8), intent(out):: arr_gsum(nflds) !< global means @@ -817,27 +817,27 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & integer, parameter :: max_jlevel = & 1 + (digits(0_i8)/digits(0.0_r8)) - integer(i8) :: i8_arr_tlsum_level(0:max_level,nflds,omp_nthreads) - ! integer vector representing local + integer(i8) :: i8_arr_tlsum_level(0:max_level,nflds,omp_nthreads) + ! integer vector representing local ! sum (per thread, per field) - integer(i8) :: i8_arr_lsum_level((max_level+3)*nflds) - ! integer vector representing local + integer(i8) :: i8_arr_lsum_level((max_level+3)*nflds) + ! integer vector representing local ! sum integer(i8) :: i8_arr_level ! integer part of summand for current ! expansion level - integer(i8) :: i8_arr_gsum_level((max_level+3)*nflds) + integer(i8) :: i8_arr_gsum_level((max_level+3)*nflds) ! integer vector representing global ! sum - integer(i8) :: IX_8 ! integer representation of current - ! jlevels of X_8 ('part' of + integer(i8) :: IX_8 ! integer representation of current + ! jlevels of X_8 ('part' of ! i8_arr_gsum_level) integer(i8) :: i8_sign ! sign global sum integer(i8) :: i8_radix ! radix for i8 variables - integer :: max_error(nflds,omp_nthreads) + integer :: max_error(nflds,omp_nthreads) ! accurate upper bound on data? - integer :: not_exact(nflds,omp_nthreads) - ! max_levels sufficient to + integer :: not_exact(nflds,omp_nthreads) + ! max_levels sufficient to ! capture all digits? integer :: isum_beg(omp_nthreads), isum_end(omp_nthreads) ! 
range of summand indices for each @@ -845,16 +845,16 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & integer :: ifld, isum, ithread ! loop variables integer :: arr_exp ! exponent of summand - integer :: arr_shift ! exponent used to generate integer + integer :: arr_shift ! exponent used to generate integer ! for current expansion level integer :: ilevel ! current integer expansion level - integer :: offset(nflds) ! beginning location in - ! i8_arr_{g,l}sum_level for integer + integer :: offset(nflds) ! beginning location in + ! i8_arr_{g,l}sum_level for integer ! expansion of current ifld - integer :: voffset ! modification to offset used to - ! include validation metrics + integer :: voffset ! modification to offset used to + ! include validation metrics integer :: ioffset ! offset(ifld) - integer :: jlevel ! number of floating point 'pieces' + integer :: jlevel ! number of floating point 'pieces' ! extracted from a given i8 integer integer :: ierr ! MPI error return integer :: LX(max_jlevel) ! exponent of X_8 (see below) @@ -867,13 +867,13 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & ! reconstruction from integer vector real(r8) :: arr_frac ! fraction of summand - real(r8) :: arr_remainder ! part of summand remaining after + real(r8) :: arr_remainder ! part of summand remaining after ! current level of integer expansion - real(r8) :: X_8(max_jlevel) ! r8 vector representation of current + real(r8) :: X_8(max_jlevel) ! r8 vector representation of current ! i8_arr_gsum_level - real(r8) :: RX_8 ! r8 representation of difference + real(r8) :: RX_8 ! r8 representation of difference ! between current i8_arr_gsum_level - ! and current jlevels of X_8 + ! and current jlevels of X_8 ! (== IX_8). Also used in final ! scaling step @@ -940,7 +940,7 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & ! calculate first shift arr_shift = arr_max_shift - (arr_gmax_exp(ifld)-arr_exp) -! determine first (probably) nonzero level (assuming initial fraction is +! determine first (probably) nonzero level (assuming initial fraction is ! 'normal' - algorithm still works if this is not true) ! NOTE: this is critical; scale will set to zero if min exponent is too small. if (arr_shift < 1) then @@ -956,7 +956,7 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & endif if (ilevel .le. max_levels(ifld)) then -! apply first shift/truncate, add it to the relevant running +! apply first shift/truncate, add it to the relevant running ! sum, and calculate the remainder. arr_remainder = scale(arr_frac,arr_shift) i8_arr_level = int(arr_remainder,i8) @@ -964,7 +964,7 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & i8_arr_tlsum_level(ilevel,ifld,ithread) + i8_arr_level arr_remainder = arr_remainder - i8_arr_level -! while the remainder is non-zero, continue to shift, truncate, +! while the remainder is non-zero, continue to shift, truncate, ! sum, and calculate new remainder do while ((arr_remainder .ne. 0.0_r8) & .and. (ilevel < max_levels(ifld))) @@ -986,9 +986,9 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & enddo ! postprocess integer vector to eliminate potential for overlap in the following -! sums over threads and processes: if value larger than or equal to -! (radix(IX_8)**arr_max_shift), add this 'overlap' to next larger integer in -! vector, resulting in nonoverlapping ranges for each component. Note that +! 
sums over threads and processes: if value larger than or equal to +! (radix(IX_8)**arr_max_shift), add this 'overlap' to next larger integer in +! vector, resulting in nonoverlapping ranges for each component. Note that ! "ilevel-1==0" corresponds to an extra level used to guarantee that the sums ! over threads and processes do not overflow for ilevel==1. do ilevel=max_levels(ifld),1,-1 @@ -1035,12 +1035,12 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & #if defined (NO_I8) ! Workaround for when i8 is not supported. ! if (detailed_timing) call xicex_timer_start("repro_sum_allr_i4") - call mpi_allreduce (i8_arr_lsum_level, i8_arr_gsum_level, & + call mpi_allreduce (i8_arr_lsum_level, i8_arr_gsum_level, & veclth, MPI_INTEGER, MPI_SUM, mpi_comm, ierr) ! if (detailed_timing) call xicex_timer_stop("repro_sum_allr_i4") #else ! if (detailed_timing) call xicex_timer_start("repro_sum_allr_i8") - call mpi_allreduce (i8_arr_lsum_level, i8_arr_gsum_level, & + call mpi_allreduce (i8_arr_lsum_level, i8_arr_gsum_level, & veclth, MPI_INTEGER8, MPI_SUM, mpi_comm, ierr) ! if (detailed_timing) call xicex_timer_stop("repro_sum_allr_i8") #endif @@ -1055,10 +1055,10 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & ! alternate. To avoid this, do some arithmetic with integer vectors so that all ! components have the same sign. This should keep relative difference between ! using different integer sizes (e.g. i8 and i4) to machine epsilon -! 3) assignment to X_8 will usually lose accuracy since maximum integer -! size is greater than the max number of 'digits' in r8 value (if xmax_nsummands -! correction not very large). Calculate remainder and add in first (since -! smaller). One correction is sufficient for r8 (53 digits) and i8 (63 digits). +! 3) assignment to X_8 will usually lose accuracy since maximum integer +! size is greater than the max number of 'digits' in r8 value (if xmax_nsummands +! correction not very large). Calculate remainder and add in first (since +! smaller). One correction is sufficient for r8 (53 digits) and i8 (63 digits). ! For r4 (24 digits) may need to correct twice. Code is written in a general ! fashion, to work no matter how many corrections are necessary (assuming ! max_jlevel parameter calculation is correct). @@ -1079,7 +1079,7 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & if (.not. recompute) then ! preprocess integer vector: -! a) if value larger than or equal to (radix(IX_8)**arr_max_shift), add this 'overlap' +! a) if value larger than or equal to (radix(IX_8)**arr_max_shift), add this 'overlap' ! to next larger integer in vector, resulting in nonoverlapping ranges for each ! component. Note that have "ilevel-1=0" level here as described above. do ilevel=max_levels(ifld),1,-1 @@ -1093,9 +1093,9 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & - IX_8 endif enddo -! b) subtract +/- 1 from larger and add +/- 1 to smaller when necessary -! so that all vector components have the same sign (eliminating loss -! of accuracy arising from difference of large values when +! b) subtract +/- 1 from larger and add +/- 1 to smaller when necessary +! so that all vector components have the same sign (eliminating loss +! of accuracy arising from difference of large values when ! reconstructing r8 sum from integer vector) ilevel = 0 do while ((i8_arr_gsum_level(ioffset+ilevel) .eq. 
0_i8) & @@ -1117,7 +1117,7 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & i8_arr_gsum_level(ioffset+jlevel+1) = i8_arr_gsum_level(ioffset+jlevel+1) & + i8_sign*(i8_radix**arr_max_shift) endif - enddo + enddo endif ! start with maximum shift, and work up to larger values @@ -1130,7 +1130,7 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & if (i8_arr_gsum_level(ioffset+ilevel) .ne. 0_i8) then jlevel = 1 -! r8 representation of higher order bits in integer +! r8 representation of higher order bits in integer X_8(jlevel) = i8_arr_gsum_level(ioffset+ilevel) LX(jlevel) = exponent(X_8(jlevel)) @@ -1148,7 +1148,7 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & enddo ! add in contributions, smaller to larger, rescaling for each -! addition to guarantee that exponent of working summand is always +! addition to guarantee that exponent of working summand is always ! larger than minexponent do while (jlevel > 0) if (first) then @@ -1172,7 +1172,7 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & ! apply final exponent correction, scaling first if exponent is too small ! to apply directly corr_exp = curr_exp + exponent(arr_gsum(ifld)) - if (corr_exp .ge. MINEXPONENT(1._r8)) then + if (corr_exp .ge. MINEXPONENT(1._r8)) then arr_gsum(ifld) = set_exponent(arr_gsum(ifld),corr_exp) else RX_8 = set_exponent(arr_gsum(ifld), & @@ -1180,9 +1180,9 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & arr_gsum(ifld) = scale(RX_8,MINEXPONENT(1._r8)) endif -! if validate is .true. and some precision lost, test whether 'too much' +! if validate is .true. and some precision lost, test whether 'too much' ! was lost, due to too loose an upper bound, too stringent a limit on number -! of levels of expansion, cancellation, .... Calculated by comparing lower +! of levels of expansion, cancellation, .... Calculated by comparing lower ! bound on number of significant digits with number of digits in 1.0_r8 . if (validate) then if (i8_arr_gsum_level(ioffset-voffset+2) .ne. 0_i8) then @@ -1216,11 +1216,11 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & end subroutine ice_reprosum_int !======================================================================== -! -! Purpose: +! +! Purpose: !> Test whether distributed sum exceeds tolerance and print out a !> warning message. -! +! !---------------------------------------------------------------------- logical function ice_reprosum_tolExceeded (name, nflds, master, & @@ -1233,11 +1233,11 @@ logical function ice_reprosum_tolExceeded (name, nflds, master, & integer, intent(in) :: nflds !< number of fields logical, intent(in) :: master !< process that will write !< warning messages? - integer, optional, intent(in) :: logunit!< unit warning messages + integer, optional, intent(in) :: logunit !< unit warning messages !< written to real(r8), intent(in) :: rel_diff(2,nflds) !< relative and absolute - !< differences between fixed + !< differences between fixed !< and floating point sums ! Local workspace @@ -1301,12 +1301,12 @@ logical function ice_reprosum_tolExceeded (name, nflds, master, & end function ice_reprosum_tolExceeded !======================================================================== -! -! Purpose: -!> Compute the global sum of each field in "arr" using the indicated -!> communicator with a reproducible yet scalable implementation based +! +! 
Purpose: +!> Compute the global sum of each field in "arr" using the indicated +!> communicator with a reproducible yet scalable implementation based !> on He and Ding's implementation of the double-double algorithm. -! +! !---------------------------------------------------------------------- subroutine ice_reprosum_ddpdd (arr, arr_gsum, nsummands, dsummands, & @@ -1318,11 +1318,11 @@ subroutine ice_reprosum_ddpdd (arr, arr_gsum, nsummands, dsummands, & integer, intent(in) :: nsummands !< number of local summands integer, intent(in) :: dsummands !< declared first dimension integer, intent(in) :: nflds !< number of fields - real(r8), intent(in) :: arr(dsummands,nflds) + real(r8), intent(in) :: arr(dsummands,nflds) !< input array integer, intent(in) :: mpi_comm !< MPI subcommunicator - real(r8), intent(out):: arr_gsum(nflds) + real(r8), intent(out):: arr_gsum(nflds) !< global sums @@ -1360,8 +1360,8 @@ subroutine ice_reprosum_ddpdd (arr, arr_gsum, nsummands, dsummands, & arr_lsum_dd(ifld) = (0.0_r8,0.0_r8) do isum=1,nsummands - - ! Compute arr(isum,ifld) + arr_lsum_dd(ifld) using Knuth''s + + ! Compute arr(isum,ifld) + arr_lsum_dd(ifld) using Knuth's ! trick. t1 = arr(isum,ifld) + real(arr_lsum_dd(ifld)) e = t1 - arr(isum,ifld) @@ -1393,11 +1393,11 @@ end subroutine ice_reprosum_ddpdd subroutine DDPDD (dda, ddb, len, itype) !---------------------------------------------------------------------- -! -! Purpose: -! Modification of original codes written by David H. Bailey +! +! Purpose: +! Modification of original codes written by David H. Bailey ! This subroutine computes ddb(i) = dda(i)+ddb(i) -! +! !---------------------------------------------------------------------- ! Arguments @@ -1433,10 +1433,10 @@ end subroutine DDPDD subroutine split_indices(total,num_pieces,ibeg,iend) !---------------------------------------------------------------------- -! -! Purpose: +! +! Purpose: ! Split range into 'num_pieces' -! +! !---------------------------------------------------------------------- ! Arguments diff --git a/cicecore/cicedynB/infrastructure/comm/mpi/ice_timers.F90 b/cicecore/cicedynB/infrastructure/comm/mpi/ice_timers.F90 index abec3758f..baab6f49b 100644 --- a/cicecore/cicedynB/infrastructure/comm/mpi/ice_timers.F90 +++ b/cicecore/cicedynB/infrastructure/comm/mpi/ice_timers.F90 @@ -223,7 +223,7 @@ end subroutine init_ice_timers subroutine get_ice_timer(timer_id, name_choice, num_blocks, num_nodes) -! This routine initializes a timer with a given name and returns a +! This routine initializes a timer with a given name and returns a ! timer id. character (*), intent(in) :: & @@ -236,7 +236,7 @@ subroutine get_ice_timer(timer_id, name_choice, num_blocks, num_nodes) ! threaded region) integer (int_kind), intent(out) :: & - timer_id ! timer number assigned to this timer + timer_id ! timer number assigned to this timer !----------------------------------------------------------------------- ! @@ -267,7 +267,7 @@ subroutine get_ice_timer(timer_id, name_choice, num_blocks, num_nodes) all_timers(n)%name = name_choice all_timers(n)%in_use = .true.
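!*** record the remaining metadata for this timer and allocate the
!*** per-block bookkeeping arrays (sized by num_blocks) used to
!*** accumulate start flags, cycle counts, and elapsed time per block.
!*** Typical use of this API, as an illustrative sketch only
!*** ('timer_mywork' is a placeholder handle, not defined here):
!***   call get_ice_timer(timer_mywork, 'MyWork', num_blocks, num_nodes)
!***   call ice_timer_start(timer_mywork)
!***   ! ... timed work ...
!***   call ice_timer_stop(timer_mywork)
!***   call ice_timer_print(timer_mywork)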
all_timers(n)%num_blocks = num_blocks - all_timers(n)%num_nodes = num_nodes + all_timers(n)%num_nodes = num_nodes allocate(all_timers(n)%block_started (num_blocks), & all_timers(n)%block_cycles1 (num_blocks), & @@ -285,7 +285,7 @@ subroutine get_ice_timer(timer_id, name_choice, num_blocks, num_nodes) if (srch_error /= 0) & call abort_ice(subname//'ERROR: Exceeded maximum number of timers') - + !----------------------------------------------------------------------- @@ -326,7 +326,7 @@ subroutine ice_timer_clear(timer_id) all_timers(timer_id)%block_accum_time(:) = c0 else call abort_ice(subname//'ERROR: attempt to reset undefined timer') - + endif !----------------------------------------------------------------------- @@ -386,7 +386,7 @@ subroutine ice_timer_start(timer_id, block_id) !*** another thread. if already started, keep track !*** of number of start requests in order to match !*** start and stop requests - + !$OMP CRITICAL if (.not. all_timers(timer_id)%node_started) then @@ -419,18 +419,18 @@ subroutine ice_timer_start(timer_id, block_id) endif else call abort_ice(subname//'ERROR: attempt to start undefined timer') - + endif !----------------------------------------------------------------------- end subroutine ice_timer_start - + !*********************************************************************** subroutine ice_timer_stop(timer_id, block_id) -! This routine stops a given node timer if appropriate. If block +! This routine stops a given node timer if appropriate. If block ! information is available the appropriate block timer is also stopped. integer (int_kind), intent(in) :: & @@ -491,7 +491,7 @@ subroutine ice_timer_stop(timer_id, block_id) !*** stop node timer if number of requested stops !*** matches the number of starts (to avoid stopping !*** a node timer started by multiple threads) - + cycles1 = all_timers(timer_id)%node_cycles1 !$OMP CRITICAL @@ -530,13 +530,13 @@ subroutine ice_timer_stop(timer_id, block_id) endif else call abort_ice(subname//'ERROR: attempt to stop undefined timer') - + endif !----------------------------------------------------------------------- end subroutine ice_timer_stop - + !*********************************************************************** subroutine ice_timer_print(timer_id,stats) @@ -560,7 +560,7 @@ subroutine ice_timer_print(timer_id,stats) integer (int_kind) :: & n,icount, & ! dummy loop index and counter - nBlocks + nBlocks logical (log_kind) :: & lrestart_timer ! flag to restart timer if timer is running @@ -613,7 +613,7 @@ subroutine ice_timer_print(timer_id,stats) local_time = c0 endif max_time = global_maxval(local_time,distrb_info) - + if (my_task == master_task) then write (nu_diag,timer_format) timer_id, & trim(all_timers(timer_id)%name),max_time @@ -735,7 +735,7 @@ end subroutine ice_timer_print_all subroutine ice_timer_check(timer_id,block_id) ! This routine checks a given timer by stopping and restarting the -! timer. This is primarily used to periodically accumulate time in +! timer. This is primarily used to periodically accumulate time in ! the timer to prevent timer cycles from wrapping around max_cycles. integer (int_kind), intent(in) :: & diff --git a/cicecore/cicedynB/infrastructure/comm/serial/ice_boundary.F90 b/cicecore/cicedynB/infrastructure/comm/serial/ice_boundary.F90 index eb8f5d948..cafe4dc05 100644 --- a/cicecore/cicedynB/infrastructure/comm/serial/ice_boundary.F90 +++ b/cicecore/cicedynB/infrastructure/comm/serial/ice_boundary.F90 @@ -3,12 +3,12 @@ module ice_boundary ! 
This module contains data types and routines for updating halo -! regions (ghost cells) +! regions (ghost cells) ! ! 2007-07-19: Phil Jones, Yoshi Yoshida, John Dennis ! new naming conventions, optimizations during -! initialization, true multi-dimensional updates -! (rather than serial call to two-dimensional updates), +! initialization, true multi-dimensional updates +! (rather than serial call to two-dimensional updates), ! fixes for non-existent blocks ! 2008-01-28: Elizabeth Hunke replaced old routines with new POP ! infrastructure @@ -140,8 +140,8 @@ function ice_HaloCreate(dist, nsBoundaryType, ewBoundaryType, & seBlock, swBlock, &! block id southeast, southwest nbrs srcProc, dstProc, &! source, dest processor locations srcLocalID, dstLocalID, &! local block index of src,dst blocks - blockSizeX, &! size of default physical domain in X - blockSizeY, &! size of default physical domain in Y + blockSizeX, &! size of default physical domain in X + blockSizeY, &! size of default physical domain in Y eastMsgSize, westMsgSize, &! nominal sizes for e-w msgs northMsgSize, southMsgSize, &! nominal sizes for n-s msgs tripoleRows, &! number of rows in tripole buffer @@ -258,7 +258,7 @@ function ice_HaloCreate(dist, nsBoundaryType, ewBoundaryType, & srcProc, dstProc, northMsgSize) !*** if a tripole boundary block, also create a local - !*** message into and out of tripole buffer + !*** message into and out of tripole buffer if (tripoleBlock) then !*** copy in @@ -268,7 +268,7 @@ function ice_HaloCreate(dist, nsBoundaryType, ewBoundaryType, & !*** copy out of tripole buffer - includes halo call ice_HaloIncrementMsgCount(sendCount, recvCount, & - srcProc, srcProc, & + srcProc, srcProc, & (nghost+1)*nx_block) endif @@ -346,7 +346,7 @@ function ice_HaloCreate(dist, nsBoundaryType, ewBoundaryType, & ewBoundaryType, nsBoundaryType) if (neBlock > 0) then - msgSize = cornerMsgSize ! normal corner message + msgSize = cornerMsgSize ! normal corner message call ice_distributionGetBlockLoc(dist, neBlock, dstProc, & dstLocalID) @@ -425,7 +425,7 @@ function ice_HaloCreate(dist, nsBoundaryType, ewBoundaryType, & !----------------------------------------------------------------------- ! -! if messages are received from the same processor, the message is +! if messages are received from the same processor, the message is ! actually a local copy - count them and reset to zero ! !----------------------------------------------------------------------- @@ -526,7 +526,7 @@ function ice_HaloCreate(dist, nsBoundaryType, ewBoundaryType, & !*** for tripole grids, send a north tripole message to !*** the west block to make sure enough information is !*** available for tripole manipulations - + if (tripoleBlock) then call ice_HaloMsgCreate(halo, dist, iblock, -westBlock, 'north') endif @@ -752,7 +752,7 @@ subroutine ice_HaloUpdate2DR8(array, halo, & !----------------------------------------------------------------------- ! ! do local copies while waiting for messages to complete -! if srcBlock is zero, that denotes an eliminated land block or a +! if srcBlock is zero, that denotes an eliminated land block or a ! closed boundary where ghost cell values are undefined ! if srcBlock is less than zero, the message is a copy out of the ! tripole buffer and will be treated later @@ -793,7 +793,7 @@ subroutine ice_HaloUpdate2DR8(array, halo, & ! ! take care of northern boundary in tripole case ! bufTripole array contains the top nghost+1 rows (u-fold) or nghost+2 rows -! (T-fold) of physical domain for entire (global) top row +! 
(T-fold) of physical domain for entire (global) top row ! !----------------------------------------------------------------------- @@ -814,13 +814,13 @@ subroutine ice_HaloUpdate2DR8(array, halo, & select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = -1 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do i = 2,nxGlobal/2 iDst = nxGlobal - i + 2 x1 = bufTripoleR8(i ,halo%tripoleRows) @@ -829,20 +829,20 @@ subroutine ice_HaloUpdate2DR8(array, halo, & bufTripoleR8(i ,halo%tripoleRows) = xavg bufTripoleR8(iDst,halo%tripoleRows) = isign*xavg end do - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 0 joffset = 1 - + case (field_loc_Eface) ! cell center location - + ioffset = 0 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do i = 1,nxGlobal/2 iDst = nxGlobal + 1 - i x1 = bufTripoleR8(i ,halo%tripoleRows) @@ -851,12 +851,12 @@ subroutine ice_HaloUpdate2DR8(array, halo, & bufTripoleR8(i ,halo%tripoleRows) = xavg bufTripoleR8(iDst,halo%tripoleRows) = isign*xavg end do - + case (field_loc_Nface) ! cell corner (velocity) location - + ioffset = -1 joffset = 1 - + case default call abort_ice(subname//'ERROR: Unknown field location') end select @@ -865,18 +865,18 @@ subroutine ice_HaloUpdate2DR8(array, halo, & select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = 0 joffset = 0 - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 1 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do i = 1,nxGlobal/2 - 1 iDst = nxGlobal - i x1 = bufTripoleR8(i ,halo%tripoleRows) @@ -885,20 +885,20 @@ subroutine ice_HaloUpdate2DR8(array, halo, & bufTripoleR8(i ,halo%tripoleRows) = xavg bufTripoleR8(iDst,halo%tripoleRows) = isign*xavg end do - + case (field_loc_Eface) ! cell center location - + ioffset = 1 joffset = 0 - + case (field_loc_Nface) ! cell corner (velocity) location - + ioffset = 0 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do i = 1,nxGlobal/2 iDst = nxGlobal + 1 - i x1 = bufTripoleR8(i ,halo%tripoleRows) @@ -907,7 +907,7 @@ subroutine ice_HaloUpdate2DR8(array, halo, & bufTripoleR8(i ,halo%tripoleRows) = xavg bufTripoleR8(iDst,halo%tripoleRows) = isign*xavg end do - + case default call abort_ice(subname//'ERROR: Unknown field location') end select @@ -1053,7 +1053,7 @@ subroutine ice_HaloUpdate2DR4(array, halo, & !----------------------------------------------------------------------- ! ! do local copies while waiting for messages to complete -! if srcBlock is zero, that denotes an eliminated land block or a +! if srcBlock is zero, that denotes an eliminated land block or a ! closed boundary where ghost cell values are undefined ! if srcBlock is less than zero, the message is a copy out of the ! tripole buffer and will be treated later @@ -1106,13 +1106,13 @@ subroutine ice_HaloUpdate2DR4(array, halo, & select case (fieldLoc) case (field_loc_center) ! 
cell center location - + ioffset = -1 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do i = 2,nxGlobal/2 iDst = nxGlobal - i + 2 x1 = bufTripoleR4(i ,halo%tripoleRows) @@ -1121,20 +1121,20 @@ subroutine ice_HaloUpdate2DR4(array, halo, & bufTripoleR4(i ,halo%tripoleRows) = xavg bufTripoleR4(iDst,halo%tripoleRows) = isign*xavg end do - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 0 joffset = 1 - + case (field_loc_Eface) ! cell center location - + ioffset = 0 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do i = 1,nxGlobal/2 iDst = nxGlobal + 1 - i x1 = bufTripoleR4(i ,halo%tripoleRows) @@ -1143,32 +1143,32 @@ subroutine ice_HaloUpdate2DR4(array, halo, & bufTripoleR4(i ,halo%tripoleRows) = xavg bufTripoleR4(iDst,halo%tripoleRows) = isign*xavg end do - + case (field_loc_Nface) ! cell corner (velocity) location - + ioffset = -1 joffset = 1 - + case default call abort_ice(subname//'ERROR: Unknown field location') - end select + end select else ! tripole u-fold - + select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = 0 joffset = 0 - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 1 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do i = 1,nxGlobal/2 - 1 iDst = nxGlobal - i x1 = bufTripoleR4(i ,halo%tripoleRows) @@ -1177,20 +1177,20 @@ subroutine ice_HaloUpdate2DR4(array, halo, & bufTripoleR4(i ,halo%tripoleRows) = xavg bufTripoleR4(iDst,halo%tripoleRows) = isign*xavg end do - + case (field_loc_Eface) ! cell center location - + ioffset = 1 joffset = 0 - + case (field_loc_Nface) ! cell corner (velocity) location - + ioffset = 0 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do i = 1,nxGlobal/2 iDst = nxGlobal + 1 - i x1 = bufTripoleR4(i ,halo%tripoleRows) @@ -1199,7 +1199,7 @@ subroutine ice_HaloUpdate2DR4(array, halo, & bufTripoleR4(i ,halo%tripoleRows) = xavg bufTripoleR4(iDst,halo%tripoleRows) = isign*xavg end do - + case default call abort_ice(subname//'ERROR: Unknown field location') end select @@ -1345,7 +1345,7 @@ subroutine ice_HaloUpdate2DI4(array, halo, & !----------------------------------------------------------------------- ! ! do local copies while waiting for messages to complete -! if srcBlock is zero, that denotes an eliminated land block or a +! if srcBlock is zero, that denotes an eliminated land block or a ! closed boundary where ghost cell values are undefined ! if srcBlock is less than zero, the message is a copy out of the ! tripole buffer and will be treated later @@ -1398,13 +1398,13 @@ subroutine ice_HaloUpdate2DI4(array, halo, & select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = -1 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do i = 2,nxGlobal/2 iDst = nxGlobal - i + 2 x1 = bufTripoleI4(i ,halo%tripoleRows) @@ -1413,20 +1413,20 @@ subroutine ice_HaloUpdate2DI4(array, halo, & bufTripoleI4(i ,halo%tripoleRows) = xavg bufTripoleI4(iDst,halo%tripoleRows) = isign*xavg end do - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 0 joffset = 1 - + case (field_loc_Eface) ! 
cell center location - + ioffset = 0 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do i = 1,nxGlobal/2 iDst = nxGlobal + 1 - i x1 = bufTripoleI4(i ,halo%tripoleRows) @@ -1435,32 +1435,32 @@ subroutine ice_HaloUpdate2DI4(array, halo, & bufTripoleI4(i ,halo%tripoleRows) = xavg bufTripoleI4(iDst,halo%tripoleRows) = isign*xavg end do - + case (field_loc_Nface) ! cell corner (velocity) location - + ioffset = -1 joffset = 1 - + case default call abort_ice(subname//'ERROR: Unknown field location') end select - else ! tripole u-fold - + else ! tripole u-fold + select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = 0 joffset = 0 - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 1 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do i = 1,nxGlobal/2 - 1 iDst = nxGlobal - i x1 = bufTripoleI4(i ,halo%tripoleRows) @@ -1469,20 +1469,20 @@ subroutine ice_HaloUpdate2DI4(array, halo, & bufTripoleI4(i ,halo%tripoleRows) = xavg bufTripoleI4(iDst,halo%tripoleRows) = isign*xavg end do - + case (field_loc_Eface) ! cell center location - + ioffset = 1 joffset = 0 - + case (field_loc_Nface) ! cell corner (velocity) location - + ioffset = 0 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do i = 1,nxGlobal/2 iDst = nxGlobal + 1 - i x1 = bufTripoleI4(i ,halo%tripoleRows) @@ -1491,7 +1491,7 @@ subroutine ice_HaloUpdate2DI4(array, halo, & bufTripoleI4(i ,halo%tripoleRows) = xavg bufTripoleI4(iDst,halo%tripoleRows) = isign*xavg end do - + case default call abort_ice(subname//'ERROR: Unknown field location') end select @@ -1617,7 +1617,7 @@ subroutine ice_HaloUpdate3DR8(array, halo, & nxGlobal = size(bufTripoleR8,dim=1) allocate(bufTripole(nxGlobal,halo%tripoleRows,nz)) bufTripole = fill - endif + endif !----------------------------------------------------------------------- ! @@ -1644,7 +1644,7 @@ subroutine ice_HaloUpdate3DR8(array, halo, & !----------------------------------------------------------------------- ! ! do local copies -! if srcBlock is zero, that denotes an eliminated land block or a +! if srcBlock is zero, that denotes an eliminated land block or a ! closed boundary where ghost cell values are undefined ! if srcBlock is less than zero, the message is a copy out of the ! tripole buffer and will be treated later @@ -1703,10 +1703,10 @@ subroutine ice_HaloUpdate3DR8(array, halo, & select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = -1 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value @@ -1720,20 +1720,20 @@ subroutine ice_HaloUpdate3DR8(array, halo, & bufTripole(iDst,halo%tripoleRows,k) = isign*xavg end do end do - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 0 joffset = 1 - + case (field_loc_Eface) ! cell center location - + ioffset = 0 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do k=1,nz do i = 1,nxGlobal/2 iDst = nxGlobal + 1 - i @@ -1744,32 +1744,32 @@ subroutine ice_HaloUpdate3DR8(array, halo, & bufTripole(iDst,halo%tripoleRows,k) = isign*xavg end do end do - + case (field_loc_Nface) ! 
cell corner (velocity) location - + ioffset = -1 joffset = 1 - + case default call abort_ice(subname//'ERROR: Unknown field location') - end select - + end select + else ! tripole u-fold - + select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = 0 joffset = 0 - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 1 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do k=1,nz do i = 1,nxGlobal/2 - 1 iDst = nxGlobal - i @@ -1780,20 +1780,20 @@ subroutine ice_HaloUpdate3DR8(array, halo, & bufTripole(iDst,halo%tripoleRows,k) = isign*xavg end do end do - + case (field_loc_Eface) ! cell center location - + ioffset = 1 joffset = 0 - + case (field_loc_Nface) ! cell corner (velocity) location - + ioffset = 0 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do k=1,nz do i = 1,nxGlobal/2 iDst = nxGlobal + 1 - i @@ -1804,7 +1804,7 @@ subroutine ice_HaloUpdate3DR8(array, halo, & bufTripole(iDst,halo%tripoleRows,k) = isign*xavg end do end do - + case default call abort_ice(subname//'ERROR: Unknown field location') end select @@ -1962,7 +1962,7 @@ subroutine ice_HaloUpdate3DR4(array, halo, & !----------------------------------------------------------------------- ! ! do local copies -! if srcBlock is zero, that denotes an eliminated land block or a +! if srcBlock is zero, that denotes an eliminated land block or a ! closed boundary where ghost cell values are undefined ! if srcBlock is less than zero, the message is a copy out of the ! tripole buffer and will be treated later @@ -2021,10 +2021,10 @@ subroutine ice_HaloUpdate3DR4(array, halo, & select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = -1 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value @@ -2038,20 +2038,20 @@ subroutine ice_HaloUpdate3DR4(array, halo, & bufTripole(iDst,halo%tripoleRows,k) = isign*xavg end do end do - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 0 joffset = 1 - + case (field_loc_Eface) ! cell center location - + ioffset = 0 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do k=1,nz do i = 1,nxGlobal/2 iDst = nxGlobal + 1 - i @@ -2062,32 +2062,32 @@ subroutine ice_HaloUpdate3DR4(array, halo, & bufTripole(iDst,halo%tripoleRows,k) = isign*xavg end do end do - + case (field_loc_Nface) ! cell corner (velocity) location - + ioffset = -1 joffset = 1 - + case default call abort_ice(subname//'ERROR: Unknown field location') - end select - - else ! tripole u-fold - + end select + + else ! tripole u-fold + select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = 0 joffset = 0 - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 1 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do k=1,nz do i = 1,nxGlobal/2 - 1 iDst = nxGlobal - i @@ -2098,20 +2098,20 @@ subroutine ice_HaloUpdate3DR4(array, halo, & bufTripole(iDst,halo%tripoleRows,k) = isign*xavg end do end do - + case (field_loc_Eface) ! cell center location - + ioffset = 1 joffset = 0 - + case (field_loc_Nface) ! 
cell corner (velocity) location - + ioffset = 0 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do k=1,nz do i = 1,nxGlobal/2 iDst = nxGlobal + 1 - i @@ -2122,7 +2122,7 @@ subroutine ice_HaloUpdate3DR4(array, halo, & bufTripole(iDst,halo%tripoleRows,k) = isign*xavg end do end do - + case default call abort_ice(subname//'ERROR: Unknown field location') end select @@ -2280,7 +2280,7 @@ subroutine ice_HaloUpdate3DI4(array, halo, & !----------------------------------------------------------------------- ! ! do local copies -! if srcBlock is zero, that denotes an eliminated land block or a +! if srcBlock is zero, that denotes an eliminated land block or a ! closed boundary where ghost cell values are undefined ! if srcBlock is less than zero, the message is a copy out of the ! tripole buffer and will be treated later @@ -2323,7 +2323,7 @@ subroutine ice_HaloUpdate3DI4(array, halo, & !----------------------------------------------------------------------- if (nxGlobal > 0) then - + select case (fieldKind) case (field_type_scalar) isign = 1 @@ -2339,10 +2339,10 @@ subroutine ice_HaloUpdate3DI4(array, halo, & select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = -1 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value @@ -2356,20 +2356,20 @@ subroutine ice_HaloUpdate3DI4(array, halo, & bufTripole(iDst,halo%tripoleRows,k) = isign*xavg end do end do - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 0 joffset = 1 - + case (field_loc_Eface) ! cell center location - + ioffset = 0 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do k=1,nz do i = 1,nxGlobal/2 iDst = nxGlobal + 1 - i @@ -2380,32 +2380,32 @@ subroutine ice_HaloUpdate3DI4(array, halo, & bufTripole(iDst,halo%tripoleRows,k) = isign*xavg end do end do - + case (field_loc_Nface) ! cell corner (velocity) location - + ioffset = -1 joffset = 1 - + case default call abort_ice(subname//'ERROR: Unknown field location') end select - - else ! tripole u-fold + + else ! tripole u-fold select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = 0 joffset = 0 - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 1 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do k=1,nz do i = 1,nxGlobal/2 - 1 iDst = nxGlobal - i @@ -2416,20 +2416,20 @@ subroutine ice_HaloUpdate3DI4(array, halo, & bufTripole(iDst,halo%tripoleRows,k) = isign*xavg end do end do - + case (field_loc_Eface) ! cell center location - + ioffset = 1 joffset = 0 - + case (field_loc_Nface) ! cell corner (velocity) location - + ioffset = 0 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do k=1,nz do i = 1,nxGlobal/2 iDst = nxGlobal + 1 - i @@ -2440,11 +2440,11 @@ subroutine ice_HaloUpdate3DI4(array, halo, & bufTripole(iDst,halo%tripoleRows,k) = isign*xavg end do end do - + case default call abort_ice(subname//'ERROR: Unknown field location') end select - + endif !*** copy out of global tripole buffer into local @@ -2599,7 +2599,7 @@ subroutine ice_HaloUpdate4DR8(array, halo, & !----------------------------------------------------------------------- ! ! do local copies -! if srcBlock is zero, that denotes an eliminated land block or a +! 
if srcBlock is zero, that denotes an eliminated land block or a ! closed boundary where ghost cell values are undefined ! if srcBlock is less than zero, the message is a copy out of the ! tripole buffer and will be treated later @@ -2648,7 +2648,7 @@ subroutine ice_HaloUpdate4DR8(array, halo, & !----------------------------------------------------------------------- if (nxGlobal > 0) then - + select case (fieldKind) case (field_type_scalar) isign = 1 @@ -2664,10 +2664,10 @@ subroutine ice_HaloUpdate4DR8(array, halo, & select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = -1 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value @@ -2683,17 +2683,17 @@ subroutine ice_HaloUpdate4DR8(array, halo, & end do end do end do - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 0 joffset = 1 - + case (field_loc_Eface) ! cell center location - + ioffset = 0 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value @@ -2709,32 +2709,32 @@ subroutine ice_HaloUpdate4DR8(array, halo, & end do end do end do - + case (field_loc_Nface) ! cell corner (velocity) location - + ioffset = -1 joffset = 1 - + case default call abort_ice(subname//'ERROR: Unknown field location') - end select - - else ! tripole u-fold + end select + + else ! tripole u-fold select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = 0 joffset = 0 - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 1 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do l=1,nt do k=1,nz do i = 1,nxGlobal/2 - 1 @@ -2747,20 +2747,20 @@ subroutine ice_HaloUpdate4DR8(array, halo, & end do end do end do - + case (field_loc_Eface) ! cell center location - + ioffset = 1 joffset = 0 - + case (field_loc_Nface) ! cell corner (velocity) location - + ioffset = 0 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do l=1,nt do k=1,nz do i = 1,nxGlobal/2 @@ -2773,11 +2773,11 @@ subroutine ice_HaloUpdate4DR8(array, halo, & end do end do end do - + case default call abort_ice(subname//'ERROR: Unknown field location') end select - + endif !*** copy out of global tripole buffer into local @@ -2934,7 +2934,7 @@ subroutine ice_HaloUpdate4DR4(array, halo, & !----------------------------------------------------------------------- ! ! do local copies -! if srcBlock is zero, that denotes an eliminated land block or a +! if srcBlock is zero, that denotes an eliminated land block or a ! closed boundary where ghost cell values are undefined ! if srcBlock is less than zero, the message is a copy out of the ! tripole buffer and will be treated later @@ -2983,7 +2983,7 @@ subroutine ice_HaloUpdate4DR4(array, halo, & !----------------------------------------------------------------------- if (nxGlobal > 0) then - + select case (fieldKind) case (field_type_scalar) isign = 1 @@ -2999,10 +2999,10 @@ subroutine ice_HaloUpdate4DR4(array, halo, & select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = -1 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value @@ -3018,17 +3018,17 @@ subroutine ice_HaloUpdate4DR4(array, halo, & end do end do end do - + case (field_loc_NEcorner) ! 
cell corner location - + ioffset = 0 joffset = 1 - + case (field_loc_Eface) ! cell center location - + ioffset = 0 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value @@ -3044,32 +3044,32 @@ subroutine ice_HaloUpdate4DR4(array, halo, & end do end do end do - + case (field_loc_Nface) ! cell corner (velocity) location - + ioffset = -1 joffset = 1 - + case default call abort_ice(subname//'ERROR: Unknown field location') - end select - - else ! tripole u-fold + end select + + else ! tripole u-fold select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = 0 joffset = 0 - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 1 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do l=1,nt do k=1,nz do i = 1,nxGlobal/2 - 1 @@ -3082,20 +3082,20 @@ subroutine ice_HaloUpdate4DR4(array, halo, & end do end do end do - + case (field_loc_Eface) ! cell center location - + ioffset = 1 joffset = 0 - + case (field_loc_Nface) ! cell corner (velocity) location - + ioffset = 0 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do l=1,nt do k=1,nz do i = 1,nxGlobal/2 @@ -3108,11 +3108,11 @@ subroutine ice_HaloUpdate4DR4(array, halo, & end do end do end do - + case default call abort_ice(subname//'ERROR: Unknown field location') end select - + endif !*** copy out of global tripole buffer into local @@ -3269,7 +3269,7 @@ subroutine ice_HaloUpdate4DI4(array, halo, & !----------------------------------------------------------------------- ! ! do local copies -! if srcBlock is zero, that denotes an eliminated land block or a +! if srcBlock is zero, that denotes an eliminated land block or a ! closed boundary where ghost cell values are undefined ! if srcBlock is less than zero, the message is a copy out of the ! tripole buffer and will be treated later @@ -3334,10 +3334,10 @@ subroutine ice_HaloUpdate4DI4(array, halo, & select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = -1 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value @@ -3353,17 +3353,17 @@ subroutine ice_HaloUpdate4DI4(array, halo, & end do end do end do - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 0 joffset = 1 - + case (field_loc_Eface) ! cell center location - + ioffset = 0 joffset = 0 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value @@ -3379,32 +3379,32 @@ subroutine ice_HaloUpdate4DI4(array, halo, & end do end do end do - + case (field_loc_Nface) ! cell corner (velocity) location - + ioffset = -1 joffset = 1 - + case default call abort_ice(subname//'ERROR: Unknown field location') - end select - - else ! tripole u-fold + end select + + else ! tripole u-fold select case (fieldLoc) case (field_loc_center) ! cell center location - + ioffset = 0 joffset = 0 - + case (field_loc_NEcorner) ! cell corner location - + ioffset = 1 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do l=1,nt do k=1,nz do i = 1,nxGlobal/2 - 1 @@ -3417,20 +3417,20 @@ subroutine ice_HaloUpdate4DI4(array, halo, & end do end do end do - + case (field_loc_Eface) ! cell center location - + ioffset = 1 joffset = 0 - + case (field_loc_Nface) ! 
cell corner (velocity) location - + ioffset = 0 joffset = 1 - + !*** top row is degenerate, so must enforce symmetry !*** use average of two degenerate points for value - + do l=1,nt do k=1,nz do i = 1,nxGlobal/2 @@ -3443,11 +3443,11 @@ subroutine ice_HaloUpdate4DI4(array, halo, & end do end do end do - + case default call abort_ice(subname//'ERROR: Unknown field location') end select - + endif !*** copy out of global tripole buffer into local @@ -3587,7 +3587,7 @@ subroutine ice_HaloUpdate_stress(array1, array2, halo, & !----------------------------------------------------------------------- ! ! do local copies -! if srcBlock is zero, that denotes an eliminated land block or a +! if srcBlock is zero, that denotes an eliminated land block or a ! closed boundary where ghost cell values are undefined ! if srcBlock is less than zero, the message is a copy out of the ! tripole buffer and will be treated later @@ -3616,7 +3616,7 @@ subroutine ice_HaloUpdate_stress(array1, array2, halo, & ! ! take care of northern boundary in tripole case ! bufTripole array contains the top haloWidth+1 rows of physical -! domain for entire (global) top row +! domain for entire (global) top row ! !----------------------------------------------------------------------- @@ -3644,12 +3644,12 @@ subroutine ice_HaloUpdate_stress(array1, array2, halo, & ioffset = 1 joffset = 1 - case (field_loc_Eface) + case (field_loc_Eface) ioffset = 1 joffset = 0 - case (field_loc_Nface) + case (field_loc_Nface) ioffset = 0 joffset = 1 @@ -3680,7 +3680,7 @@ subroutine ice_HaloUpdate_stress(array1, array2, halo, & iSrc = iSrc - ioffset jSrc = jSrc - joffset if (iSrc == 0) iSrc = nxGlobal - + !*** for center and Eface, do not need to replace !*** top row of physical domain, so jSrc should be !*** out of range and skipped @@ -3735,7 +3735,7 @@ subroutine ice_HaloIncrementMsgCount(sndCounter, rcvCounter, & !----------------------------------------------------------------------- ! -! if destination all land or outside closed boundary (dstProc = 0), +! if destination all land or outside closed boundary (dstProc = 0), ! then no send is necessary, so do the rest only for dstProc /= 0 ! !----------------------------------------------------------------------- @@ -3744,7 +3744,7 @@ subroutine ice_HaloIncrementMsgCount(sndCounter, rcvCounter, & !----------------------------------------------------------------------- ! -! if the current processor is the source, must send data +! if the current processor is the source, must send data ! local copy if dstProc = srcProc ! !----------------------------------------------------------------------- @@ -3754,14 +3754,14 @@ subroutine ice_HaloIncrementMsgCount(sndCounter, rcvCounter, & !----------------------------------------------------------------------- ! -! if the current processor is the destination, must receive data +! if the current processor is the destination, must receive data ! local copy if dstProc = srcProc ! !----------------------------------------------------------------------- if (dstProc == my_task + 1) then - if (srcProc > 0) then + if (srcProc > 0) then !*** the source block has ocean points !*** count as a receive from srcProc @@ -3852,7 +3852,7 @@ subroutine ice_HaloMsgCreate(halo, dist, srcBlock, dstBlock, direction) !----------------------------------------------------------------------- ! -! if destination all land or outside closed boundary (dstProc = 0), +! if destination all land or outside closed boundary (dstProc = 0), ! 
then no send is necessary, so do the rest only for dstProc /= 0 ! !----------------------------------------------------------------------- @@ -3894,7 +3894,7 @@ subroutine ice_HaloMsgCreate(halo, dist, srcBlock, dstBlock, direction) !----------------------------------------------------------------------- if (srcProc == my_task+1 .and. & - dstProc == my_task+1) then + dstProc == my_task+1) then !*** compute addresses based on direction @@ -3989,7 +3989,7 @@ subroutine ice_HaloMsgCreate(halo, dist, srcBlock, dstBlock, direction) if (jeSrc - jbSrc + 1 < halo%tripoleRows) then call abort_ice(subname//'ERROR: not enough points in block for tripole') return - endif + endif msgIndx = halo%numLocalCopies @@ -4013,7 +4013,7 @@ subroutine ice_HaloMsgCreate(halo, dist, srcBlock, dstBlock, direction) else if (srcBlock < 0 .and. dstBlock > 0) then - !*** tripole grid - set up for copying out of + !*** tripole grid - set up for copying out of !*** tripole buffer into ghost cell domains !*** include e-w ghost cells @@ -4097,7 +4097,7 @@ subroutine ice_HaloMsgCreate(halo, dist, srcBlock, dstBlock, direction) else - !*** tripole grid - copy entire top halo+1 + !*** tripole grid - copy entire top halo+1 !*** rows into global buffer at src location msgIndx = halo%numLocalCopies @@ -4151,7 +4151,7 @@ subroutine ice_HaloMsgCreate(halo, dist, srcBlock, dstBlock, direction) else - !*** tripole grid - copy entire top halo+1 + !*** tripole grid - copy entire top halo+1 !*** rows into global buffer at src location msgIndx = halo%numLocalCopies @@ -4235,12 +4235,12 @@ subroutine ice_HaloMsgCreate(halo, dist, srcBlock, dstBlock, direction) !----------------------------------------------------------------------- ! -! if dest block is local and source block does not exist, create a +! if dest block is local and source block does not exist, create a ! local copy to fill halo with a fill value ! !----------------------------------------------------------------------- - else if (srcProc == 0 .and. dstProc == my_task+1) then + else if (srcProc == 0 .and. dstProc == my_task+1) then !*** compute addresses based on direction @@ -4481,14 +4481,14 @@ end subroutine ice_HaloMsgCreate subroutine ice_HaloExtrapolate2DR8(ARRAY,dist,ew_bndy_type,ns_bndy_type) -! This subroutine extrapolates ARRAY values into the first row or column -! of ghost cells, and is intended for grid variables whose ghost cells -! would otherwise be set using the default boundary conditions (Dirichlet +! This subroutine extrapolates ARRAY values into the first row or column +! of ghost cells, and is intended for grid variables whose ghost cells +! would otherwise be set using the default boundary conditions (Dirichlet ! or Neumann). ! Note: This routine will need to be modified for nghost > 1. ! We assume padding occurs only on east and north edges. ! -! This is the specific interface for double precision arrays +! This is the specific interface for double precision arrays ! 
corresponding to the generic interface ice_HaloExtrapolate use ice_blocks, only: block, nblocks_x, nblocks_y, get_block diff --git a/cicecore/cicedynB/infrastructure/comm/serial/ice_gather_scatter.F90 b/cicecore/cicedynB/infrastructure/comm/serial/ice_gather_scatter.F90 index 4b0bb1f9e..34cca2d03 100644 --- a/cicecore/cicedynB/infrastructure/comm/serial/ice_gather_scatter.F90 +++ b/cicecore/cicedynB/infrastructure/comm/serial/ice_gather_scatter.F90 @@ -346,7 +346,7 @@ subroutine gather_global_ext_dbl(ARRAY_G, ARRAY, dst_task, src_dist, spc_val) real (dbl_kind), optional :: & spc_val - + real (dbl_kind), dimension(:,:), intent(inout) :: & ARRAY_G ! array containing global horizontal field on dst_task @@ -504,7 +504,7 @@ subroutine gather_global_ext_int(ARRAY_G, ARRAY, dst_task, src_dist, spc_val) integer (int_kind), optional :: & spc_val - + integer (int_kind), dimension(:,:), intent(inout) :: & ARRAY_G ! array containing global horizontal field on dst_task @@ -662,7 +662,7 @@ subroutine gather_global_ext_log(ARRAY_G, ARRAY, dst_task, src_dist, spc_val) logical (log_kind), optional :: & spc_val - + logical (log_kind), dimension(:,:), intent(inout) :: & ARRAY_G ! array containing global horizontal field on dst_task @@ -1581,7 +1581,7 @@ subroutine scatter_global_ext(ARRAY, ARRAY_G, src_task, dst_dist) !----------------------------------------------------------------------- ! -! initialize return array to zero +! initialize return array to zero ! !----------------------------------------------------------------------- @@ -1754,10 +1754,10 @@ subroutine scatter_global_stress(ARRAY, ARRAY_G1, ARRAY_G2, & this_block = get_block(1,1) ! for the tripoleTflag - all blocks have it if (this_block%tripoleTFlag) then - xoffset = 2 ! treat stresses as cell-centered scalars (they are not + xoffset = 2 ! treat stresses as cell-centered scalars (they are not yoffset = 0 ! shared with neighboring grid cells) else - xoffset = 1 ! treat stresses as cell-centered scalars (they are not + xoffset = 1 ! treat stresses as cell-centered scalars (they are not yoffset = 1 ! shared with neighboring grid cells) endif isign = 1 diff --git a/cicecore/cicedynB/infrastructure/comm/serial/ice_global_reductions.F90 b/cicecore/cicedynB/infrastructure/comm/serial/ice_global_reductions.F90 index a024698d5..e859ea2bd 100644 --- a/cicecore/cicedynB/infrastructure/comm/serial/ice_global_reductions.F90 +++ b/cicecore/cicedynB/infrastructure/comm/serial/ice_global_reductions.F90 @@ -534,7 +534,6 @@ function global_sum_scalar_dbl(scalar, dist) & !----------------------------------------------------------------------- integer (int_kind) :: & - ierr, &! mpi error flag numProcs, &! number of processor participating numBlocks, &! number of local blocks communicator ! communicator for this distribution @@ -604,7 +603,6 @@ function global_sum_scalar_real(scalar, dist) & !----------------------------------------------------------------------- integer (int_kind) :: & - ierr, &! mpi error flag numProcs, &! number of processor participating numBlocks, &! number of local blocks communicator ! communicator for this distribution @@ -715,7 +713,7 @@ end function global_sum_scalar_int function global_allreduce_sum_vector_dbl(vector, dist) & result(globalSums) -! Computes the global sums of sets of scalars (elements of 'vector') +! Computes the global sums of sets of scalars (elements of 'vector') ! distributed across a parallel machine. ! ! 
This is actually the specific interface for the generic global_allreduce_sum @@ -738,7 +736,6 @@ function global_allreduce_sum_vector_dbl(vector, dist) & !----------------------------------------------------------------------- integer (int_kind) :: & - ierr, &! mpi error flag numProcs, &! number of processor participating numBlocks, &! number of local blocks communicator, &! communicator for this distribution @@ -782,9 +779,9 @@ function global_sum_prod_dbl (array1, array2, dist, field_loc, & ! Computes the global sum of the physical domain of a product of ! two 2-d arrays. ! -! This is actually the specific interface for the generic +! This is actually the specific interface for the generic ! global_sum_prod function corresponding to double precision arrays. -! The generic interface is identical but will handle real and integer +! The generic interface is identical but will handle real and integer ! 2-d slabs. real (dbl_kind), dimension(:,:,:), intent(in) :: & @@ -920,9 +917,9 @@ function global_sum_prod_real (array1, array2, dist, field_loc, & ! Computes the global sum of the physical domain of a product of ! two 2-d arrays. ! -! This is actually the specific interface for the generic +! This is actually the specific interface for the generic ! global_sum_prod function corresponding to single precision arrays. -! The generic interface is identical but will handle real and integer +! The generic interface is identical but will handle real and integer ! 2-d slabs. real (real_kind), dimension(:,:,:), intent(in) :: & @@ -1058,9 +1055,9 @@ function global_sum_prod_int (array1, array2, dist, field_loc, & ! Computes the global sum of the physical domain of a product of ! two 2-d arrays. ! -! This is actually the specific interface for the generic +! This is actually the specific interface for the generic ! global_sum_prod function corresponding to integer arrays. -! The generic interface is identical but will handle real and integer +! The generic interface is identical but will handle real and integer ! 2-d slabs. integer (int_kind), dimension(:,:,:), intent(in) :: & @@ -1199,7 +1196,7 @@ function global_maxval_dbl (array, dist, lMask) & ! Computes the global maximum value of the physical domain of a 2-d field ! ! This is actually the specific interface for the generic global_maxval -! function corresponding to double precision arrays. +! function corresponding to double precision arrays. real (dbl_kind), dimension(:,:,:), intent(in) :: & array ! array for which max value needed @@ -1306,7 +1303,7 @@ function global_maxval_real (array, dist, lMask) & ! Computes the global maximum value of the physical domain of a 2-d field ! ! This is actually the specific interface for the generic global_maxval -! function corresponding to single precision arrays. +! function corresponding to single precision arrays. real (real_kind), dimension(:,:,:), intent(in) :: & array ! array for which max value needed @@ -1413,7 +1410,7 @@ function global_maxval_int (array, dist, lMask) & ! Computes the global maximum value of the physical domain of a 2-d field ! ! This is actually the specific interface for the generic global_maxval -! function corresponding to integer arrays. +! function corresponding to integer arrays. integer (int_kind), dimension(:,:,:), intent(in) :: & array ! array for which max value needed @@ -1521,7 +1518,7 @@ function global_maxval_scalar_dbl (scalar, dist) & ! a distributed machine. ! ! This is actually the specific interface for the generic global_maxval -! 
function corresponding to double precision scalars.
+! function corresponding to double precision scalars.
 
    real (dbl_kind), intent(in) :: &
       scalar                ! scalar for which max value needed
 
@@ -1579,7 +1576,7 @@ function global_maxval_scalar_real (scalar, dist) &
 ! a distributed machine.
 !
 ! This is actually the specific interface for the generic global_maxval
-! function corresponding to single precision scalars. 
+! function corresponding to single precision scalars.
 
    real (real_kind), intent(in) :: &
       scalar                ! scalar for which max value needed
 
@@ -1637,7 +1634,7 @@ function global_maxval_scalar_int (scalar, dist) &
 ! a distributed machine.
 !
 ! This is actually the specific interface for the generic global_maxval
-! function corresponding to single precision scalars. 
+! function corresponding to integer scalars.
 
    integer (int_kind), intent(in) :: &
       scalar                ! scalar for which max value needed
 
@@ -1695,7 +1692,7 @@ function global_maxval_scalar_int_nodist (scalar, communicator) &
 ! a communicator. This method supports testing.
 !
 ! This is actually the specific interface for the generic global_maxval
-! function corresponding to single precision scalars. 
+! function corresponding to integer scalars.
 
    integer (int_kind), intent(in) :: &
       scalar                ! scalar for which max value needed
 
@@ -1744,7 +1741,7 @@ function global_minval_dbl (array, dist, lMask) &
 ! Computes the global minimum value of the physical domain of a 2-d field
 !
 ! This is actually the specific interface for the generic global_minval
-! function corresponding to double precision arrays. 
+! function corresponding to double precision arrays.
 
    real (dbl_kind), dimension(:,:,:), intent(in) :: &
       array                ! array for which min value needed
 
@@ -1851,7 +1848,7 @@ function global_minval_real (array, dist, lMask) &
 ! Computes the global minimum value of the physical domain of a 2-d field
 !
 ! This is actually the specific interface for the generic global_minval
-! function corresponding to single precision arrays. 
+! function corresponding to single precision arrays.
 
    real (real_kind), dimension(:,:,:), intent(in) :: &
       array                ! array for which min value needed
 
@@ -1958,7 +1955,7 @@ function global_minval_int (array, dist, lMask) &
 ! Computes the global minimum value of the physical domain of a 2-d field
 !
 ! This is actually the specific interface for the generic global_minval
-! function corresponding to integer arrays. 
+! function corresponding to integer arrays.
 
    integer (int_kind), dimension(:,:,:), intent(in) :: &
      array                ! array for which min value needed
 
@@ -2066,7 +2063,7 @@ function global_minval_scalar_dbl (scalar, dist) &
 ! a distributed machine.
 !
 ! This is actually the specific interface for the generic global_minval
-! function corresponding to double precision scalars. 
+! function corresponding to double precision scalars.
 
    real (dbl_kind), intent(in) :: &
       scalar                ! scalar for which min value needed
 
@@ -2124,7 +2121,7 @@ function global_minval_scalar_real (scalar, dist) &
 ! a distributed machine.
 !
 ! This is actually the specific interface for the generic global_minval
-! function corresponding to single precision scalars. 
+! function corresponding to single precision scalars.
 
    real (real_kind), intent(in) :: &
       scalar                ! scalar for which min value needed
 
@@ -2182,7 +2179,7 @@ function global_minval_scalar_int (scalar, dist) &
 ! a distributed machine.
 !
 ! This is actually the specific interface for the generic global_minval
-! function corresponding to single precision scalars. 
+! function corresponding to integer scalars.
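Each of these routines is described as "the specific interface for the generic" global_maxval or global_minval. For readers unfamiliar with this Fortran idiom, a minimal self-contained sketch of how one generic name dispatches to type-specific functions follows; the module and stub bodies here are illustrative only, not the CICE source (the real interface blocks appear earlier in ice_global_reductions.F90 and perform the actual reductions):

   module global_minval_demo
   implicit none
   integer, parameter :: dbl_kind = selected_real_kind(13)

   ! one generic name, resolved at compile time by argument type
   interface global_minval
      module procedure global_minval_scalar_dbl
      module procedure global_minval_scalar_int
   end interface global_minval

   contains

   function global_minval_scalar_dbl(scalar) result(globalMinval)
      real (dbl_kind), intent(in) :: scalar
      real (dbl_kind) :: globalMinval
      globalMinval = scalar   ! serial stub: nothing to reduce over
   end function global_minval_scalar_dbl

   function global_minval_scalar_int(scalar) result(globalMinval)
      integer, intent(in) :: scalar
      integer :: globalMinval
      globalMinval = scalar   ! serial stub
   end function global_minval_scalar_int

   end module global_minval_demo

A caller simply writes gmin = global_minval(x), and the compiler selects the double precision or integer specific function from the type of x.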
integer (int_kind), intent(in) :: &
      scalar                ! scalar for which min value needed
 
@@ -2240,7 +2237,7 @@ function global_minval_scalar_int_nodist (scalar, communicator) &
 ! a communicator. This method supports testing.
 !
 ! This is actually the specific interface for the generic global_minval
-! function corresponding to single precision scalars. 
+! function corresponding to integer scalars.
 
    integer (int_kind), intent(in) :: &
       scalar                ! scalar for which min value needed
 
@@ -2300,7 +2297,7 @@ subroutine compute_sums_dbl(array2,sums8,mpicomm,numprocs)
 ! reprosum = fixed point method based on ordered double integer sums.
 !            that requires two scalar reductions per global sum.
 !            This is extremely likely to be bfb.
-!            (See Mirin and Worley, 2012, IJHPCA, 26, 1730, 
+!            (See Mirin and Worley, 2012, IJHPCA, 26, 1730,
 !            https://journals.sagepub.com/doi/10.1177/1094342011412630)
 ! ddpdd = parallel double-double algorithm using single scalar reduction.
 !         This is very likely to be bfb.
diff --git a/cicecore/cicedynB/infrastructure/comm/serial/ice_reprosum.F90 b/cicecore/cicedynB/infrastructure/comm/serial/ice_reprosum.F90
index 13ff6fcb8..2c584bd94 100644
--- a/cicecore/cicedynB/infrastructure/comm/serial/ice_reprosum.F90
+++ b/cicecore/cicedynB/infrastructure/comm/serial/ice_reprosum.F90
@@ -4,34 +4,34 @@
 MODULE ice_reprosum
 
-!----------------------------------------------------------------------- 
-! 
-! Purpose: 
-!> Compute reproducible global sums of a set of arrays across an MPI 
+!-----------------------------------------------------------------------
+!
+! Purpose:
+!> Compute reproducible global sums of a set of arrays across an MPI
 !> subcommunicator
 !
-! Methods: 
+! Methods:
 !> Compute using either or both a scalable, reproducible algorithm and a
 !> scalable, nonreproducible algorithm:
-!> * Reproducible (scalable): 
+!> * Reproducible (scalable):
 !>   Convert to fixed point (integer vector representation) to enable
 !>   reproducibility when using MPI_Allreduce
 !> * Alternative usually reproducible (scalable):
-!>   Use parallel double-double algorithm due to Helen He and 
+!>   Use parallel double-double algorithm due to Helen He and
 !>   Chris Ding, based on David Bailey's/Don Knuth's DDPDD algorithm
-!> * Nonreproducible (scalable): 
+!> * Nonreproducible (scalable):
 !>   Floating point and MPI_Allreduce based.
-!> If computing both reproducible and nonreproducible sums, compare 
+!> If computing both reproducible and nonreproducible sums, compare
 !> these and report relative difference (if absolute difference
 !> less than sum) or absolute difference back to calling routine.
 !
-! Author: P. Worley (based on suggestions from J. White for fixed 
-! point algorithm and on He/Ding paper for ddpdd 
+! Author: P. Worley (based on suggestions from J. White for fixed
+! point algorithm and on He/Ding paper for ddpdd
 ! algorithm)
 !
 ! Modified by T.Craig for CICE, March 2019 based on the public version in
 ! Oasis3-MCT_4.0.
-! 
+!
 !-----------------------------------------------------------------------
 
!-----------------------------------------------------------------------
 
@@ -66,7 +66,7 @@ MODULE ice_reprosum
    public :: &
       ice_reprosum_setopts,    &! set runtime options
       ice_reprosum_calc,       &! calculate distributed sum
-      ice_reprosum_tolExceeded  ! utility function to check relative 
+      ice_reprosum_tolExceeded  ! utility function to check relative
!
differences against the tolerance !----------------------------------------------------------------------- @@ -94,8 +94,8 @@ MODULE ice_reprosum CONTAINS !======================================================================== -!----------------------------------------------------------------------- -! Purpose: +!----------------------------------------------------------------------- +! Purpose: !> Set runtime options ! Author: P. Worley !----------------------------------------------------------------------- @@ -110,11 +110,11 @@ subroutine ice_reprosum_setopts(repro_sum_use_ddpdd_in, & logical, intent(in), optional :: repro_sum_use_ddpdd_in !< Use DDPDD algorithm instead of fixed precision algorithm real(r8), intent(in), optional :: repro_sum_rel_diff_max_in - !< maximum permissible difference between reproducible and + !< maximum permissible difference between reproducible and !< nonreproducible sums logical, intent(in), optional :: repro_sum_recompute_in - !< recompute using different algorithm when difference between - !< reproducible and nonreproducible sums is too great + !< recompute using different algorithm when difference between + !< reproducible and nonreproducible sums is too great logical, intent(in), optional :: repro_sum_master !< flag indicating whether this process should output !< log messages @@ -188,10 +188,10 @@ end subroutine ice_reprosum_setopts !======================================================================== -! -! Purpose: -!> Compute the global sum of each field in "arr" using the indicated -!> communicator with a reproducible yet scalable implementation based +! +! Purpose: +!> Compute the global sum of each field in "arr" using the indicated +!> communicator with a reproducible yet scalable implementation based !> on a fixed point algorithm. An alternative is to use an "almost !> always reproducible" floating point algorithm. ! @@ -199,65 +199,65 @@ end subroutine ice_reprosum_setopts ! number of "levels" of integer expansion. The algorithm will calculate ! the number of levels that is required for the sum to be essentially ! exact. The optional parameter arr_max_levels can be used to override -! the calculated value. The optional parameter arr_max_levels_out can be +! the calculated value. The optional parameter arr_max_levels_out can be ! used to return the values used. ! -! The algorithm also requires an upper bound on -! the maximum summand (in absolute value) for each field, and will +! The algorithm also requires an upper bound on +! the maximum summand (in absolute value) for each field, and will ! calculate this internally. However, if the optional parameters ! arr_max_levels and arr_gbl_max are both set, then the algorithm will ! use the values in arr_gbl_max for the upper bounds instead. If these ! are not upper bounds, or if the upper bounds are not tight enough ! to achieve the requisite accuracy, and if the optional parameter ! repro_sum_validate is NOT set to .false., the algorithm will repeat the -! computation with appropriate upper bounds. If only arr_gbl_max is present, +! computation with appropriate upper bounds. If only arr_gbl_max is present, ! then the maxima are computed internally (and the specified values are -! ignored). The optional parameter arr_gbl_max_out can be +! ignored). The optional parameter arr_gbl_max_out can be ! used to return the values used. ! ! Finally, the algorithm requires an upper bound on the number of -! local summands across all processes. This will be calculated internally, -! 
using an MPI collective, but the value in the optional argument +! local summands across all processes. This will be calculated internally, +! using an MPI collective, but the value in the optional argument ! gbl_max_nsummands will be used instead if (1) it is present, (2) -! it is > 0, and (3) the maximum value and required number of levels -! are also specified. (If the maximum value is calculated, the same -! MPI collective is used to determine the maximum number of local -! summands.) The accuracy of the user-specified value is not checked. -! However, if set to < 1, the value will instead be calculated. If the -! optional parameter gbl_max_nsummands_out is present, then the value -! used (gbl_max_nsummands if >= 1; calculated otherwise) will be +! it is > 0, and (3) the maximum value and required number of levels +! are also specified. (If the maximum value is calculated, the same +! MPI collective is used to determine the maximum number of local +! summands.) The accuracy of the user-specified value is not checked. +! However, if set to < 1, the value will instead be calculated. If the +! optional parameter gbl_max_nsummands_out is present, then the value +! used (gbl_max_nsummands if >= 1; calculated otherwise) will be ! returned. ! ! If requested (by setting ice_reprosum_reldiffmax >= 0.0 and passing in -! the optional rel_diff parameter), results are compared with a -! nonreproducible floating point algorithm. +! the optional rel_diff parameter), results are compared with a +! nonreproducible floating point algorithm. ! -! Note that the cost of the algorithm is not strongly correlated with +! Note that the cost of the algorithm is not strongly correlated with ! the number of levels, which primarily shows up as a (modest) increase -! in cost of the MPI_Allreduce as a function of vector length. Rather the -! cost is more a function of (a) the number of integers required to +! in cost of the MPI_Allreduce as a function of vector length. Rather the +! cost is more a function of (a) the number of integers required to ! represent an individual summand and (b) the number of MPI_Allreduce -! calls. The number of integers required to represent an individual -! summand is 1 or 2 when using 8-byte integers for 8-byte real summands -! when the number of local summands is not too large. As the number of -! local summands increases, the number of integers required increases. +! calls. The number of integers required to represent an individual +! summand is 1 or 2 when using 8-byte integers for 8-byte real summands +! when the number of local summands is not too large. As the number of +! local summands increases, the number of integers required increases. ! The number of MPI_Allreduce calls is either 2 (specifying nothing) or -! 1 (specifying gbl_max_nsummands, arr_max_levels, and arr_gbl_max -! correctly). When specifying arr_max_levels and arr_gbl_max +! 1 (specifying gbl_max_nsummands, arr_max_levels, and arr_gbl_max +! correctly). When specifying arr_max_levels and arr_gbl_max ! incorrectly, 3 or 4 MPI_Allreduce calls will be required. ! ! The alternative algorithm is a minor modification of a parallel ! implementation of David Bailey's routine DDPDD by Helen He ! and Chris Ding. Bailey uses the Knuth trick to implement quadruple -! precision summation of double precision values with 10 double -! precision operations. The advantage of this algorithm is that +! precision summation of double precision values with 10 double +! precision operations. The advantage of this algorithm is that ! 
it requires a single MPI_Allreduce and is less expensive per summand -! than is the fixed precision algorithm. The disadvantage is that it -! is not guaranteed to be reproducible (though it is reproducible -! much more often than is the standard algorithm). This alternative -! is used when the optional parameter ddpdd_sum is set to .true. It is -! also used if the fixed precision algorithm radix assumption does not -! hold. +! than is the fixed precision algorithm. The disadvantage is that it +! is not guaranteed to be reproducible (though it is reproducible +! much more often than is the standard algorithm). This alternative +! is used when the optional parameter ddpdd_sum is set to .true. It is +! also used if the fixed precision algorithm radix assumption does not +! hold. !---------------------------------------------------------------------- subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & @@ -274,10 +274,10 @@ subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & integer, intent(in) :: nsummands !< number of local summands integer, intent(in) :: dsummands !< declared first dimension integer, intent(in) :: nflds !< number of fields - real(r8), intent(in) :: arr(dsummands,nflds) + real(r8), intent(in) :: arr(dsummands,nflds) !< input array - real(r8), intent(out):: arr_gsum(nflds) + real(r8), intent(out):: arr_gsum(nflds) !< global means logical, intent(in), optional :: ddpdd_sum @@ -288,7 +288,7 @@ subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & !< upper bound on max(abs(arr)) real(r8), intent(out), optional :: arr_gbl_max_out(nflds) - !< calculated upper bound on + !< calculated upper bound on !< max(abs(arr)) integer, intent(in), optional :: arr_max_levels(nflds) @@ -307,13 +307,13 @@ subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & !< calculated maximum nsummands !< over all processes - integer, intent(in), optional :: gbl_count + integer, intent(in), optional :: gbl_count !< was total number of summands; - !< now is ignored; use + !< now is ignored; use !< gbl_max_nsummands instead logical, intent(in), optional :: repro_sum_validate - !< flag enabling/disabling testing that gmax and max_levels are + !< flag enabling/disabling testing that gmax and max_levels are !< accurate/sufficient. Default is enabled. integer, intent(inout), optional :: repro_sum_stats(5) @@ -326,21 +326,21 @@ subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & real(r8), intent(out), optional :: rel_diff(2,nflds) !< relative and absolute - !< differences between fixed + !< differences between fixed !< and floating point sums - integer, intent(in), optional :: commid + integer, intent(in), optional :: commid !< MPI communicator ! Local workspace logical :: use_ddpdd_sum ! flag indicating whether to ! use ice_reprosum_ddpdd or not - logical :: recompute ! flag indicating need to - ! determine gmax/gmin before + logical :: recompute ! flag indicating need to + ! determine gmax/gmin before ! computing sum - logical :: validate ! flag indicating need to - ! verify gmax and max_levels + logical :: validate ! flag indicating need to + ! verify gmax and max_levels ! are accurate/sufficient integer :: omp_nthreads ! number of OpenMP threads integer :: mpi_comm ! MPI subcommunicator @@ -365,12 +365,12 @@ subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & integer :: arr_lextremes(0:nflds,2)! local exponent extrema integer :: arr_gextremes(0:nflds,2)! global exponent extrema - integer :: arr_gmax_exp(nflds) ! 
global exponents maxima + integer :: arr_gmax_exp(nflds) ! global exponents maxima integer :: arr_gmin_exp(nflds) ! global exponents minima - integer :: arr_max_shift ! maximum safe exponent for + integer :: arr_max_shift ! maximum safe exponent for ! value < 1 (so that sum does ! not overflow) - integer :: max_levels(nflds) ! maximum number of levels of + integer :: max_levels(nflds) ! maximum number of levels of ! integer expansion to use integer :: max_level ! maximum value in max_levels integer :: gbl_max_red ! global max local sum reduction? (0/1) @@ -381,16 +381,16 @@ subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & real(r8) :: xmax_nsummands ! dble of max_nsummands real(r8) :: arr_lsum(nflds) ! local sums - real(r8) :: arr_gsum_fast(nflds) ! global sum calculated using - ! fast, nonreproducible, + real(r8) :: arr_gsum_fast(nflds) ! global sum calculated using + ! fast, nonreproducible, ! floating point alg. - real(r8) :: abs_diff ! absolute difference between - ! fixed and floating point + real(r8) :: abs_diff ! absolute difference between + ! fixed and floating point ! sums #ifdef _OPENMP integer omp_get_max_threads external omp_get_max_threads -#endif +#endif character(len=*),parameter :: subname = '(ice_reprosum_calc)' !----------------------------------------------------------------------- @@ -484,7 +484,7 @@ subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & if (.not. recompute) then -! determine maximum number of summands in local phases of the +! determine maximum number of summands in local phases of the ! algorithm ! if (detailed_timing) call xicex_timer_start("repro_sum_allr_max") if ( present(gbl_max_nsummands) ) then @@ -510,7 +510,7 @@ subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & endif ! if (detailed_timing) call xicex_timer_stop("repro_sum_allr_max") -! determine maximum shift. Shift needs to be small enough that summation +! determine maximum shift. Shift needs to be small enough that summation ! does not exceed maximum number of digits in i8. ! if requested, return max_nsummands before it is redefined @@ -545,7 +545,7 @@ subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & repro_sum_fast = 1 if (recompute) then repro_sum_both = 1 - else + else ! if requested, return specified levels and upper bounds on maxima if ( present(arr_max_levels_out) ) then do ifld=1,nflds @@ -561,7 +561,7 @@ subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & endif endif -! do not have sufficient information; calculate global max/min and +! do not have sufficient information; calculate global max/min and ! use to compute required number of levels if (recompute) then @@ -623,13 +623,13 @@ subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & arr_gmin_exp(:) = arr_gextremes(1:nflds,2) ! if a field is identically zero, arr_gmin_exp still equals MAXEXPONENT -! and arr_gmax_exp still equals MINEXPONENT. In this case, set +! and arr_gmax_exp still equals MINEXPONENT. In this case, set ! arr_gmin_exp = arr_gmax_exp = MINEXPONENT do ifld=1,nflds arr_gmin_exp(ifld) = min(arr_gmax_exp(ifld),arr_gmin_exp(ifld)) enddo -! if requested, return upper bounds on observed maxima +! if requested, return upper bounds on observed maxima if ( present(arr_gbl_max_out) ) then do ifld=1,nflds arr_gbl_max_out(ifld) = scale(1.0_r8,arr_gmax_exp(ifld)) @@ -642,7 +642,7 @@ subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & endif ! 
determine maximum shift (same as in previous branch, but with calculated -! max_nsummands). Shift needs to be small enough that summation does not +! max_nsummands). Shift needs to be small enough that summation does not ! exceed maximum number of digits in i8. ! summing within each thread first @@ -764,14 +764,14 @@ subroutine ice_reprosum_calc (arr, arr_gsum, nsummands, dsummands, & end subroutine ice_reprosum_calc !======================================================================== -! -! Purpose: -!> Compute the global sum of each field in "arr" using the indicated -!> communicator with a reproducible yet scalable implementation based +! +! Purpose: +!> Compute the global sum of each field in "arr" using the indicated +!> communicator with a reproducible yet scalable implementation based !> on a fixed point algorithm. The accuracy of the fixed point algorithm !> is controlled by the number of "levels" of integer expansion, the -!> maximum value of which is specified by max_level. -! +!> maximum value of which is specified by max_level. +! !---------------------------------------------------------------------- subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & @@ -786,29 +786,29 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & integer, intent(in) :: nsummands !< number of local summands integer, intent(in) :: dsummands !< declared first dimension integer, intent(in) :: nflds !< number of fields - integer, intent(in) :: arr_max_shift !< maximum safe exponent for - !< value < 1 (so that sum + integer, intent(in) :: arr_max_shift !< maximum safe exponent for + !< value < 1 (so that sum !< does not overflow) - integer, intent(in) :: arr_gmax_exp(nflds) + integer, intent(in) :: arr_gmax_exp(nflds) !< exponents of global maxima - integer, intent(in) :: max_levels(nflds) - !< maximum number of levels + integer, intent(in) :: max_levels(nflds) + !< maximum number of levels !< of integer expansion - integer, intent(in) :: max_level !< maximum value in + integer, intent(in) :: max_level !< maximum value in !< max_levels integer, intent(in) :: omp_nthreads !< number of OpenMP threads integer, intent(in) :: mpi_comm !< MPI subcommunicator - real(r8), intent(in) :: arr(dsummands,nflds) + real(r8), intent(in) :: arr(dsummands,nflds) !< input array logical, intent(in):: validate !< flag indicating that accuracy of solution generated from !< arr_gmax_exp and max_levels should be tested - logical, intent(out):: recompute + logical, intent(out):: recompute !< flag indicating that either the upper bounds are inaccurate, - !< or max_levels and arr_gmax_exp do not generate accurate + !< or max_levels and arr_gmax_exp do not generate accurate !< enough sums real(r8), intent(out):: arr_gsum(nflds) !< global means @@ -818,27 +818,27 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & integer, parameter :: max_jlevel = & 1 + (digits(0_i8)/digits(0.0_r8)) - integer(i8) :: i8_arr_tlsum_level(0:max_level,nflds,omp_nthreads) - ! integer vector representing local + integer(i8) :: i8_arr_tlsum_level(0:max_level,nflds,omp_nthreads) + ! integer vector representing local ! sum (per thread, per field) - integer(i8) :: i8_arr_lsum_level((max_level+3)*nflds) - ! integer vector representing local + integer(i8) :: i8_arr_lsum_level((max_level+3)*nflds) + ! integer vector representing local ! sum integer(i8) :: i8_arr_level ! integer part of summand for current ! 
expansion level - integer(i8) :: i8_arr_gsum_level((max_level+3)*nflds) + integer(i8) :: i8_arr_gsum_level((max_level+3)*nflds) ! integer vector representing global ! sum - integer(i8) :: IX_8 ! integer representation of current - ! jlevels of X_8 ('part' of + integer(i8) :: IX_8 ! integer representation of current + ! jlevels of X_8 ('part' of ! i8_arr_gsum_level) integer(i8) :: i8_sign ! sign global sum integer(i8) :: i8_radix ! radix for i8 variables - integer :: max_error(nflds,omp_nthreads) + integer :: max_error(nflds,omp_nthreads) ! accurate upper bound on data? - integer :: not_exact(nflds,omp_nthreads) - ! max_levels sufficient to + integer :: not_exact(nflds,omp_nthreads) + ! max_levels sufficient to ! capture all digits? integer :: isum_beg(omp_nthreads), isum_end(omp_nthreads) ! range of summand indices for each @@ -846,16 +846,16 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & integer :: ifld, isum, ithread ! loop variables integer :: arr_exp ! exponent of summand - integer :: arr_shift ! exponent used to generate integer + integer :: arr_shift ! exponent used to generate integer ! for current expansion level integer :: ilevel ! current integer expansion level - integer :: offset(nflds) ! beginning location in - ! i8_arr_{g,l}sum_level for integer + integer :: offset(nflds) ! beginning location in + ! i8_arr_{g,l}sum_level for integer ! expansion of current ifld - integer :: voffset ! modification to offset used to - ! include validation metrics + integer :: voffset ! modification to offset used to + ! include validation metrics integer :: ioffset ! offset(ifld) - integer :: jlevel ! number of floating point 'pieces' + integer :: jlevel ! number of floating point 'pieces' ! extracted from a given i8 integer integer :: ierr ! MPI error return integer :: LX(max_jlevel) ! exponent of X_8 (see below) @@ -868,13 +868,13 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & ! reconstruction from integer vector real(r8) :: arr_frac ! fraction of summand - real(r8) :: arr_remainder ! part of summand remaining after + real(r8) :: arr_remainder ! part of summand remaining after ! current level of integer expansion - real(r8) :: X_8(max_jlevel) ! r8 vector representation of current + real(r8) :: X_8(max_jlevel) ! r8 vector representation of current ! i8_arr_gsum_level - real(r8) :: RX_8 ! r8 representation of difference + real(r8) :: RX_8 ! r8 representation of difference ! between current i8_arr_gsum_level - ! and current jlevels of X_8 + ! and current jlevels of X_8 ! (== IX_8). Also used in final ! scaling step @@ -941,7 +941,7 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & ! calculate first shift arr_shift = arr_max_shift - (arr_gmax_exp(ifld)-arr_exp) -! determine first (probably) nonzero level (assuming initial fraction is +! determine first (probably) nonzero level (assuming initial fraction is ! 'normal' - algorithm still works if this is not true) ! NOTE: this is critical; scale will set to zero if min exponent is too small. if (arr_shift < 1) then @@ -957,7 +957,7 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & endif if (ilevel .le. max_levels(ifld)) then -! apply first shift/truncate, add it to the relevant running +! apply first shift/truncate, add it to the relevant running ! sum, and calculate the remainder. 
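The statements that follow implement this step. The shift/truncate/remainder loop can also be seen in isolation in the standalone sketch below; it is illustrative only and not part of this patch, and it uses a fixed per-level shift, whereas the routine derives arr_shift from arr_max_shift and the global exponent maxima:

   program fixed_point_demo
   implicit none
   integer, parameter :: r8 = selected_real_kind(13)
   integer, parameter :: i8 = selected_int_kind(18)
   integer, parameter :: nlev = 4, shift = 20  ! 20 bits captured per level
   real(r8)    :: x, rem, recon
   integer(i8) :: level_sum(nlev)
   integer     :: ilev

   x = 3.141592653589793_r8
   level_sum = 0_i8
   rem = fraction(x)                          ! fraction in [0.5,1)
   do ilev = 1, nlev
      rem = scale(rem, shift)                 ! shift remainder up
      level_sum(ilev) = int(rem, i8)          ! truncate to this level's integer
      rem = rem - real(level_sum(ilev), r8)   ! remainder for the next level
      if (rem == 0.0_r8) exit                 ! all digits captured
   enddo

   ! the level sums could now be combined across tasks with an integer
   ! MPI_Allreduce; here we just reconstruct the value, smallest level first
   recon = 0.0_r8
   do ilev = nlev, 1, -1
      recon = scale(recon + real(level_sum(ilev), r8), -shift)
   enddo
   recon = scale(recon, exponent(x))          ! reapply the exponent

   print *, 'x     =', x
   print *, 'recon =', recon                  ! matches x exactly

   end program fixed_point_demo

Because the per-level digits are integers, the cross-task sum is exact and independent of summation order, which is what makes the method bit-for-bit reproducible.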
arr_remainder = scale(arr_frac,arr_shift) i8_arr_level = int(arr_remainder,i8) @@ -965,7 +965,7 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & i8_arr_tlsum_level(ilevel,ifld,ithread) + i8_arr_level arr_remainder = arr_remainder - i8_arr_level -! while the remainder is non-zero, continue to shift, truncate, +! while the remainder is non-zero, continue to shift, truncate, ! sum, and calculate new remainder do while ((arr_remainder .ne. 0.0_r8) & .and. (ilevel < max_levels(ifld))) @@ -987,9 +987,9 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & enddo ! postprocess integer vector to eliminate potential for overlap in the following -! sums over threads and processes: if value larger than or equal to -! (radix(IX_8)**arr_max_shift), add this 'overlap' to next larger integer in -! vector, resulting in nonoverlapping ranges for each component. Note that +! sums over threads and processes: if value larger than or equal to +! (radix(IX_8)**arr_max_shift), add this 'overlap' to next larger integer in +! vector, resulting in nonoverlapping ranges for each component. Note that ! "ilevel-1==0" corresponds to an extra level used to guarantee that the sums ! over threads and processes do not overflow for ilevel==1. do ilevel=max_levels(ifld),1,-1 @@ -1036,12 +1036,12 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & #if defined (NO_I8) ! Workaround for when i8 is not supported. ! if (detailed_timing) call xicex_timer_start("repro_sum_allr_i4") - call mpi_allreduce (i8_arr_lsum_level, i8_arr_gsum_level, & + call mpi_allreduce (i8_arr_lsum_level, i8_arr_gsum_level, & veclth, MPI_INTEGER, MPI_SUM, mpi_comm, ierr) ! if (detailed_timing) call xicex_timer_stop("repro_sum_allr_i4") #else ! if (detailed_timing) call xicex_timer_start("repro_sum_allr_i8") - call mpi_allreduce (i8_arr_lsum_level, i8_arr_gsum_level, & + call mpi_allreduce (i8_arr_lsum_level, i8_arr_gsum_level, & veclth, MPI_INTEGER8, MPI_SUM, mpi_comm, ierr) ! if (detailed_timing) call xicex_timer_stop("repro_sum_allr_i8") #endif @@ -1056,10 +1056,10 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & ! alternate. To avoid this, do some arithmetic with integer vectors so that all ! components have the same sign. This should keep relative difference between ! using different integer sizes (e.g. i8 and i4) to machine epsilon -! 3) assignment to X_8 will usually lose accuracy since maximum integer -! size is greater than the max number of 'digits' in r8 value (if xmax_nsummands -! correction not very large). Calculate remainder and add in first (since -! smaller). One correction is sufficient for r8 (53 digits) and i8 (63 digits). +! 3) assignment to X_8 will usually lose accuracy since maximum integer +! size is greater than the max number of 'digits' in r8 value (if xmax_nsummands +! correction not very large). Calculate remainder and add in first (since +! smaller). One correction is sufficient for r8 (53 digits) and i8 (63 digits). ! For r4 (24 digits) may need to correct twice. Code is written in a general ! fashion, to work no matter how many corrections are necessary (assuming ! max_jlevel parameter calculation is correct). @@ -1080,7 +1080,7 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & if (.not. recompute) then ! preprocess integer vector: -! a) if value larger than or equal to (radix(IX_8)**arr_max_shift), add this 'overlap' +! 
a) if value larger than or equal to (radix(IX_8)**arr_max_shift), add this 'overlap' ! to next larger integer in vector, resulting in nonoverlapping ranges for each ! component. Note that have "ilevel-1=0" level here as described above. do ilevel=max_levels(ifld),1,-1 @@ -1094,9 +1094,9 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & - IX_8 endif enddo -! b) subtract +/- 1 from larger and add +/- 1 to smaller when necessary -! so that all vector components have the same sign (eliminating loss -! of accuracy arising from difference of large values when +! b) subtract +/- 1 from larger and add +/- 1 to smaller when necessary +! so that all vector components have the same sign (eliminating loss +! of accuracy arising from difference of large values when ! reconstructing r8 sum from integer vector) ilevel = 0 do while ((i8_arr_gsum_level(ioffset+ilevel) .eq. 0_i8) & @@ -1118,7 +1118,7 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & i8_arr_gsum_level(ioffset+jlevel+1) = i8_arr_gsum_level(ioffset+jlevel+1) & + i8_sign*(i8_radix**arr_max_shift) endif - enddo + enddo endif ! start with maximum shift, and work up to larger values @@ -1131,7 +1131,7 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & if (i8_arr_gsum_level(ioffset+ilevel) .ne. 0_i8) then jlevel = 1 -! r8 representation of higher order bits in integer +! r8 representation of higher order bits in integer X_8(jlevel) = i8_arr_gsum_level(ioffset+ilevel) LX(jlevel) = exponent(X_8(jlevel)) @@ -1149,7 +1149,7 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & enddo ! add in contributions, smaller to larger, rescaling for each -! addition to guarantee that exponent of working summand is always +! addition to guarantee that exponent of working summand is always ! larger than minexponent do while (jlevel > 0) if (first) then @@ -1173,7 +1173,7 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & ! apply final exponent correction, scaling first if exponent is too small ! to apply directly corr_exp = curr_exp + exponent(arr_gsum(ifld)) - if (corr_exp .ge. MINEXPONENT(1._r8)) then + if (corr_exp .ge. MINEXPONENT(1._r8)) then arr_gsum(ifld) = set_exponent(arr_gsum(ifld),corr_exp) else RX_8 = set_exponent(arr_gsum(ifld), & @@ -1181,9 +1181,9 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & arr_gsum(ifld) = scale(RX_8,MINEXPONENT(1._r8)) endif -! if validate is .true. and some precision lost, test whether 'too much' +! if validate is .true. and some precision lost, test whether 'too much' ! was lost, due to too loose an upper bound, too stringent a limit on number -! of levels of expansion, cancellation, .... Calculated by comparing lower +! of levels of expansion, cancellation, .... Calculated by comparing lower ! bound on number of sigificant digits with number of digits in 1.0_r8 . if (validate) then if (i8_arr_gsum_level(ioffset-voffset+2) .ne. 0_i8) then @@ -1217,11 +1217,11 @@ subroutine ice_reprosum_int (arr, arr_gsum, nsummands, dsummands, nflds, & end subroutine ice_reprosum_int !======================================================================== -! -! Purpose: +! +! Purpose: !> Test whether distributed sum exceeds tolerance and print out a !> warning message. -! +! 
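Before the function itself, a minimal sketch of the check it performs, assuming only the rel_diff layout documented in the argument list below (row 1 holds the relative difference and row 2 the absolute difference per field); the program and the threshold name tol are illustrative, not the CICE source:

   program tol_demo
   implicit none
   integer, parameter :: r8 = selected_real_kind(13)
   integer, parameter :: nflds = 2
   real(r8) :: rel_diff(2,nflds)
   real(r8), parameter :: tol = 1.0e-8_r8
   rel_diff(:,1) = (/ 3.0e-9_r8, 1.0e-12_r8 /)   ! within tolerance
   rel_diff(:,2) = (/ 2.0e-7_r8, 5.0e-10_r8 /)   ! exceeds tolerance
   if (any(rel_diff(1,:) > tol)) then
      print *, 'WARNING: reproducible and fast sums differ beyond tol'
   else
      print *, 'sums agree to within tol'
   endif
   end program tol_demo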
!---------------------------------------------------------------------- logical function ice_reprosum_tolExceeded (name, nflds, master, & @@ -1234,11 +1234,11 @@ logical function ice_reprosum_tolExceeded (name, nflds, master, & integer, intent(in) :: nflds !< number of fields logical, intent(in) :: master !< process that will write !< warning messages? - integer, optional, intent(in) :: logunit!< unit warning messages + integer, optional, intent(in) :: logunit!< unit warning messages !< written to real(r8), intent(in) :: rel_diff(2,nflds) !< relative and absolute - !< differences between fixed + !< differences between fixed !< and floating point sums ! Local workspace @@ -1302,12 +1302,12 @@ logical function ice_reprosum_tolExceeded (name, nflds, master, & end function ice_reprosum_tolExceeded !======================================================================== -! -! Purpose: -!> Compute the global sum of each field in "arr" using the indicated -!> communicator with a reproducible yet scalable implementation based +! +! Purpose: +!> Compute the global sum of each field in "arr" using the indicated +!> communicator with a reproducible yet scalable implementation based !> on He and Ding's implementation of the double-double algorithm. -! +! !---------------------------------------------------------------------- subroutine ice_reprosum_ddpdd (arr, arr_gsum, nsummands, dsummands, & @@ -1319,11 +1319,11 @@ subroutine ice_reprosum_ddpdd (arr, arr_gsum, nsummands, dsummands, & integer, intent(in) :: nsummands !< number of local summands integer, intent(in) :: dsummands !< declared first dimension integer, intent(in) :: nflds !< number of fields - real(r8), intent(in) :: arr(dsummands,nflds) + real(r8), intent(in) :: arr(dsummands,nflds) !< input array integer, intent(in) :: mpi_comm !< MPI subcommunicator - real(r8), intent(out):: arr_gsum(nflds) + real(r8), intent(out):: arr_gsum(nflds) !< global sums @@ -1361,8 +1361,8 @@ subroutine ice_reprosum_ddpdd (arr, arr_gsum, nsummands, dsummands, & arr_lsum_dd(ifld) = (0.0_r8,0.0_r8) do isum=1,nsummands - - ! Compute arr(isum,ifld) + arr_lsum_dd(ifld) using Knuth''s + + ! Compute arr(isum,ifld) + arr_lsum_dd(ifld) using Knuth''s ! trick. t1 = arr(isum,ifld) + real(arr_lsum_dd(ifld)) e = t1 - arr(isum,ifld) @@ -1394,11 +1394,11 @@ end subroutine ice_reprosum_ddpdd subroutine DDPDD (dda, ddb, len, itype) !---------------------------------------------------------------------- -! -! Purpose: -! Modification of original codes written by David H. Bailey +! +! Purpose: +! Modification of original codes written by David H. Bailey ! This subroutine computes ddb(i) = dda(i)+ddb(i) -! +! !---------------------------------------------------------------------- ! Arguments @@ -1434,10 +1434,10 @@ end subroutine DDPDD subroutine split_indices(total,num_pieces,ibeg,iend) !---------------------------------------------------------------------- -! -! Purpose: +! +! Purpose: ! Split range into 'num_pieces' -! +! !---------------------------------------------------------------------- ! Arguments diff --git a/cicecore/cicedynB/infrastructure/comm/serial/ice_timers.F90 b/cicecore/cicedynB/infrastructure/comm/serial/ice_timers.F90 index be6e12253..bbe2fd4d1 100644 --- a/cicecore/cicedynB/infrastructure/comm/serial/ice_timers.F90 +++ b/cicecore/cicedynB/infrastructure/comm/serial/ice_timers.F90 @@ -116,7 +116,7 @@ module ice_timers type (timer_data), dimension(max_timers) :: & all_timers ! 
timer data for all timers - integer (int_kind) :: & + integer (int_kind) :: & cycles_max ! max clock cycles allowed by system real (dbl_kind) :: & @@ -148,8 +148,8 @@ subroutine init_ice_timers !----------------------------------------------------------------------- ! ! Call F90 intrinsic system_clock to determine clock rate -! and maximum cycles for single-processor runs. If no clock -! available, print message. +! and maximum cycles for single-processor runs. If no clock +! available, print message. ! !----------------------------------------------------------------------- @@ -231,7 +231,7 @@ end subroutine init_ice_timers subroutine get_ice_timer(timer_id, name_choice, num_blocks, num_nodes) -! This routine initializes a timer with a given name and returns a +! This routine initializes a timer with a given name and returns a ! timer id. character (*), intent(in) :: & @@ -244,7 +244,7 @@ subroutine get_ice_timer(timer_id, name_choice, num_blocks, num_nodes) ! threaded region) integer (int_kind), intent(out) :: & - timer_id ! timer number assigned to this timer + timer_id ! timer number assigned to this timer !----------------------------------------------------------------------- ! @@ -275,7 +275,7 @@ subroutine get_ice_timer(timer_id, name_choice, num_blocks, num_nodes) all_timers(n)%name = name_choice all_timers(n)%in_use = .true. all_timers(n)%num_blocks = num_blocks - all_timers(n)%num_nodes = num_nodes + all_timers(n)%num_nodes = num_nodes allocate(all_timers(n)%block_started (num_blocks), & all_timers(n)%block_cycles1 (num_blocks), & @@ -293,7 +293,7 @@ subroutine get_ice_timer(timer_id, name_choice, num_blocks, num_nodes) if (srch_error /= 0) & call abort_ice(subname//'ERROR: Exceeded maximum number of timers') - + !----------------------------------------------------------------------- @@ -334,7 +334,7 @@ subroutine ice_timer_clear(timer_id) all_timers(timer_id)%block_accum_time(:) = c0 else call abort_ice(subname//'ERROR: attempt to reset undefined timer') - + endif !----------------------------------------------------------------------- @@ -396,7 +396,7 @@ subroutine ice_timer_start(timer_id, block_id) !*** another thread. if already started, keep track !*** of number of start requests in order to match !*** start and stop requests - + !$OMP CRITICAL if (.not. all_timers(timer_id)%node_started) then @@ -431,18 +431,18 @@ subroutine ice_timer_start(timer_id, block_id) endif else call abort_ice(subname//'ERROR: attempt to start undefined timer') - + endif !----------------------------------------------------------------------- end subroutine ice_timer_start - + !*********************************************************************** subroutine ice_timer_stop(timer_id, block_id) -! This routine stops a given node timer if appropriate. If block +! This routine stops a given node timer if appropriate. If block ! information is available the appropriate block timer is also stopped. 
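The matched start/stop bookkeeping used by these routines can be illustrated with a small standalone module; the names here are hypothetical stand-ins for the node_started bookkeeping in timer_data, and the real module additionally tracks per-block timers and cycle wraparound:

   module timer_demo
   implicit none
   integer :: nstarts = 0          ! outstanding start requests
   integer :: t0 = 0               ! clock count at first start
   real    :: accum = 0.0          ! accumulated seconds

   contains

   subroutine demo_timer_start
   !$OMP CRITICAL
      if (nstarts == 0) call system_clock(t0)   ! first start records the clock
      nstarts = nstarts + 1                     ! count every start request
   !$OMP END CRITICAL
   end subroutine demo_timer_start

   subroutine demo_timer_stop
      integer :: t1, rate
   !$OMP CRITICAL
      nstarts = nstarts - 1
      if (nstarts == 0) then                    ! only the last matching stop
         call system_clock(t1, rate)            ! accumulates elapsed time
         accum = accum + real(t1 - t0)/real(max(rate,1))
      endif
   !$OMP END CRITICAL
   end subroutine demo_timer_stop

   end module timer_demo

Counting start requests under a critical section is what prevents one thread's stop from terminating a timer that another thread still holds open.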
integer (int_kind), intent(in) :: & @@ -513,7 +513,7 @@ subroutine ice_timer_stop(timer_id, block_id) !*** stop node timer if number of requested stops !*** matches the number of starts (to avoid stopping !*** a node timer started by multiple threads) - + cycles1 = all_timers(timer_id)%node_cycles1 !$OMP CRITICAL @@ -566,13 +566,13 @@ subroutine ice_timer_stop(timer_id, block_id) endif else call abort_ice(subname//'ERROR: attempt to stop undefined timer') - + endif !----------------------------------------------------------------------- end subroutine ice_timer_stop - + !*********************************************************************** subroutine ice_timer_print(timer_id,stats) @@ -648,7 +648,7 @@ subroutine ice_timer_print(timer_id,stats) local_time = c0 endif max_time = global_maxval(local_time,distrb_info) - + if (my_task == master_task) then write (nu_diag,timer_format) timer_id, & trim(all_timers(timer_id)%name),max_time @@ -713,7 +713,7 @@ subroutine ice_timer_print(timer_id,stats) if (lrestart_timer) call ice_timer_start(timer_id) else call abort_ice(subname//'ERROR: attempt to print undefined timer') - + endif !----------------------------------------------------------------------- @@ -771,7 +771,7 @@ end subroutine ice_timer_print_all subroutine ice_timer_check(timer_id,block_id) ! This routine checks a given timer by stopping and restarting the -! timer. This is primarily used to periodically accumulate time in +! timer. This is primarily used to periodically accumulate time in ! the timer to prevent timer cycles from wrapping around max_cycles. integer (int_kind), intent(in) :: & diff --git a/cicecore/cicedynB/infrastructure/ice_blocks.F90 b/cicecore/cicedynB/infrastructure/ice_blocks.F90 index 74aba9cb5..fb7483914 100644 --- a/cicecore/cicedynB/infrastructure/ice_blocks.F90 +++ b/cicecore/cicedynB/infrastructure/ice_blocks.F90 @@ -3,8 +3,8 @@ module ice_blocks ! This module contains data types and tools for decomposing a global -! horizontal domain into a set of blocks. It contains a data type -! for describing each block and contains routines for creating and +! horizontal domain into a set of blocks. It contains a data type +! for describing each block and contains routines for creating and ! querying the block decomposition for a global domain. ! ! author: Phil Jones, LANL @@ -46,7 +46,7 @@ module ice_blocks nx_block, ny_block ! x,y dir including ghost ! predefined directions for neighbor id routine - ! Note: the directions that are commented out are implemented in + ! Note: the directions that are commented out are implemented in ! POP but not in CICE. If the tripole cut were in the south ! instead of the north, these would need to be used (and also ! implemented in ice_boundary.F90). @@ -314,11 +314,12 @@ subroutine create_blocks(nx_global, ny_global, ew_boundary_type, & if (debug_blocks) then if (my_task == master_task) then - write(nu_diag,*) 'block i,j locations' + write(nu_diag,*) ' ' + write(nu_diag,'(2a)') subname,' block ID, iblock, jblock Locations:' do n = 1, nblocks_tot - write(nu_diag,*) 'block id, iblock, jblock, tripole:', & + write(nu_diag,'(2a,3i8,l4)') subname,' global block ID, iblock, jblock, tripole:', & all_blocks(n)%block_id, & - all_blocks(n)%iblock, & + all_blocks(n)%iblock, & all_blocks(n)%jblock, & all_blocks(n)%tripole enddo @@ -380,7 +381,7 @@ function ice_blocksGetNbrID(blockID, direction, iBoundary, jBoundary) & ! local variables ! 
!---------------------------------------------------------------------- - + integer (int_kind) :: & iBlock, jBlock, &! i,j block location of current block inbr, jnbr ! i,j block location of neighboring block @@ -394,6 +395,7 @@ function ice_blocksGetNbrID(blockID, direction, iBoundary, jBoundary) & !---------------------------------------------------------------------- call get_block_parameter(blockID, iblock=iBlock, jblock=jBlock) + nbrID = 0 ! initial default !---------------------------------------------------------------------- ! @@ -422,7 +424,7 @@ function ice_blocksGetNbrID(blockID, direction, iBoundary, jBoundary) & !*** other points if there has been padding or !*** if the block size does not divide the domain !*** evenly - inbr = nblocks_x - iBlock + 1 + inbr = nblocks_x - iBlock + 1 jnbr = -jBlock case default call abort_ice(subname//'ERROR: unknown north boundary') @@ -515,7 +517,7 @@ function ice_blocksGetNbrID(blockID, direction, iBoundary, jBoundary) & !*** other points if there has been padding or !*** if the block size does not divide the domain !*** evenly - inbr = nblocks_x - iBlock + inbr = nblocks_x - iBlock if (inbr == 0) inbr = nblocks_x jnbr = -jBlock case default @@ -554,7 +556,7 @@ function ice_blocksGetNbrID(blockID, direction, iBoundary, jBoundary) & !*** other points if there has been padding or !*** if the block size does not divide the domain !*** evenly - inbr = nblocks_x - iBlock + 2 + inbr = nblocks_x - iBlock + 2 if (inbr > nblocks_x) inbr = 1 jnbr = -jBlock case default @@ -691,7 +693,7 @@ function ice_blocksGetNbrID(blockID, direction, iBoundary, jBoundary) & !*** other points if there has been padding or !*** if the block size does not divide the domain !*** evenly - inbr = nblocks_x - iBlock - 1 + inbr = nblocks_x - iBlock - 1 if (inbr <= 0) inbr = inbr + nblocks_x jnbr = -jBlock case default @@ -799,7 +801,7 @@ end function get_block !********************************************************************** - subroutine get_block_parameter(block_id, local_id, & + subroutine get_block_parameter(block_id, local_id, & ilo, ihi, jlo, jhi, & iblock, jblock, tripole, & i_glob, j_glob) diff --git a/cicecore/cicedynB/infrastructure/ice_domain.F90 b/cicecore/cicedynB/infrastructure/ice_domain.F90 index 79f5bcb9a..ac56356e5 100644 --- a/cicecore/cicedynB/infrastructure/ice_domain.F90 +++ b/cicecore/cicedynB/infrastructure/ice_domain.F90 @@ -18,7 +18,7 @@ module ice_domain use ice_kinds_mod use ice_constants, only: shlat, nhlat use ice_communicate, only: my_task, master_task, get_num_procs, & - add_mpi_barriers + add_mpi_barriers, ice_barrier use ice_broadcast, only: broadcast_scalar, broadcast_array use ice_blocks, only: block, get_block, create_blocks, nghost, & nblocks_x, nblocks_y, nblocks_tot, nx_block, ny_block, debug_blocks @@ -26,7 +26,7 @@ module ice_domain use ice_boundary, only: ice_halo use ice_exit, only: abort_ice use ice_fileunits, only: nu_nml, nml_filename, nu_diag, & - get_fileunit, release_fileunit + get_fileunit, release_fileunit, flush_fileunit use icepack_intfc, only: icepack_warnings_flush, icepack_warnings_aborted use icepack_intfc, only: icepack_query_parameters @@ -79,7 +79,7 @@ module ice_domain distribution_type, &! method to use for distributing blocks ! 'cartesian', 'roundrobin', 'sectrobin', 'sectcart' ! 'rake', 'spacecurve', etc - distribution_wght ! method for weighting work per block + distribution_wght ! method for weighting work per block ! 'block' = POP default configuration ! 'blockall' = no land block elimination ! 
'latitude' = no. ocean points * |lat| @@ -326,6 +326,7 @@ subroutine init_domain_distribution(KMTG,ULATG,grid_ice) ig,jg ,&! global indices igm1,igp1,jgm1,jgp1,&! global indices ninfo ,&! ice_distributionGet check + np, nlb, m ,&! debug blocks temporaries work_unit ,&! size of quantized work unit #ifdef USE_NETCDF fid ,&! file id @@ -357,7 +358,7 @@ subroutine init_domain_distribution(KMTG,ULATG,grid_ice) ! ! check that there are at least nghost+1 rows or columns of land cells ! for closed boundary conditions (otherwise grid lengths are zero in -! cells neighboring ocean points). +! cells neighboring ocean points). ! !---------------------------------------------------------------------- @@ -526,12 +527,12 @@ subroutine init_domain_distribution(KMTG,ULATG,grid_ice) KMTG(igm1,jg ) > puny .or. KMTG(igp1,jg ) > puny .or. & KMTG(ig ,jgp1) > puny .or. KMTG(ig ,jgm1) > puny) .and. & (ULATG(ig,jg) < shlat/rad_to_deg .or. & - ULATG(ig,jg) > nhlat/rad_to_deg) ) & + ULATG(ig,jg) > nhlat/rad_to_deg) ) & nocn(n) = nocn(n) + flat(ig,jg) else if (KMTG(ig,jg) > puny .and. & (ULATG(ig,jg) < shlat/rad_to_deg .or. & - ULATG(ig,jg) > nhlat/rad_to_deg) ) & + ULATG(ig,jg) > nhlat/rad_to_deg) ) & nocn(n) = nocn(n) + flat(ig,jg) endif endif @@ -543,7 +544,7 @@ subroutine init_domain_distribution(KMTG,ULATG,grid_ice) !*** points, so where the block is not completely land, !*** reset nocn to be the full size of the block - ! use processor_shape = 'square-pop' and distribution_wght = 'block' + ! use processor_shape = 'square-pop' and distribution_wght = 'block' ! to make CICE and POP decompositions/distributions identical. #ifdef CICE_IN_NEMO @@ -596,8 +597,41 @@ subroutine init_domain_distribution(KMTG,ULATG,grid_ice) call create_local_block_ids(blocks_ice, distrb_info) + ! write out block distribution ! 
internal check of icedistributionGet as part of verification process if (debug_blocks) then + + call flush_fileunit(nu_diag) + call ice_barrier() + if (my_task == master_task) then + write(nu_diag,*) ' ' + write(nu_diag,'(2a)') subname, ' Blocks by Proc:' + endif + call ice_distributionGet(distrb_info, nprocs=np, numLocalBlocks=nlb) + do m = 1, np + if (m == my_task+1) then + do n=1,nlb + write(nu_diag,'(2a,3i8)') & + subname,' my_task, local block ID, global block ID: ', & + my_task, n, distrb_info%blockGlobalID(n) + enddo + call flush_fileunit(nu_diag) + endif + call ice_barrier() + enddo + + if (my_task == master_task) then + write(nu_diag,*) ' ' + write(nu_diag,'(2a)') subname, ' Blocks by Global Block ID:' + do m = 1, nblocks_tot + write(nu_diag,'(2a,3i8)') & + subname,' global block id, proc, local block ID: ', & + m, distrb_info%blockLocation(m), distrb_info%blockLocalID(m) + enddo + call flush_fileunit(nu_diag) + endif + call ice_barrier() + call ice_distributionGet(distrb_info, nprocs=ninfo) if (ninfo /= distrb_info%nprocs) & call abort_ice(subname//' ice_distributionGet nprocs ERROR', file=__FILE__, line=__LINE__) @@ -635,8 +669,11 @@ subroutine init_domain_distribution(KMTG,ULATG,grid_ice) deallocate(blkinfo) - if (my_task == master_task) & - write(nu_diag,*) subname,' ice_distributionGet checks pass' + if (my_task == master_task) then + write(nu_diag,*) ' ' + write(nu_diag,'(2a)') subname,' ice_distributionGet checks pass' + write(nu_diag,*) ' ' + endif endif if (associated(blocks_ice)) then diff --git a/cicecore/cicedynB/infrastructure/ice_grid.F90 b/cicecore/cicedynB/infrastructure/ice_grid.F90 index 84f9f6547..523c7ea2c 100644 --- a/cicecore/cicedynB/infrastructure/ice_grid.F90 +++ b/cicecore/cicedynB/infrastructure/ice_grid.F90 @@ -14,7 +14,7 @@ ! 2006: Converted to free source form (F90) by Elizabeth Hunke ! 2007: Option to read from netcdf files (A. Keen, Met Office) ! Grid reading routines reworked by E. Hunke for boundary values -! 2021: Add N (center of north face) and E (center of east face) grids +! 2021: Add N (center of north face) and E (center of east face) grids ! to support C and CD solvers. Defining T at center of cells, U at ! NE corner, N at center of top face, E at center of right face. ! All cells are quadrilaterals with NE, E, and N associated with @@ -55,7 +55,7 @@ module ice_grid kmt_type , & ! options are file, default, boxislands bathymetry_file, & ! input bathymetry for seabed stress bathymetry_format, & ! bathymetry file format (default or pop) - grid_spacing , & ! default of 30.e3m or set by user in namelist + grid_spacing , & ! default of 30.e3m or set by user in namelist grid_ice , & ! Underlying model grid structure (A, B, C, CD) grid_ice_thrm, & ! ocean forcing grid for thermo fields (T, U, N, E) grid_ice_dynu, & ! ocean forcing grid for dyn U fields (T, U, N, E) @@ -111,12 +111,12 @@ module ice_grid G_HTN ! length of northern edge of T-cell (global ext.) real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & - cyp , & ! 1.5*HTE(i,j)-0.5*HTW(i,j) = 1.5*HTE(i,j)-0.5*HTE(i-1,j) + cyp , & ! 1.5*HTE(i,j)-0.5*HTW(i,j) = 1.5*HTE(i,j)-0.5*HTE(i-1,j) cxp , & ! 1.5*HTN(i,j)-0.5*HTS(i,j) = 1.5*HTN(i,j)-0.5*HTN(i,j-1) - cym , & ! 0.5*HTE(i,j)-1.5*HTW(i,j) = 0.5*HTE(i,j)-1.5*HTE(i-1,j) - cxm , & ! 0.5*HTN(i,j)-1.5*HTS(i,j) = 0.5*HTN(i,j)-1.5*HTN(i,j-1) + cym , & ! 0.5*HTE(i,j)-1.5*HTW(i,j) = 0.5*HTE(i,j)-1.5*HTE(i-1,j) + cxm , & ! 0.5*HTN(i,j)-1.5*HTS(i,j) = 0.5*HTN(i,j)-1.5*HTN(i,j-1) dxhy , & ! 
0.5*(HTE(i,j) - HTW(i,j)) = 0.5*(HTE(i,j) - HTE(i-1,j)) - dyhx ! 0.5*(HTN(i,j) - HTS(i,j)) = 0.5*(HTN(i,j) - HTN(i,j-1)) + dyhx ! 0.5*(HTN(i,j) - HTS(i,j)) = 0.5*(HTN(i,j) - HTN(i,j-1)) real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & ratiodxN , & ! - dxN(i+1,j) / dxN(i,j) @@ -157,14 +157,14 @@ module ice_grid dimension (:,:,:,:,:), allocatable, public :: & mne, & ! matrices used for coordinate transformations in remapping mnw, & ! ne = northeast corner, nw = northwest, etc. - mse, & + mse, & msw ! masks real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & hm , & ! land/boundary mask, thickness (T-cell) bm , & ! task/block id - uvm , & ! land/boundary mask (U-cell) + uvm , & ! land/boundary mask (U-cell) npm , & ! land/boundary mask (N-cell) epm , & ! land/boundary mask (E-cell) kmt ! ocean topography mask for bathymetry (T-cell) @@ -205,7 +205,7 @@ module ice_grid !======================================================================= ! -! Allocate space for all variables +! Allocate space for all variables ! subroutine alloc_grid @@ -311,11 +311,11 @@ end subroutine alloc_grid !======================================================================= ! Distribute blocks across processors. The distribution is optimized -! based on latitude and topography, contained in the ULAT and KMT arrays. +! based on latitude and topography, contained in the ULAT and KMT arrays. ! ! authors: William Lipscomb and Phil Jones, LANL - subroutine init_grid1 + subroutine init_grid1 use ice_blocks, only: nx_block, ny_block use ice_broadcast, only: broadcast_array @@ -487,7 +487,7 @@ subroutine init_grid2 call popgrid_nc ! read POP grid lengths from nc file else call popgrid ! read POP grid lengths directly - endif + endif #ifdef CESMCOUPLED elseif (trim(grid_type) == 'latlon') then call latlongrid ! lat lon grid for sequential CESM (CAM mode) @@ -507,7 +507,7 @@ subroutine init_grid2 !$OMP PARALLEL DO ORDERED PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks if (my_task == master_task) then - !$OMP ORDERED + !$OMP ORDERED if (iblk == 1) then call omp_get_schedule(ompsk,ompcs) write(nu_diag,*) '' @@ -516,7 +516,7 @@ subroutine init_grid2 endif write(nu_diag,*) subname,' block, thread = ',iblk,OMP_GET_THREAD_NUM() call flush_fileunit(nu_diag) - !$OMP END ORDERED + !$OMP END ORDERED endif enddo !$OMP END PARALLEL DO @@ -581,8 +581,8 @@ subroutine init_grid2 cyp(i,j,iblk) = (c1p5*HTE(i,j,iblk) - p5*HTE(i-1,j,iblk)) cxp(i,j,iblk) = (c1p5*HTN(i,j,iblk) - p5*HTN(i,j-1,iblk)) ! match order of operations in cyp, cxp for tripole grids - cym(i,j,iblk) = -(c1p5*HTE(i-1,j,iblk) - p5*HTE(i,j,iblk)) - cxm(i,j,iblk) = -(c1p5*HTN(i,j-1,iblk) - p5*HTN(i,j,iblk)) + cym(i,j,iblk) = -(c1p5*HTE(i-1,j,iblk) - p5*HTE(i,j,iblk)) + cxm(i,j,iblk) = -(c1p5*HTN(i,j-1,iblk) - p5*HTN(i,j,iblk)) enddo enddo @@ -704,7 +704,7 @@ subroutine init_grid2 enddo !$OMP END PARALLEL DO endif ! regional - + call ice_timer_start(timer_bound) call ice_HaloUpdate (ANGLET, halo_info, & field_loc_center, field_type_angle, & @@ -760,7 +760,7 @@ end subroutine init_grid2 !======================================================================= -! POP displaced pole grid and land mask (or tripole). +! POP displaced pole grid and land mask (or tripole). ! Grid record number, field and units are: \\ ! (1) ULAT (radians) \\ ! (2) ULON (radians) \\ @@ -768,7 +768,7 @@ end subroutine init_grid2 ! (4) HTE (cm) \\ ! (5) HUS (cm) \\ ! (6) HUW (cm) \\ -! (7) ANGLE (radians) +! (7) ANGLE (radians) ! ! 
Land mask record number and field is (1) KMT. ! @@ -809,7 +809,7 @@ subroutine popgrid !----------------------------------------------------------------- call ice_read(nu_kmt,1,work1,'ida4',diag, & - field_loc=field_loc_center, & + field_loc=field_loc_center, & field_type=field_type_scalar) hm (:,:,:) = c0 @@ -838,14 +838,14 @@ subroutine popgrid allocate(work_g1(nx_global,ny_global)) call ice_read_global(nu_grid,1,work_g1,'rda8',.true.) ! ULAT - call gridbox_verts(work_g1,latt_bounds) + call gridbox_verts(work_g1,latt_bounds) call scatter_global(ULAT, work_g1, master_task, distrb_info, & field_loc_NEcorner, field_type_scalar) call ice_HaloExtrapolate(ULAT, distrb_info, & ew_boundary_type, ns_boundary_type) call ice_read_global(nu_grid,2,work_g1,'rda8',.true.) ! ULON - call gridbox_verts(work_g1,lont_bounds) + call gridbox_verts(work_g1,lont_bounds) call scatter_global(ULON, work_g1, master_task, distrb_info, & field_loc_NEcorner, field_type_scalar) call ice_HaloExtrapolate(ULON, distrb_info, & @@ -857,7 +857,7 @@ subroutine popgrid !----------------------------------------------------------------- ! cell dimensions - ! calculate derived quantities from global arrays to preserve + ! calculate derived quantities from global arrays to preserve ! information on boundaries !----------------------------------------------------------------- @@ -926,7 +926,7 @@ subroutine popgrid_nc type (block) :: & this_block ! block information for current block - + integer(kind=int_kind) :: & varid integer (kind=int_kind) :: & @@ -952,7 +952,7 @@ subroutine popgrid_nc fieldname='kmt' call ice_read_nc(fid_kmt,1,fieldname,work1,diag, & - field_loc=field_loc_center, & + field_loc=field_loc_center, & field_type=field_type_scalar) hm (:,:,:) = c0 @@ -982,7 +982,7 @@ subroutine popgrid_nc fieldname='ulat' call ice_read_global_nc(fid_grid,1,fieldname,work_g1,diag) ! ULAT - call gridbox_verts(work_g1,latt_bounds) + call gridbox_verts(work_g1,latt_bounds) call scatter_global(ULAT, work_g1, master_task, distrb_info, & field_loc_NEcorner, field_type_scalar) call ice_HaloExtrapolate(ULAT, distrb_info, & @@ -990,7 +990,7 @@ subroutine popgrid_nc fieldname='ulon' call ice_read_global_nc(fid_grid,1,fieldname,work_g1,diag) ! ULON - call gridbox_verts(work_g1,lont_bounds) + call gridbox_verts(work_g1,lont_bounds) call scatter_global(ULON, work_g1, master_task, distrb_info, & field_loc_NEcorner, field_type_scalar) call ice_HaloExtrapolate(ULON, distrb_info, & @@ -1017,7 +1017,7 @@ subroutine popgrid_nc endif call broadcast_scalar(l_readCenter,master_task) if (l_readCenter) then - call ice_read_global_nc(fid_grid,1,fieldname,work_g1,diag) + call ice_read_global_nc(fid_grid,1,fieldname,work_g1,diag) call scatter_global(ANGLET, work_g1, master_task, distrb_info, & field_loc_center, field_type_angle) where (ANGLET > pi) ANGLET = pi @@ -1033,7 +1033,7 @@ subroutine popgrid_nc endif !----------------------------------------------------------------- ! cell dimensions - ! calculate derived quantities from global arrays to preserve + ! calculate derived quantities from global arrays to preserve ! information on boundaries !----------------------------------------------------------------- @@ -1060,7 +1060,7 @@ end subroutine popgrid_nc #ifdef CESMCOUPLED !======================================================================= -! Read in kmt file that matches CAM lat-lon grid and has single column +! Read in kmt file that matches CAM lat-lon grid and has single column ! functionality ! author: Mariana Vertenstein ! 
2007: Elizabeth Hunke upgraded to netcdf90 and cice ncdf calls @@ -1077,8 +1077,8 @@ subroutine latlongrid #endif integer (kind=int_kind) :: & - i, j, iblk - + i, j, iblk + integer (kind=int_kind) :: & ni, nj, ncid, dimid, varid, ier @@ -1106,7 +1106,7 @@ subroutine latlongrid status ! status flag real (kind=dbl_kind), allocatable :: & - lats(:),lons(:),pos_lons(:), glob_grid(:,:) ! temporaries + lats(:),lons(:),pos_lons(:), glob_grid(:,:) ! temporaries real (kind=dbl_kind) :: & pos_scmlon,& ! temporary @@ -1175,12 +1175,12 @@ subroutine latlongrid status = nf90_get_var(ncid, varid, glob_grid, start3, count3) if (status /= nf90_noerr) call abort_ice (subname//' get_var yc') do j = 1,nj - lats(j) = glob_grid(1,j) + lats(j) = glob_grid(1,j) end do - + ! convert lons array and scmlon to 0,360 and find index of value closest to 0 ! and obtain single-column longitude/latitude indices to retrieve - + pos_lons(:)= mod(lons(:) + 360._dbl_kind,360._dbl_kind) pos_scmlon = mod(scmlon + 360._dbl_kind,360._dbl_kind) start(1) = (MINLOC(abs(pos_lons-pos_scmlon),dim=1)) @@ -1267,7 +1267,7 @@ subroutine latlongrid ! Calculate various geometric 2d arrays ! The U grid (velocity) is not used when run with sequential CAM ! because we only use thermodynamic sea ice. However, ULAT is used - ! in the default initialization of CICE so we calculate it here as + ! in the default initialization of CICE so we calculate it here as ! a "dummy" so that CICE will initialize with ice. If a no ice ! initialization is OK (or desired) this can be commented out and ! ULAT will remain 0 as specified above. ULAT is located at the @@ -1298,12 +1298,12 @@ subroutine latlongrid uarear(i,j,iblk) = c1/uarea(i,j,iblk) if (single_column) then - ULAT (i,j,iblk) = TLAT(i,j,iblk)+(pi/nj) + ULAT (i,j,iblk) = TLAT(i,j,iblk)+(pi/nj) else if (ny_global == 1) then ULAT (i,j,iblk) = TLAT(i,j,iblk) else - ULAT (i,j,iblk) = TLAT(i,j,iblk)+(pi/ny_global) + ULAT (i,j,iblk) = TLAT(i,j,iblk)+(pi/ny_global) endif endif ULON (i,j,iblk) = c0 @@ -1311,9 +1311,9 @@ subroutine latlongrid NLAT (i,j,iblk) = c0 ELON (i,j,iblk) = c0 ELAT (i,j,iblk) = c0 - ANGLE (i,j,iblk) = c0 + ANGLE (i,j,iblk) = c0 - ANGLET(i,j,iblk) = c0 + ANGLET(i,j,iblk) = c0 HTN (i,j,iblk) = 1.e36_dbl_kind HTE (i,j,iblk) = 1.e36_dbl_kind dxT (i,j,iblk) = 1.e36_dbl_kind @@ -1351,13 +1351,12 @@ end subroutine latlongrid subroutine rectgrid - use ice_blocks, only: nx_block, ny_block use ice_constants, only: c0, c1, c2, radius, cm_to_m, & field_loc_center, field_loc_NEcorner, field_type_scalar use ice_domain, only: close_boundaries integer (kind=int_kind) :: & - i, j, iblk, & + i, j, & imid, jmid real (kind=dbl_kind) :: & @@ -1552,8 +1551,8 @@ subroutine grid_boxislands_kmt (work) if (nxb < 1 .or. nyb < 1) & call abort_ice(subname//'ERROR: requires larger grid size') - - ! initialize work area as all ocean (c1). + + ! initialize work area as all ocean (c1). work(:,:) = c1 ! now add land points (c0) @@ -1956,7 +1955,7 @@ subroutine primary_grid_lengths_HTE(work_g) do i = 1, nx_global ! assume cyclic; noncyclic will be handled during scatter im1 = i-1 - if (i == 1) im1 = nx_global + if (i == 1) im1 = nx_global work_g2(i,j) = p25*(work_g(i,j) + work_g(im1,j) + work_g(i,j+1) + work_g(im1,j+1)) ! dyN enddo enddo @@ -1965,7 +1964,7 @@ subroutine primary_grid_lengths_HTE(work_g) do i = 1, nx_global ! 
assume cyclic; noncyclic will be handled during scatter im1 = i-1 - if (i == 1) im1 = nx_global + if (i == 1) im1 = nx_global work_g2(i,ny_global) = p5*(c2*work_g(i ,ny_global-1) - work_g(i ,ny_global-2) + & c2*work_g(im1,ny_global-1) - work_g(im1,ny_global-2)) ! dyN enddo @@ -2232,7 +2231,7 @@ subroutine Tlatlon ! the prior atan2 call ??? not sure what's going on. #if (1 == 1) enddo ! i - enddo ! j + enddo ! j enddo ! iblk !$OMP END PARALLEL DO @@ -2304,9 +2303,9 @@ subroutine Tlatlon ! ELAT in radians North ELAT(i,j,iblk) = asin(tz) - + enddo ! i - enddo ! j + enddo ! j enddo ! iblk !$OMP END PARALLEL DO @@ -2827,12 +2826,12 @@ subroutine grid_average_X2YS(dir,work1,wght1,mask1,work2) do i = ilo, ihi wtmp = (mask1(i ,j ,iblk)*wght1(i ,j ,iblk) & + mask1(i-1,j ,iblk)*wght1(i-1,j ,iblk) & - + mask1(i ,j-1,iblk)*wght1(i ,j-1,iblk) & + + mask1(i ,j-1,iblk)*wght1(i ,j-1,iblk) & + mask1(i-1,j-1,iblk)*wght1(i-1,j-1,iblk)) if (wtmp /= c0) & work2(i,j,iblk) = (mask1(i ,j ,iblk)*work1(i ,j ,iblk)*wght1(i ,j ,iblk) & + mask1(i-1,j ,iblk)*work1(i-1,j ,iblk)*wght1(i-1,j ,iblk) & - + mask1(i ,j-1,iblk)*work1(i ,j-1,iblk)*wght1(i ,j-1,iblk) & + + mask1(i ,j-1,iblk)*work1(i ,j-1,iblk)*wght1(i ,j-1,iblk) & + mask1(i-1,j-1,iblk)*work1(i-1,j-1,iblk)*wght1(i-1,j-1,iblk)) & / wtmp enddo @@ -2876,12 +2875,12 @@ subroutine grid_average_X2YS(dir,work1,wght1,mask1,work2) do j = jlo, jhi do i = ilo, ihi wtmp = (mask1(i ,j-1,iblk)*wght1(i ,j-1,iblk) & - + mask1(i+1,j-1,iblk)*wght1(i+1,j-1,iblk) & + + mask1(i+1,j-1,iblk)*wght1(i+1,j-1,iblk) & + mask1(i ,j ,iblk)*wght1(i ,j ,iblk) & + mask1(i+1,j ,iblk)*wght1(i+1,j ,iblk)) if (wtmp /= c0) & work2(i,j,iblk) = (mask1(i ,j-1,iblk)*work1(i ,j-1,iblk)*wght1(i ,j-1,iblk) & - + mask1(i+1,j-1,iblk)*work1(i+1,j-1,iblk)*wght1(i+1,j-1,iblk) & + + mask1(i+1,j-1,iblk)*work1(i+1,j-1,iblk)*wght1(i+1,j-1,iblk) & + mask1(i ,j ,iblk)*work1(i ,j ,iblk)*wght1(i ,j ,iblk) & + mask1(i+1,j ,iblk)*work1(i+1,j ,iblk)*wght1(i+1,j ,iblk)) & / wtmp @@ -3057,12 +3056,12 @@ subroutine grid_average_X2YA(dir,work1,wght1,work2) do i = ilo, ihi wtmp = (wght1(i ,j ,iblk) & + wght1(i-1,j ,iblk) & - + wght1(i ,j-1,iblk) & + + wght1(i ,j-1,iblk) & + wght1(i-1,j-1,iblk)) if (wtmp /= c0) & work2(i,j,iblk) = (work1(i ,j ,iblk)*wght1(i ,j ,iblk) & + work1(i-1,j ,iblk)*wght1(i-1,j ,iblk) & - + work1(i ,j-1,iblk)*wght1(i ,j-1,iblk) & + + work1(i ,j-1,iblk)*wght1(i ,j-1,iblk) & + work1(i-1,j-1,iblk)*wght1(i-1,j-1,iblk)) & / wtmp enddo @@ -3106,12 +3105,12 @@ subroutine grid_average_X2YA(dir,work1,wght1,work2) do j = jlo, jhi do i = ilo, ihi wtmp = (wght1(i ,j-1,iblk) & - + wght1(i+1,j-1,iblk) & + + wght1(i+1,j-1,iblk) & + wght1(i ,j ,iblk) & + wght1(i+1,j ,iblk)) if (wtmp /= c0) & work2(i,j,iblk) = (work1(i ,j-1,iblk)*wght1(i ,j-1,iblk) & - + work1(i+1,j-1,iblk)*wght1(i+1,j-1,iblk) & + + work1(i+1,j-1,iblk)*wght1(i+1,j-1,iblk) & + work1(i ,j ,iblk)*wght1(i ,j ,iblk) & + work1(i+1,j ,iblk)*wght1(i+1,j ,iblk)) & / wtmp @@ -3282,7 +3281,7 @@ subroutine grid_average_X2YF(dir,work1,wght1,work2,wght2) work2(i,j,iblk) = p25 * & (work1(i ,j ,iblk)*wght1(i ,j ,iblk) & + work1(i-1,j ,iblk)*wght1(i-1,j ,iblk) & - + work1(i ,j-1,iblk)*wght1(i ,j-1,iblk) & + + work1(i ,j-1,iblk)*wght1(i ,j-1,iblk) & + work1(i-1,j-1,iblk)*wght1(i-1,j-1,iblk)) & / wght2(i ,j ,iblk) enddo @@ -3323,7 +3322,7 @@ subroutine grid_average_X2YF(dir,work1,wght1,work2,wght2) do i = ilo, ihi work2(i,j,iblk) = p25 * & (work1(i ,j-1,iblk)*wght1(i ,j-1,iblk) & - + work1(i+1,j-1,iblk)*wght1(i+1,j-1,iblk) & + + work1(i+1,j-1,iblk)*wght1(i+1,j-1,iblk) & + 
work1(i ,j ,iblk)*wght1(i ,j ,iblk) &
 + work1(i+1,j ,iblk)*wght1(i+1,j ,iblk)) &
 / wght2(i ,j ,iblk)
@@ -4177,7 +4176,7 @@ subroutine gridbox_verts(work_g,vbounds)
 if (my_task == master_task) then
 do j = 1, ny_global
 do i = 2, nx_global
- work_g2(i,j) = work_g(i-1,j ) * rad_to_deg 
+ work_g2(i,j) = work_g(i-1,j ) * rad_to_deg
 enddo
 enddo
 ! extrapolate
@@ -4383,13 +4382,13 @@ end subroutine get_bathymetry_popfile
 !=======================================================================
-! Read bathymetry data for seabed stress calculation (grounding scheme for 
-! landfast ice) in CICE stand-alone mode. When CICE is in coupled mode 
-! (e.g. CICE-NEMO), hwater should be uptated at each time level so that 
+! Read bathymetry data for seabed stress calculation (grounding scheme for
+! landfast ice) in CICE stand-alone mode. When CICE is in coupled mode
+! (e.g. CICE-NEMO), hwater should be updated at each time level so that
 ! it varies with ocean dynamics.
 !
 ! author: Fred Dupont, CMC
- 
+
 subroutine read_seabedstress_bathy
 ! use module
@@ -4399,7 +4398,7 @@ subroutine read_seabedstress_bathy
 ! local variables
 integer (kind=int_kind) :: &
 fid_init ! file id for netCDF init file
- 
+
 character (char_len_long) :: & ! input data file names
 fieldname
@@ -4433,7 +4432,7 @@ subroutine read_seabedstress_bathy
 endif
 end subroutine read_seabedstress_bathy
- 
+
 !=======================================================================
 end module ice_grid
diff --git a/cicecore/cicedynB/infrastructure/ice_memusage.F90 b/cicecore/cicedynB/infrastructure/ice_memusage.F90
index 19e7dfb15..8dca4e621 100644
--- a/cicecore/cicedynB/infrastructure/ice_memusage.F90
+++ b/cicecore/cicedynB/infrastructure/ice_memusage.F90
@@ -11,11 +11,11 @@ MODULE ice_memusage
 implicit none
 private
- 
+
 ! PUBLIC: Public interfaces
 public :: ice_memusage_getusage, &
- ice_memusage_init, & 
+ ice_memusage_init, &
 ice_memusage_print
 logical(log_kind), public :: memory_stats
@@ -39,22 +39,20 @@ subroutine ice_memusage_init(iunit)
 !----- arguments -----
 integer, optional :: iunit !< output unit number for optional writes
- 
+
 !----- local -----
- ! --- Memory stats --- 
+ ! --- Memory stats ---
 integer :: msize ! memory size (high water)
- integer :: mrss ! resident size (current memory use)
- integer :: msize0,msize1 ! temporary size
 integer :: mrss0,mrss1,mrss2 ! temporary rss
 integer :: mshare,mtext,mdatastack
 integer :: ierr
- 
+
 integer :: ice_memusage_gptl
 real(dbl_kind),allocatable :: mem_tmp(:)
 character(*),parameter :: subname = '(ice_memusage_init)'
- 
+
 !---------------------------------------------------
 ! return if memory_stats are off
@@ -121,7 +119,7 @@ subroutine ice_memusage_print(iunit,string)
 integer, intent(in) :: iunit !< unit number to write to
 character(len=*),optional, intent(in) :: string !< optional string
- !----- local --- 
+ !----- local ---
 real(dbl_kind) :: msize,mrss
 character(len=128) :: lstring
 character(*),parameter :: subname = '(ice_memusage_print)'
diff --git a/cicecore/cicedynB/infrastructure/ice_memusage_gptl.c b/cicecore/cicedynB/infrastructure/ice_memusage_gptl.c
index ec9c2c1d8..309c8824b 100644
--- a/cicecore/cicedynB/infrastructure/ice_memusage_gptl.c
+++ b/cicecore/cicedynB/infrastructure/ice_memusage_gptl.c
@@ -28,7 +28,7 @@
 ** Author: Jim Rosinski
 ** Credit to Chuck Bardeen for MACOS section (__APPLE__ ifdef)
 **
-** get_memusage: 
+** get_memusage:
 **
 ** Designed to be called from Fortran, returns information about memory
 ** usage in each of 5 input int* args.
On Linux read from the /proc @@ -133,7 +133,7 @@ int ice_memusage_gptl (int *size, int *rss, int *share, int *text, int *datastac #endif long long total; int node_config; - + /* memory available */ #if defined(BGP) Kernel_GetPersonality(&pers, sizeof(pers)); @@ -195,7 +195,7 @@ int ice_memusage_gptl (int *size, int *rss, int *share, int *text, int *datastac ** arguments, close the file and return. */ - ret = fscanf (fd, "%d %d %d %d %d %d %d", + ret = fscanf (fd, "%d %d %d %d %d %d %d", size, rss, share, text, datastack, &dum, &dum); ret = fclose (fd); return 0; @@ -203,9 +203,9 @@ int ice_memusage_gptl (int *size, int *rss, int *share, int *text, int *datastac #elif (defined __APPLE__) FILE *fd; - char cmd[60]; + char cmd[60]; int pid = (int) getpid (); - + sprintf (cmd, "ps -o vsz -o rss -o tsiz -p %d | grep -v RSS", pid); fd = popen (cmd, "r"); @@ -224,7 +224,7 @@ int ice_memusage_gptl (int *size, int *rss, int *share, int *text, int *datastac if (getrusage (RUSAGE_SELF, &usage) < 0) return -1; - + *size = -1; *rss = usage.ru_maxrss; *share = -1; diff --git a/cicecore/cicedynB/infrastructure/ice_read_write.F90 b/cicecore/cicedynB/infrastructure/ice_read_write.F90 index d5cbe1768..b9074d8f6 100644 --- a/cicecore/cicedynB/infrastructure/ice_read_write.F90 +++ b/cicecore/cicedynB/infrastructure/ice_read_write.F90 @@ -25,7 +25,7 @@ module ice_read_write use ice_fileunits, only: nu_diag #ifdef USE_NETCDF - use netcdf + use netcdf #endif implicit none @@ -33,7 +33,7 @@ module ice_read_write private integer (kind=int_kind), parameter, private :: & - bits_per_byte = 8 ! number of bits per byte. + bits_per_byte = 8 ! number of bits per byte. ! used to determine RecSize in ice_open public :: ice_open, & @@ -148,7 +148,7 @@ subroutine ice_open_ext(nu, filename, nbits) integer (kind=int_kind), intent(in) :: & nu , & ! unit number nbits ! no. of bits per variable (0 for sequential access) - + integer (kind=int_kind) :: RecSize, nbytes character (*) :: filename @@ -468,9 +468,9 @@ subroutine ice_read_xyzt(nu, nrec, work, atype, diag, & if (present(field_loc)) then call scatter_global(work(:,:,k,:), work_g4(:,:,k), master_task, distrb_info, & field_loc, field_type) - + else - + call scatter_global(work(:,:,k,:), work_g4(:,:,k), master_task, distrb_info, & field_loc_noupdate, field_type_noupdate) endif @@ -791,11 +791,11 @@ subroutine ice_write_xyt(nu, nrec, work, atype, diag) elseif (atype == 'ida8') then allocate(work_gi8(nx_global,ny_global)) work_gi8 = nint(work_g1) - write(nu,rec=nrec) work_gi8 + write(nu,rec=nrec) work_gi8 deallocate(work_gi8) elseif (atype == 'rda4') then allocate(work_gr(nx_global,ny_global)) - work_gr = work_g1 + work_gr = real(work_g1,real_kind) write(nu,rec=nrec) work_gr deallocate(work_gr) elseif (atype == 'rda8') then @@ -824,7 +824,7 @@ end subroutine ice_write_xyt !======================================================================= -! Writes an unformatted file +! Writes an unformatted file ! 
work is a real array, atype indicates the format of the data subroutine ice_write_xyzt(nu, nrec, work, atype, diag) @@ -895,11 +895,11 @@ subroutine ice_write_xyzt(nu, nrec, work, atype, diag) elseif (atype == 'ida8') then allocate(work_gi9(nx_global,ny_global,nblyr+2)) work_gi9 = nint(work_g4) - write(nu,rec=nrec) work_gi9 + write(nu,rec=nrec) work_gi9 deallocate(work_gi9) elseif (atype == 'rda4') then allocate(work_gr3(nx_global,ny_global,nblyr+2)) - work_gr3 = work_g4 + work_gr3 = real(work_g4,real_kind) write(nu,rec=nrec) work_gr3 deallocate(work_gr3) elseif (atype == 'rda8') then @@ -1002,11 +1002,11 @@ subroutine ice_write_ext(nu, nrec, work, atype, diag) elseif (atype == 'ida8') then allocate(work_gi8(nx,ny)) work_gi8 = nint(work_g1) - write(nu,rec=nrec) work_gi8 + write(nu,rec=nrec) work_gi8 deallocate(work_gi8) elseif (atype == 'rda4') then allocate(work_gr(nx,ny)) - work_gr = work_g1 + work_gr = real(work_g1,real_kind) write(nu,rec=nrec) work_gr deallocate(work_gr) elseif (atype == 'rda8') then @@ -1040,7 +1040,7 @@ end subroutine ice_write_ext subroutine ice_open_nc(filename, fid) - character (char_len_long), intent(in) :: & + character (char_len_long), intent(in) :: & filename ! netCDF filename integer (kind=int_kind), intent(out) :: & @@ -1052,7 +1052,7 @@ subroutine ice_open_nc(filename, fid) #ifdef USE_NETCDF integer (kind=int_kind) :: & - status ! status variable from netCDF routine + status ! status variable from netCDF routine if (my_task == master_task) then @@ -1089,12 +1089,12 @@ subroutine ice_read_nc_xy(fid, nrec, varname, work, diag, & integer (kind=int_kind), intent(in) :: & fid , & ! file id - nrec ! record number + nrec ! record number logical (kind=log_kind), intent(in) :: & diag ! if true, write diagnostic output - character (len=*), intent(in) :: & + character (len=*), intent(in) :: & varname ! field name in netcdf file real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks), intent(out) :: & @@ -1113,13 +1113,13 @@ subroutine ice_read_nc_xy(fid, nrec, varname, work, diag, & #ifdef USE_NETCDF ! netCDF file diagnostics: - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & varid , & ! variable id status , & ! status output from netcdf routines ndims , & ! number of dimensions dimlen ! dimension size - integer (kind=int_kind), dimension(10) :: & + integer (kind=int_kind), dimension(10) :: & dimids ! generic size dimids real (kind=dbl_kind) :: & @@ -1200,12 +1200,12 @@ subroutine ice_read_nc_xy(fid, nrec, varname, work, diag, & endif !-------------------------------------------------------------- - ! Read global array + ! Read global array !-------------------------------------------------------------- if (orca_halogrid .and. .not. present(restart_ext)) then status = nf90_get_var( fid, varid, work_g2, & - start=(/1,1,lnrec/), & + start=(/1,1,lnrec/), & count=(/nx_global+2,ny_global+1,1/)) if (status /= nf90_noerr) then call abort_ice(subname//' ERROR: Cannot get variable '//trim(varname), & @@ -1214,7 +1214,7 @@ subroutine ice_read_nc_xy(fid, nrec, varname, work, diag, & work_g1 = work_g2(2:nx_global+1,1:ny_global) else status = nf90_get_var( fid, varid, work_g1, & - start=(/1,1,lnrec/), & + start=(/1,1,lnrec/), & count=(/nx,ny,1/)) if (status /= nf90_noerr) then call abort_ice(subname//' ERROR: Cannot get variable '//trim(varname), & @@ -1230,8 +1230,8 @@ subroutine ice_read_nc_xy(fid, nrec, varname, work, diag, & !------------------------------------------------------------------- if (my_task==master_task .and. 
diag) then - write(nu_diag,'(2a,i8,a,i8,2a)') & - subname,' fid= ',fid, ', lnrec = ',lnrec, & + write(nu_diag,'(2a,i8,a,i8,2a)') & + subname,' fid= ',fid, ', lnrec = ',lnrec, & ', varname = ',trim(varname) ! status = nf90_inquire(fid, nDimensions=ndim, nVariables=nvar) ! write(nu_diag,*) subname,' ndim= ',ndim,', nvar= ',nvar @@ -1294,9 +1294,9 @@ subroutine ice_read_nc_xyz(fid, nrec, varname, work, diag, & integer (kind=int_kind), intent(in) :: & fid , & ! file id - nrec ! record number + nrec ! record number - character (len=*), intent(in) :: & + character (len=*), intent(in) :: & varname ! field name in netcdf file logical (kind=log_kind), intent(in) :: & @@ -1318,14 +1318,14 @@ subroutine ice_read_nc_xyz(fid, nrec, varname, work, diag, & #ifdef USE_NETCDF ! netCDF file diagnostics: - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & n, & ! ncat index varid , & ! variable id status , & ! status output from netcdf routines ndims , & ! number of dimensions dimlen ! dimension size - integer (kind=int_kind), dimension(10) :: & + integer (kind=int_kind), dimension(10) :: & dimids ! generic size dimids real (kind=dbl_kind) :: & @@ -1407,12 +1407,12 @@ subroutine ice_read_nc_xyz(fid, nrec, varname, work, diag, & endif !-------------------------------------------------------------- - ! Read global array + ! Read global array !-------------------------------------------------------------- if (orca_halogrid .and. .not. present(restart_ext)) then status = nf90_get_var( fid, varid, work_g2, & - start=(/1,1,1,lnrec/), & + start=(/1,1,1,lnrec/), & count=(/nx_global+2,ny_global+1,ncat,1/)) if (status /= nf90_noerr) then call abort_ice(subname//' ERROR: Cannot get variable '//trim(varname), & @@ -1421,7 +1421,7 @@ subroutine ice_read_nc_xyz(fid, nrec, varname, work, diag, & work_g1 = work_g2(2:nx_global+1,1:ny_global,:) else status = nf90_get_var( fid, varid, work_g1, & - start=(/1,1,1,lnrec/), & + start=(/1,1,1,lnrec/), & count=(/nx,ny,ncat,1/)) if (status /= nf90_noerr) then call abort_ice(subname//' ERROR: Cannot get variable '//trim(varname), & @@ -1437,8 +1437,8 @@ subroutine ice_read_nc_xyz(fid, nrec, varname, work, diag, & !------------------------------------------------------------------- if (my_task==master_task .and. diag) then - write(nu_diag,'(2a,i8,a,i8,2a)') & - subname,' fid= ',fid, ', lnrec = ',lnrec, & + write(nu_diag,'(2a,i8,a,i8,2a)') & + subname,' fid= ',fid, ', lnrec = ',lnrec, & ', varname = ',trim(varname) ! status = nf90_inquire(fid, nDimensions=ndim, nVariables=nvar) ! write(nu_diag,*) subname,' ndim= ',ndim,', nvar= ',nvar @@ -1506,14 +1506,14 @@ subroutine ice_read_nc_xyf(fid, nrec, varname, work, diag, & field_loc, field_type, restart_ext) use ice_fileunits, only: nu_diag - use ice_domain_size, only: nfsd, nfreq + use ice_domain_size, only: nfreq use ice_gather_scatter, only: scatter_global, scatter_global_ext integer (kind=int_kind), intent(in) :: & fid , & ! file id - nrec ! record number + nrec ! record number - character (len=*), intent(in) :: & + character (len=*), intent(in) :: & varname ! field name in netcdf file logical (kind=log_kind), intent(in) :: & @@ -1533,7 +1533,7 @@ subroutine ice_read_nc_xyf(fid, nrec, varname, work, diag, & ! local variables ! netCDF file diagnostics: - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & varid, & ! variable id status, & ! status output from netcdf routines ndim, nvar, & ! sizes of netcdf file @@ -1542,7 +1542,7 @@ subroutine ice_read_nc_xyf(fid, nrec, varname, work, diag, & ndims, & ! 
number of dimensions dimlen ! dimension size - integer (kind=int_kind), dimension(10) :: & + integer (kind=int_kind), dimension(10) :: & dimids ! generic size dimids real (kind=dbl_kind) :: & @@ -1627,12 +1627,12 @@ subroutine ice_read_nc_xyf(fid, nrec, varname, work, diag, & endif !-------------------------------------------------------------- - ! Read global array + ! Read global array !-------------------------------------------------------------- if (orca_halogrid .and. .not. present(restart_ext)) then status = nf90_get_var( fid, varid, work_g2, & - start=(/1,1,1,lnrec/), & + start=(/1,1,1,lnrec/), & count=(/nx_global+2,ny_global+1,nfreq,1/)) if (status /= nf90_noerr) then call abort_ice(subname//' ERROR: Cannot get variable '//trim(varname), & @@ -1641,7 +1641,7 @@ subroutine ice_read_nc_xyf(fid, nrec, varname, work, diag, & work_g1 = work_g2(2:nx_global+1,1:ny_global,:) else status = nf90_get_var( fid, varid, work_g1, & - start=(/1,1,1,lnrec/), & + start=(/1,1,1,lnrec/), & count=(/nx,ny,nfreq,1/)) if (status /= nf90_noerr) then call abort_ice(subname//' ERROR: Cannot get variable '//trim(varname), & @@ -1725,12 +1725,12 @@ subroutine ice_read_nc_point(fid, nrec, varname, work, diag, & integer (kind=int_kind), intent(in) :: & fid , & ! file id - nrec ! record number + nrec ! record number logical (kind=log_kind), intent(in) :: & diag ! if true, write diagnostic output - character (char_len), intent(in) :: & + character (char_len), intent(in) :: & varname ! field name in netcdf file integer (kind=int_kind), optional, intent(in) :: & @@ -1746,7 +1746,7 @@ subroutine ice_read_nc_point(fid, nrec, varname, work, diag, & #ifdef USE_NETCDF ! netCDF file diagnostics: - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & varid, & ! netcdf id for field status, & ! status output from netcdf routines ndim, nvar, & ! sizes of netcdf file @@ -1754,7 +1754,7 @@ subroutine ice_read_nc_point(fid, nrec, varname, work, diag, & ndims, & ! number of dimensions dimlen ! dimension size - integer (kind=int_kind), dimension(10) :: & + integer (kind=int_kind), dimension(10) :: & dimids ! generic size dimids real (kind=dbl_kind), dimension(1) :: & @@ -1805,8 +1805,8 @@ subroutine ice_read_nc_point(fid, nrec, varname, work, diag, & ! Read point variable !-------------------------------------------------------------- - status = nf90_get_var(fid, varid, workg, & - start= (/ lnrec /), & + status = nf90_get_var(fid, varid, workg, & + start= (/ lnrec /), & count=(/ 1 /)) if (status /= nf90_noerr) then call abort_ice(subname//' ERROR: Cannot get variable '//trim(varname), & @@ -1819,8 +1819,8 @@ subroutine ice_read_nc_point(fid, nrec, varname, work, diag, & !------------------------------------------------------------------- if (my_task==master_task .and. diag) then - write(nu_diag,'(2a,i8,a,i8,2a)') & - subname,' fid= ',fid, ', lnrec = ',lnrec, & + write(nu_diag,'(2a,i8,a,i8,2a)') & + subname,' fid= ',fid, ', lnrec = ',lnrec, & ', varname = ',trim(varname) status = nf90_inquire(fid, nDimensions=ndim, nVariables=nvar) write(nu_diag,*) subname,' ndim= ',ndim,', nvar= ',nvar @@ -1830,7 +1830,7 @@ subroutine ice_read_nc_point(fid, nrec, varname, work, diag, & enddo endif - work = workg(1) + work = workg(1) #else call abort_ice(subname//' ERROR: USE_NETCDF cpp not defined', & @@ -1870,11 +1870,7 @@ subroutine ice_read_nc_1D(fid, varname, work, diag, & integer (kind=int_kind) :: & varid, & ! netcdf id for field status, & ! status output from netcdf routines - ndim, nvar, & ! sizes of netcdf file - dimlen ! 
size of dimension - - character (char_len) :: & - dimname ! dimension name + ndim, nvar ! sizes of netcdf file real (kind=dbl_kind), dimension(xdim) :: & workg ! output array (real, 8-byte) @@ -1958,11 +1954,7 @@ subroutine ice_read_nc_2D(fid, varname, work, diag, & integer (kind=int_kind) :: & varid, & ! netcdf id for field status, & ! status output from netcdf routines - ndim, nvar, & ! sizes of netcdf file - dimlen ! size of dimension - - character (char_len) :: & - dimname ! dimension name + ndim, nvar ! sizes of netcdf file real (kind=dbl_kind), dimension(xdim,ydim) :: & workg ! output array (real, 8-byte) @@ -2049,11 +2041,7 @@ subroutine ice_read_nc_3D(fid, varname, work, diag, & integer (kind=int_kind) :: & varid, & ! netcdf id for field status, & ! status output from netcdf routines - ndim, nvar, & ! sizes of netcdf file - dimlen ! size of dimension - - character (char_len) :: & - dimname ! dimension name + ndim, nvar ! sizes of netcdf file real (kind=dbl_kind), dimension(xdim,ydim,zdim) :: & workg ! output array (real, 8-byte) @@ -2121,12 +2109,12 @@ subroutine ice_read_nc_z(fid, nrec, varname, work, diag, & integer (kind=int_kind), intent(in) :: & fid , & ! file id - nrec ! record number + nrec ! record number logical (kind=log_kind), intent(in) :: & diag ! if true, write diagnostic output - character (char_len), intent(in) :: & + character (char_len), intent(in) :: & varname ! field name in netcdf file integer (kind=int_kind), optional, intent(in) :: & @@ -2143,7 +2131,7 @@ subroutine ice_read_nc_z(fid, nrec, varname, work, diag, & work_z ! netCDF file diagnostics: - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & varid, & ! netcdf id for field status, & ! status output from netcdf routines ndim, nvar, & ! sizes of netcdf file @@ -2151,11 +2139,11 @@ subroutine ice_read_nc_z(fid, nrec, varname, work, diag, & ndims, & ! number of dimensions dimlen ! dimension size - integer (kind=int_kind), dimension(10) :: & + integer (kind=int_kind), dimension(10) :: & dimids ! generic size dimids character (char_len) :: & - dimname ! dimension name + dimname ! dimension name integer (kind=int_kind) :: lnrec ! local value of nrec @@ -2204,11 +2192,11 @@ subroutine ice_read_nc_z(fid, nrec, varname, work, diag, & endif !-------------------------------------------------------------- - ! Read global array + ! Read global array !-------------------------------------------------------------- status = nf90_get_var( fid, varid, work_z, & - start=(/1,lnrec/), & + start=(/1,lnrec/), & count=(/nilyr,1/)) if (status /= nf90_noerr) then call abort_ice(subname//' ERROR: Cannot get variable '//trim(varname), & @@ -2221,8 +2209,8 @@ subroutine ice_read_nc_z(fid, nrec, varname, work, diag, & !------------------------------------------------------------------- if (my_task==master_task .and. diag) then - write(nu_diag,'(2a,i8,a,i8,2a)') & - subname,' fid= ',fid, ', lnrec = ',lnrec, & + write(nu_diag,'(2a,i8,a,i8,2a)') & + subname,' fid= ',fid, ', lnrec = ',lnrec, & ', varname = ',trim(varname) status = nf90_inquire(fid, nDimensions=ndim, nVariables=nvar) write(nu_diag,*) subname,' ndim= ',ndim,', nvar= ',nvar @@ -2256,7 +2244,7 @@ subroutine ice_write_nc_xy(fid, nrec, varid, work, diag, & integer (kind=int_kind), intent(in) :: & fid , & ! file id varid , & ! variable id - nrec ! record number + nrec ! record number logical (kind=log_kind), intent(in) :: & diag ! if true, write diagnostic output @@ -2276,7 +2264,7 @@ subroutine ice_write_nc_xy(fid, nrec, varid, work, diag, & #ifdef USE_NETCDF ! 
netCDF file diagnostics: - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & status ! status output from netcdf routines ! ndim, nvar, & ! sizes of netcdf file ! id, & ! dimension index @@ -2327,11 +2315,11 @@ subroutine ice_write_nc_xy(fid, nrec, varid, work, diag, & if (my_task == master_task) then !-------------------------------------------------------------- - ! Write global array + ! Write global array !-------------------------------------------------------------- status = nf90_put_var( fid, varid, work_g1, & - start=(/1,1,nrec/), & + start=(/1,1,nrec/), & count=(/nx,ny,1/)) endif ! my_task = master_task @@ -2341,8 +2329,8 @@ subroutine ice_write_nc_xy(fid, nrec, varid, work, diag, & !------------------------------------------------------------------- if (my_task==master_task .and. diag) then -! write(nu_diag,*) & -! subname,' fid= ',fid, ', nrec = ',nrec, & +! write(nu_diag,*) & +! subname,' fid= ',fid, ', nrec = ',nrec, & ! ', varid = ',varid ! status = nf90_inquire(fid, nDimensions=ndim, nVariables=nvar) ! write(nu_diag,*) subname,' ndim= ',ndim,', nvar= ',nvar @@ -2357,7 +2345,7 @@ subroutine ice_write_nc_xy(fid, nrec, varid, work, diag, & endif deallocate(work_g1) - + #else call abort_ice(subname//' ERROR: USE_NETCDF cpp not defined', & file=__FILE__, line=__LINE__) @@ -2379,7 +2367,7 @@ subroutine ice_write_nc_xyz(fid, nrec, varid, work, diag, & integer (kind=int_kind), intent(in) :: & fid , & ! file id varid , & ! variable id - nrec ! record number + nrec ! record number logical (kind=log_kind), intent(in) :: & diag ! if true, write diagnostic output @@ -2399,7 +2387,7 @@ subroutine ice_write_nc_xyz(fid, nrec, varid, work, diag, & #ifdef USE_NETCDF ! netCDF file diagnostics: - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & n, & ! ncat index status ! status output from netcdf routines ! ndim, nvar, & ! sizes of netcdf file @@ -2457,11 +2445,11 @@ subroutine ice_write_nc_xyz(fid, nrec, varid, work, diag, & if (my_task == master_task) then !-------------------------------------------------------------- - ! Read global array + ! Read global array !-------------------------------------------------------------- status = nf90_put_var( fid, varid, work_g1, & - start=(/1,1,1,nrec/), & + start=(/1,1,1,nrec/), & count=(/nx,ny,ncat,1/)) endif ! my_task = master_task @@ -2471,8 +2459,8 @@ subroutine ice_write_nc_xyz(fid, nrec, varid, work, diag, & !------------------------------------------------------------------- if (my_task==master_task .and. diag) then -! write(nu_diag,*) & -! subname,' fid= ',fid, ', nrec = ',nrec, & +! write(nu_diag,*) & +! subname,' fid= ',fid, ', nrec = ',nrec, & ! ', varid = ',varid ! status = nf90_inquire(fid, nDimensions=ndim, nVariables=nvar) ! write(nu_diag,*) subname,' ndim= ',ndim,', nvar= ',nvar @@ -2491,14 +2479,14 @@ subroutine ice_write_nc_xyz(fid, nrec, varid, work, diag, & endif deallocate(work_g1) - + #else call abort_ice(subname//' ERROR: USE_NETCDF cpp not defined', & file=__FILE__, line=__LINE__) #endif end subroutine ice_write_nc_xyz - + !======================================================================= ! Read a netcdf file. @@ -2506,15 +2494,15 @@ end subroutine ice_write_nc_xyz ! work_g is a real array ! ! Adapted by William Lipscomb, LANL, from ice_read -! Adapted by Ann Keen, Met Office, to read from a netcdf file +! Adapted by Ann Keen, Met Office, to read from a netcdf file subroutine ice_read_global_nc (fid, nrec, varname, work_g, diag) integer (kind=int_kind), intent(in) :: & fid , & ! file id - nrec ! 
record number + nrec ! record number - character (char_len), intent(in) :: & + character (char_len), intent(in) :: & varname ! field name in netcdf file real (kind=dbl_kind), dimension(nx_global,ny_global), intent(out) :: & @@ -2529,12 +2517,12 @@ subroutine ice_read_global_nc (fid, nrec, varname, work_g, diag) #ifdef USE_NETCDF ! netCDF file diagnostics: - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & varid, & ! netcdf id for field status ! status output from netcdf routines ! ndim, nvar, & ! sizes of netcdf file ! id, & ! dimension index -! dimlen ! size of dimension +! dimlen ! size of dimension real (kind=dbl_kind) :: & amin, amax, asum ! min, max values and sum of input array @@ -2551,7 +2539,7 @@ subroutine ice_read_global_nc (fid, nrec, varname, work_g, diag) else allocate(work_g3(1,1)) ! to save memory endif - work_g3(:,:) = c0 + work_g3(:,:) = c0 endif work_g(:,:) = c0 @@ -2569,9 +2557,9 @@ subroutine ice_read_global_nc (fid, nrec, varname, work_g, diag) endif !-------------------------------------------------------------- - ! Read global array + ! Read global array !-------------------------------------------------------------- - + if (orca_halogrid) then status = nf90_get_var( fid, varid, work_g3, & start=(/1,1,nrec/), & @@ -2583,7 +2571,7 @@ subroutine ice_read_global_nc (fid, nrec, varname, work_g, diag) work_g=work_g3(2:nx_global+1,1:ny_global) else status = nf90_get_var( fid, varid, work_g, & - start=(/1,1,nrec/), & + start=(/1,1,nrec/), & count=(/nx_global,ny_global,1/)) if (status /= nf90_noerr) then call abort_ice(subname//' ERROR: Cannot get variable '//trim(varname), & @@ -2597,8 +2585,8 @@ subroutine ice_read_global_nc (fid, nrec, varname, work_g, diag) !------------------------------------------------------------------- if (my_task == master_task .and. diag) then -! write(nu_diag,*) & -! subname,' fid= ',fid, ', nrec = ',nrec, & +! write(nu_diag,*) & +! subname,' fid= ',fid, ', nrec = ',nrec, & ! ', varname = ',trim(varname) ! status = nf90_inquire(fid, nDimensions=ndim, nVariables=nvar) ! write(nu_diag,*) subname,' ndim= ',ndim,', nvar= ',nvar @@ -2638,7 +2626,7 @@ subroutine ice_close_nc(fid) #ifdef USE_NETCDF integer (kind=int_kind) :: & - status ! status variable from netCDF routine + status ! status variable from netCDF routine if (my_task == master_task) then status = nf90_close(fid) @@ -2667,13 +2655,13 @@ subroutine ice_read_nc_uv(fid, nrec, nzlev, varname, work, diag, & integer (kind=int_kind), intent(in) :: & fid , & ! file id - nrec , & ! record number + nrec , & ! record number nzlev ! z level logical (kind=log_kind), intent(in) :: & diag ! if true, write diagnostic output - character (len=*), intent(in) :: & + character (len=*), intent(in) :: & varname ! field name in netcdf file real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks), intent(out) :: & @@ -2692,7 +2680,7 @@ subroutine ice_read_nc_uv(fid, nrec, nzlev, varname, work, diag, & #ifdef USE_NETCDF ! netCDF file diagnostics: - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & varid , & ! variable id status ! status output from netcdf routines ! ndim, nvar, & ! sizes of netcdf file @@ -2739,11 +2727,11 @@ subroutine ice_read_nc_uv(fid, nrec, nzlev, varname, work, diag, & endif !-------------------------------------------------------------- - ! Read global array + ! 
Read global array !-------------------------------------------------------------- status = nf90_get_var( fid, varid, work_g1, & - start=(/1,1,nzlev,nrec/), & + start=(/1,1,nzlev,nrec/), & count=(/nx,ny,1,1/)) if (status /= nf90_noerr) then call abort_ice(subname//' ERROR: Cannot get variable '//trim(varname), & diff --git a/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 b/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 index 70e70621a..64b8d2101 100644 --- a/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 +++ b/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 @@ -9,7 +9,7 @@ ! 2004-05: Block structure added by William Lipscomb ! Restart module separated from history module ! 2006 ECH: Accepted some CESM code into mainstream CICE -! Converted to free source form (F90) +! Converted to free source form (F90) ! 2008 ECH: Rearranged order in which internal stresses are written and read ! 2010 ECH: Changed eice, esno to qice, qsno ! 2012 ECH: Added routines for reading/writing extended grid @@ -61,7 +61,7 @@ subroutine dumpfile(filename_spec) stressm_1, stressm_2, stressm_3, stressm_4, & stress12_1, stress12_2, stress12_3, stress12_4, & stresspT, stressmT, stress12T, & - stresspU, stressmU, stress12U + stresspU, stressmU, stress12U use ice_flux, only: coszen use ice_grid, only: grid_ice, tmask, iceumask, iceemask, icenmask use ice_state, only: aicen, vicen, vsnon, trcrn, uvel, vvel, & @@ -85,7 +85,7 @@ subroutine dumpfile(filename_spec) character(len=*), parameter :: subname = '(dumpfile)' call icepack_query_tracer_indices(nt_Tsfc_out=nt_Tsfc, nt_sice_out=nt_sice, & - nt_qice_out=nt_qice, nt_qsno_out=nt_qsno) + nt_qice_out=nt_qice, nt_qsno_out=nt_qsno) call icepack_warnings_flush(nu_diag) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) @@ -162,7 +162,7 @@ subroutine dumpfile(filename_spec) !----------------------------------------------------------------- ! radiation fields !----------------------------------------------------------------- - + if (restart_coszen) call write_restart_field(nu_dump,0,coszen,'ruf8','coszen',1,diag) call write_restart_field(nu_dump,0,scale_factor,'ruf8','scale_factor',1,diag) @@ -214,7 +214,7 @@ subroutine dumpfile(filename_spec) !----------------------------------------------------------------- ! ice mask for dynamics !----------------------------------------------------------------- - + !$OMP PARALLEL DO PRIVATE(iblk,i,j) do iblk = 1, nblocks do j = 1, ny_block @@ -316,7 +316,7 @@ subroutine restartfile (ice_ic) file=__FILE__, line=__LINE__) call icepack_query_tracer_indices(nt_Tsfc_out=nt_Tsfc, nt_sice_out=nt_sice, & - nt_qice_out=nt_qice, nt_qsno_out=nt_qsno) + nt_qice_out=nt_qice, nt_qsno_out=nt_qsno) call icepack_warnings_flush(nu_diag) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) @@ -440,7 +440,7 @@ subroutine restartfile (ice_ic) !----------------------------------------------------------------- if (my_task == master_task) write(nu_diag,*) & 'internal stress components' - + call read_restart_field(nu_restart,0,stressp_1,'ruf8', & 'stressp_1',1,diag,field_loc_center,field_type_scalar) ! 
stressp_1 call read_restart_field(nu_restart,0,stressp_3,'ruf8', & @@ -755,7 +755,7 @@ subroutine restartfile_v4 (ice_ic) file=__FILE__, line=__LINE__) call icepack_query_tracer_indices(nt_Tsfc_out=nt_Tsfc, nt_sice_out=nt_sice, & - nt_qice_out=nt_qice, nt_qsno_out=nt_qsno) + nt_qice_out=nt_qice, nt_qsno_out=nt_qsno) call icepack_warnings_flush(nu_diag) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) @@ -885,7 +885,7 @@ subroutine restartfile_v4 (ice_ic) !----------------------------------------------------------------- if (my_task == master_task) write(nu_diag,*) & 'internal stress components' - + allocate (work_g1(nx_global,ny_global), & work_g2(nx_global,ny_global)) @@ -1055,7 +1055,7 @@ subroutine restartfile_v4 (ice_ic) ! creates new file filename = trim(restart_dir) // '/iced.converted' - call dumpfile(filename) + call dumpfile(filename) call final_restart ! stop diff --git a/cicecore/cicedynB/infrastructure/ice_restoring.F90 b/cicecore/cicedynB/infrastructure/ice_restoring.F90 index f21e50513..221d066df 100644 --- a/cicecore/cicedynB/infrastructure/ice_restoring.F90 +++ b/cicecore/cicedynB/infrastructure/ice_restoring.F90 @@ -46,7 +46,7 @@ module ice_restoring !======================================================================= -! Allocates and initializes arrays needed for restoring the ice state +! Allocates and initializes arrays needed for restoring the ice state ! in cells surrounding the grid. @@ -115,7 +115,7 @@ subroutine ice_HaloRestore_init !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block, & !$OMP iglob,jglob,iblock,jblock) do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo @@ -154,7 +154,7 @@ subroutine ice_HaloRestore_init !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block, & !$OMP i,j,n,nt,ibc,npad) do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo @@ -280,7 +280,7 @@ subroutine ice_HaloRestore_init enddo if (my_task == master_task) & - write (nu_diag,*) 'ice restoring timescale = ',trestore,' days' + write (nu_diag,*) 'ice restoring timescale = ',trestore,' days' end subroutine ice_HaloRestore_init @@ -318,7 +318,7 @@ subroutine set_restore_var (nx_block, ny_block, & real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & Tair , & ! air temperature (K) - Tf ! freezing temperature (C) + Tf ! freezing temperature (C) real (kind=dbl_kind), dimension (nx_block,ny_block,nilyr), intent(in) :: & salinz , & ! initial salinity profile @@ -395,7 +395,7 @@ subroutine set_restore_var (nx_block, ny_block, & vicen(i,j,n) = c0 vsnon(i,j,n) = c0 if (tmask(i,j)) then - trcrn(i,j,nt_Tsfc,n) = Tf(i,j) ! surface temperature + trcrn(i,j,nt_Tsfc,n) = Tf(i,j) ! surface temperature else trcrn(i,j,nt_Tsfc,n) = c0 ! on land gridcells endif @@ -526,7 +526,7 @@ subroutine set_restore_var (nx_block, ny_block, & ! surface temperature trcrn(i,j,nt_Tsfc,n) = Tsfc ! deg C - ! ice enthalpy, salinity + ! ice enthalpy, salinity do k = 1, nilyr trcrn(i,j,nt_qice+k-1,n) = qin(k) trcrn(i,j,nt_sice+k-1,n) = salinz(i,j,k) @@ -569,7 +569,7 @@ subroutine ice_HaloRestore i,j,iblk,nt,n, &! dummy loop indices ilo,ihi,jlo,jhi, &! beginning and end of physical domain ibc, &! ghost cell column or row - ntrcr, &! + ntrcr, &! npad ! 
padding column/row counter type (block) :: & @@ -611,7 +611,7 @@ subroutine ice_HaloRestore !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block, & !$OMP i,j,n,nt,ibc,npad) do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo diff --git a/cicecore/cicedynB/infrastructure/io/io_binary/ice_history_write.F90 b/cicecore/cicedynB/infrastructure/io/io_binary/ice_history_write.F90 index b98e09814..2a3f042c3 100644 --- a/cicecore/cicedynB/infrastructure/io/io_binary/ice_history_write.F90 +++ b/cicecore/cicedynB/infrastructure/io/io_binary/ice_history_write.F90 @@ -6,7 +6,7 @@ ! Elizabeth C. Hunke and William H. Lipscomb, LANL ! C. M. Bitz, UW ! -! 2004 WHL: Block structure added +! 2004 WHL: Block structure added ! 2006 ECH: Accepted some CESM code into mainstream CICE ! Added ice_present, aicen, vicen; removed aice1...10, vice1...1. ! Added histfreq_n and histfreq='h' options, removed histfreq='w' @@ -25,7 +25,7 @@ module ice_history_write implicit none private public :: ice_write_hist - + !======================================================================= contains diff --git a/cicecore/cicedynB/infrastructure/io/io_binary/ice_restart.F90 b/cicecore/cicedynB/infrastructure/io/io_binary/ice_restart.F90 index 503bd18ab..b2b438ebe 100644 --- a/cicecore/cicedynB/infrastructure/io/io_binary/ice_restart.F90 +++ b/cicecore/cicedynB/infrastructure/io/io_binary/ice_restart.F90 @@ -17,7 +17,7 @@ module ice_restart use ice_fileunits, only: nu_dump_lvl, nu_dump_pond, nu_dump_hbrine use ice_fileunits, only: nu_dump_iso, nu_dump_snow use ice_fileunits, only: nu_dump_bgc, nu_dump_aero, nu_dump_fsd - use ice_fileunits, only: nu_restart, nu_restart_eap, nu_restart_FY, nu_restart_age + use ice_fileunits, only: nu_restart, nu_restart_eap, nu_restart_FY, nu_restart_age use ice_fileunits, only: nu_restart_lvl, nu_restart_pond, nu_restart_hbrine use ice_fileunits, only: nu_restart_bgc, nu_restart_aero, nu_restart_fsd use ice_fileunits, only: nu_restart_iso, nu_restart_snow @@ -98,7 +98,7 @@ subroutine init_restart_read(ice_ic) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) - if (present(ice_ic)) then + if (present(ice_ic)) then filename = trim(ice_ic) else if (my_task == master_task) then @@ -131,7 +131,7 @@ subroutine init_restart_read(ice_ic) call broadcast_scalar(time_forc,master_task) call broadcast_scalar(myear,master_task) call set_date_from_timesecs(timesecs) - + istep1 = istep0 ! Supplemental restart files @@ -464,7 +464,7 @@ subroutine init_restart_write(filename_spec) restart_file(1:lenstr(restart_file)),'.', & myear,'-',mmonth,'-',mday,'-',msec end if - + ! write pointer (path/file) if (my_task == master_task) then open(nu_rst_pointer,file=pointer_file) @@ -809,7 +809,7 @@ subroutine read_restart_field(nu,nrec,work,atype,vname,ndim3, & endif end subroutine read_restart_field - + !======================================================================= ! Writes a single restart field. diff --git a/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_history_write.F90 b/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_history_write.F90 index 97bb72dab..019ab8ce9 100644 --- a/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_history_write.F90 +++ b/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_history_write.F90 @@ -9,7 +9,7 @@ ! Elizabeth C. Hunke and William H. Lipscomb, LANL ! C. M. Bitz, UW ! -! 
2004 WHL: Block structure added +! 2004 WHL: Block structure added ! 2006 ECH: Accepted some CESM code into mainstream CICE ! Added ice_present, aicen, vicen; removed aice1...10, vice1...1. ! Added histfreq_n and histfreq='h' options, removed histfreq='w' @@ -30,7 +30,7 @@ module ice_history_write implicit none private public :: ice_write_hist - + !======================================================================= contains @@ -48,7 +48,7 @@ subroutine ice_write_hist (ns) use ice_blocks, only: nx_block, ny_block use ice_broadcast, only: broadcast_scalar use ice_calendar, only: msec, timesecs, idate, idate0, write_ic, & - histfreq, histfreq_n, days_per_year, use_leap_years, dayyr, & + histfreq, days_per_year, use_leap_years, dayyr, & hh_init, mm_init, ss_init use ice_communicate, only: my_task, master_task use ice_domain, only: distrb_info @@ -60,7 +60,10 @@ subroutine ice_write_hist (ns) lont_bounds, latt_bounds, lonu_bounds, latu_bounds, & lonn_bounds, latn_bounds, lone_bounds, late_bounds use ice_history_shared - use ice_restart_shared, only: runid, lcdf64 + use ice_restart_shared, only: lcdf64 +#ifdef CESMCOUPLED + use ice_restart_shared, only: runid +#endif #ifdef USE_NETCDF use netcdf #endif @@ -444,7 +447,7 @@ subroutine ice_write_hist (ns) dimidex(4)=kmtidb dimidex(5)=kmtida dimidex(6)=fmtid - + do i = 1, nvar_grdz if (igrdz(i)) then status = nf90_def_var(ncid, var_grdz(i)%short_name, & @@ -779,7 +782,7 @@ subroutine ice_write_hist (ns) work1 = ELAT*rad_to_deg call gather_global(work_g1,work1,master_task,distrb_info) END SELECT - + if (my_task == master_task) then status = nf90_inq_varid(ncid, var_coord(i)%short_name, varid) if (status /= nf90_noerr) call abort_ice(subname// & @@ -900,7 +903,7 @@ subroutine ice_write_hist (ns) call broadcast_scalar(var_nverts(i)%short_name,master_task) SELECT CASE (var_nverts(i)%short_name) CASE ('lont_bounds') - do ivertex = 1, nverts + do ivertex = 1, nverts work1(:,:,:) = lont_bounds(ivertex,:,:,:) call gather_global(work_g1, work1, master_task, distrb_info) if (my_task == master_task) work1_3(ivertex,:,:) = work_g1(:,:) diff --git a/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_restart.F90 b/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_restart.F90 index 534637bbb..f647bd96b 100644 --- a/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_restart.F90 +++ b/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_restart.F90 @@ -59,7 +59,7 @@ subroutine init_restart_read(ice_ic) character(len=*), parameter :: subname = '(init_restart_read)' #ifdef USE_NETCDF - if (present(ice_ic)) then + if (present(ice_ic)) then filename = trim(ice_ic) else if (my_task == master_task) then @@ -78,7 +78,7 @@ subroutine init_restart_read(ice_ic) status = nf90_open(trim(filename), nf90_nowrite, ncid) if (status /= nf90_noerr) call abort_ice(subname// & 'ERROR: reading restart ncfile '//trim(filename)) - + if (use_restart_time) then status1 = nf90_noerr status = nf90_get_att(ncid, nf90_global, 'istep1', istep0) @@ -262,12 +262,12 @@ subroutine init_restart_write(filename_spec) call define_rest_field(ncid,'uvelN',dims) call define_rest_field(ncid,'vvelN',dims) endif - + if (grid_ice == 'C') then call define_rest_field(ncid,'uvelE',dims) call define_rest_field(ncid,'vvelN',dims) endif - + if (restart_coszen) call define_rest_field(ncid,'coszen',dims) call define_rest_field(ncid,'scale_factor',dims) @@ -367,11 +367,11 @@ subroutine init_restart_write(filename_spec) enddo endif if (tr_bgc_Fe ) then - do k=1,n_fed + do k=1,n_fed write(nchar,'(i3.3)') k call 
define_rest_field(ncid,'fed'//trim(nchar),dims) enddo - do k=1,n_fep + do k=1,n_fep write(nchar,'(i3.3)') k call define_rest_field(ncid,'fep'//trim(nchar),dims) enddo @@ -482,17 +482,17 @@ subroutine init_restart_write(filename_spec) if (tr_bgc_PON) & call define_rest_field(ncid,'bgc_PON' ,dims) if (tr_bgc_DON) then - do k = 1, n_don + do k = 1, n_don write(nchar,'(i3.3)') k call define_rest_field(ncid,'bgc_DON'//trim(nchar) ,dims) enddo endif if (tr_bgc_Fe ) then - do k = 1, n_fed + do k = 1, n_fed write(nchar,'(i3.3)') k call define_rest_field(ncid,'bgc_Fed'//trim(nchar) ,dims) enddo - do k = 1, n_fep + do k = 1, n_fep write(nchar,'(i3.3)') k call define_rest_field(ncid,'bgc_Fep'//trim(nchar) ,dims) enddo @@ -557,7 +557,7 @@ subroutine init_restart_write(filename_spec) call define_rest_field(ncid,'zSalinity'//trim(nchar),dims) enddo endif - + if (z_tracers) then if (tr_zaero) then do n = 1, n_zaero @@ -657,14 +657,14 @@ subroutine init_restart_write(filename_spec) enddo endif if (tr_bgc_Fe ) then - do n = 1, n_fed + do n = 1, n_fed write(ncharb,'(i3.3)') n do k = 1, nblyr+3 write(nchar,'(i3.3)') k call define_rest_field(ncid,'bgc_Fed'//trim(ncharb)//trim(nchar),dims) enddo enddo - do n = 1, n_fep + do n = 1, n_fep write(ncharb,'(i3.3)') n do k = 1, nblyr+3 write(nchar,'(i3.3)') k @@ -776,7 +776,7 @@ subroutine read_restart_field(nu,nrec,work,atype,vname,ndim3, & #endif end subroutine read_restart_field - + !======================================================================= ! Writes a single restart field. @@ -818,7 +818,7 @@ subroutine write_restart_field(nu,nrec,work,atype,vname,ndim3,diag) #ifdef USE_NETCDF status = nf90_inq_varid(ncid,trim(vname),varid) - if (ndim3 == ncat) then + if (ndim3 == ncat) then if (restart_ext) then call ice_write_nc(ncid, 1, varid, work, diag, restart_ext, varname=trim(vname)) else @@ -892,7 +892,7 @@ subroutine define_rest_field(ncid, vname, dims) call abort_ice(subname//'ERROR: USE_NETCDF cpp not defined', & file=__FILE__, line=__LINE__) #endif - + end subroutine define_rest_field !======================================================================= diff --git a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_history_write.F90 b/cicecore/cicedynB/infrastructure/io/io_pio2/ice_history_write.F90 index 92f7663a2..6407d8c76 100644 --- a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_history_write.F90 +++ b/cicecore/cicedynB/infrastructure/io/io_pio2/ice_history_write.F90 @@ -6,7 +6,7 @@ ! Elizabeth C. Hunke and William H. Lipscomb, LANL ! C. M. Bitz, UW ! -! 2004 WHL: Block structure added +! 2004 WHL: Block structure added ! 2006 ECH: Accepted some CESM code into mainstream CICE ! Added ice_present, aicen, vicen; removed aice1...10, vice1...1. ! Added histfreq_n and histfreq='h' options, removed histfreq='w' @@ -27,7 +27,7 @@ module ice_history_write implicit none private public :: ice_write_hist - + !======================================================================= contains @@ -407,7 +407,7 @@ subroutine ice_write_hist (ns) endif if (f_bounds) then status = pio_put_att(File, varid, 'bounds', trim(coord_bounds(i))) - endif + endif enddo ! 
Extra dimensions (NCAT, NZILYR, NZSLYR, NZBLYR, NZALYR, NFSD) @@ -446,14 +446,14 @@ subroutine ice_write_hist (ns) if (f_bounds) then status = pio_def_var(File, trim(var_nverts(i)%short_name), & lprecision,dimid_nverts, varid) - status = & + status = & pio_put_att(File,varid, 'long_name', trim(var_nverts(i)%long_name)) status = & pio_put_att(File, varid, 'units', trim(var_nverts(i)%units)) call ice_write_hist_fill(File,varid,var_nverts(i)%short_name,history_precision) endif enddo - + !----------------------------------------------------------------- ! define attributes for time-variant variables !----------------------------------------------------------------- @@ -507,7 +507,7 @@ subroutine ice_write_hist (ns) call ice_write_hist_attrs(File,varid,avail_hist_fields(n),ns) endif enddo ! num_avail_hist_fields_3Dz - + !----------------------------------------------------------------- ! 3D (biology ice layers) !----------------------------------------------------------------- @@ -690,7 +690,7 @@ subroutine ice_write_hist (ns) bnd_start = (/1,1/) bnd_length = (/2,1/) status = pio_put_var(File,varid,ival=time_bounds, & - start=bnd_start(:),count=bnd_length(:)) + start=bnd_start(:),count=bnd_length(:)) endif !----------------------------------------------------------------- @@ -738,7 +738,7 @@ subroutine ice_write_hist (ns) status = pio_inq_varid(File, var_grdz(i)%short_name, varid) SELECT CASE (var_grdz(i)%short_name) CASE ('NCAT') - status = pio_put_var(File, varid, hin_max(1:ncat_hist)) + status = pio_put_var(File, varid, hin_max(1:ncat_hist)) CASE ('NFSD') status = pio_put_var(File, varid, floe_rad_c(1:nfsd_hist)) CASE ('VGRDi') @@ -826,35 +826,35 @@ subroutine ice_write_hist (ns) do i = 1, nvar_verts SELECT CASE (var_nverts(i)%short_name) CASE ('lont_bounds') - do ivertex = 1, nverts + do ivertex = 1, nverts workd3v(ivertex,:,:,:) = lont_bounds(ivertex,:,:,1:nblocks) enddo CASE ('latt_bounds') - do ivertex = 1, nverts + do ivertex = 1, nverts workd3v(ivertex,:,:,:) = latt_bounds(ivertex,:,:,1:nblocks) enddo CASE ('lonu_bounds') - do ivertex = 1, nverts + do ivertex = 1, nverts workd3v(ivertex,:,:,:) = lonu_bounds(ivertex,:,:,1:nblocks) enddo CASE ('latu_bounds') - do ivertex = 1, nverts + do ivertex = 1, nverts workd3v(ivertex,:,:,:) = latu_bounds(ivertex,:,:,1:nblocks) enddo CASE ('lonn_bounds') - do ivertex = 1, nverts + do ivertex = 1, nverts workd3v(ivertex,:,:,:) = lonn_bounds(ivertex,:,:,1:nblocks) enddo CASE ('latn_bounds') - do ivertex = 1, nverts + do ivertex = 1, nverts workd3v(ivertex,:,:,:) = latn_bounds(ivertex,:,:,1:nblocks) enddo CASE ('lone_bounds') - do ivertex = 1, nverts + do ivertex = 1, nverts workd3v(ivertex,:,:,:) = lone_bounds(ivertex,:,:,1:nblocks) enddo CASE ('late_bounds') - do ivertex = 1, nverts + do ivertex = 1, nverts workd3v(ivertex,:,:,:) = late_bounds(ivertex,:,:,1:nblocks) enddo END SELECT diff --git a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_pio.F90 b/cicecore/cicedynB/infrastructure/io/io_pio2/ice_pio.F90 index d4149f7bf..b242f542b 100644 --- a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_pio.F90 +++ b/cicecore/cicedynB/infrastructure/io/io_pio2/ice_pio.F90 @@ -10,7 +10,7 @@ module ice_pio use ice_communicate use ice_domain, only : nblocks, blocks_ice use ice_domain_size - use ice_fileunits + use ice_fileunits use ice_exit, only: abort_ice use icepack_intfc, only: icepack_warnings_flush, icepack_warnings_aborted use pio @@ -52,7 +52,7 @@ subroutine ice_pio_init(mode, filename, File, clobber, cdf64, iotype) use perf_mod, only : t_initf #endif 
#endif - + implicit none character(len=*) , intent(in), optional :: mode character(len=*) , intent(in), optional :: filename @@ -140,14 +140,14 @@ subroutine ice_pio_init(mode, filename, File, clobber, cdf64, iotype) #endif if (present(mode) .and. present(filename) .and. present(File)) then - + if (trim(mode) == 'write') then lclobber = .false. if (present(clobber)) lclobber=clobber - + lcdf64 = .false. if (present(cdf64)) lcdf64=cdf64 - + if (File%fh<0) then ! filename not open inquire(file=trim(filename),exist=exists) @@ -178,7 +178,7 @@ subroutine ice_pio_init(mode, filename, File, clobber, cdf64, iotype) ! filename is already open, just return endif end if - + if (trim(mode) == 'read') then inquire(file=trim(filename),exist=exists) if (exists) then @@ -205,7 +205,7 @@ subroutine ice_pio_initdecomp_2d(iodesc, precision) integer (kind=int_kind) :: & iblk,ilo,ihi,jlo,jhi,lon,lat,i,j,n,k - type(block) :: this_block + type(block) :: this_block integer(kind=int_kind), pointer :: dof2d(:) integer(kind=int_kind) :: lprecision @@ -218,12 +218,12 @@ subroutine ice_pio_initdecomp_2d(iodesc, precision) n=0 do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi - + do j=1,ny_block do i=1,nx_block n = n+1 @@ -249,7 +249,7 @@ subroutine ice_pio_initdecomp_2d(iodesc, precision) endif deallocate(dof2d) - + end subroutine ice_pio_initdecomp_2d !================================================================================ @@ -261,9 +261,9 @@ subroutine ice_pio_initdecomp_3d (ndim3, iodesc, remap, precision) logical, optional :: remap integer(kind=int_kind), optional, intent(in) :: precision integer (kind=int_kind) :: & - iblk,ilo,ihi,jlo,jhi,lon,lat,i,j,n,k + iblk,ilo,ihi,jlo,jhi,lon,lat,i,j,n,k - type(block) :: this_block + type(block) :: this_block logical :: lremap integer(kind=int_kind), pointer :: dof3d(:) integer(kind=int_kind) :: lprecision @@ -279,12 +279,12 @@ subroutine ice_pio_initdecomp_3d (ndim3, iodesc, remap, precision) ! Reorder the ndim3 and nblocks loops to avoid a temporary array in restart read/write n=0 do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi - do k=1,ndim3 + do k=1,ndim3 do j=1,ny_block do i=1,nx_block n = n+1 @@ -295,7 +295,7 @@ subroutine ice_pio_initdecomp_3d (ndim3, iodesc, remap, precision) else lon = this_block%i_glob(i) lat = this_block%j_glob(j) - dof3d(n) = ((lat-1)*nx_global + lon) + (k-1)*nx_global*ny_global + dof3d(n) = ((lat-1)*nx_global + lon) + (k-1)*nx_global*ny_global endif enddo !i enddo !j @@ -303,9 +303,9 @@ subroutine ice_pio_initdecomp_3d (ndim3, iodesc, remap, precision) enddo ! 
iblk else n=0 - do k=1,ndim3 + do k=1,ndim3 do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo @@ -320,7 +320,7 @@ subroutine ice_pio_initdecomp_3d (ndim3, iodesc, remap, precision) else lon = this_block%i_glob(i) lat = this_block%j_glob(j) - dof3d(n) = ((lat-1)*nx_global + lon) + (k-1)*nx_global*ny_global + dof3d(n) = ((lat-1)*nx_global + lon) + (k-1)*nx_global*ny_global endif enddo !i enddo !j @@ -350,9 +350,9 @@ subroutine ice_pio_initdecomp_3d_inner(ndim3, inner_dim, iodesc, precision) integer(kind=int_kind), optional, intent(in) :: precision integer (kind=int_kind) :: & - iblk,ilo,ihi,jlo,jhi,lon,lat,i,j,n,k + iblk,ilo,ihi,jlo,jhi,lon,lat,i,j,n,k - type(block) :: this_block + type(block) :: this_block integer(kind=int_kind), pointer :: dof3d(:) integer(kind=int_kind) :: lprecision @@ -365,12 +365,12 @@ subroutine ice_pio_initdecomp_3d_inner(ndim3, inner_dim, iodesc, precision) n=0 do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi - + do j=1,ny_block do i=1,nx_block do k=1,ndim3 @@ -410,9 +410,9 @@ subroutine ice_pio_initdecomp_4d (ndim3, ndim4, iodesc, precision) integer(kind=int_kind), optional, intent(in) :: precision integer (kind=int_kind) :: & - iblk,ilo,ihi,jlo,jhi,lon,lat,i,j,n,k,l + iblk,ilo,ihi,jlo,jhi,lon,lat,i,j,n,k,l - type(block) :: this_block + type(block) :: this_block integer(kind=int_kind), pointer :: dof4d(:) integer(kind=int_kind) :: lprecision @@ -427,12 +427,12 @@ subroutine ice_pio_initdecomp_4d (ndim3, ndim4, iodesc, precision) do l=1,ndim4 do k=1,ndim3 do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi - + do j=1,ny_block do i=1,nx_block n = n+1 @@ -444,8 +444,8 @@ subroutine ice_pio_initdecomp_4d (ndim3, ndim4, iodesc, precision) lon = this_block%i_glob(i) lat = this_block%j_glob(j) dof4d(n) = ((lat-1)*nx_global + lon) & - + (k-1)*nx_global*ny_global & - + (l-1)*nx_global*ny_global*ndim3 + + (k-1)*nx_global*ny_global & + + (l-1)*nx_global*ny_global*ndim3 endif enddo !i enddo !j @@ -464,7 +464,7 @@ subroutine ice_pio_initdecomp_4d (ndim3, ndim4, iodesc, precision) deallocate(dof4d) end subroutine ice_pio_initdecomp_4d - + !================================================================================ end module ice_pio diff --git a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 b/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 index 03a1fd07f..1124cc048 100644 --- a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 +++ b/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 @@ -61,7 +61,7 @@ subroutine init_restart_read(ice_ic) character(len=*), parameter :: subname = '(init_restart_read)' - if (present(ice_ic)) then + if (present(ice_ic)) then filename = trim(ice_ic) else if (my_task == master_task) then @@ -83,7 +83,7 @@ subroutine init_restart_read(ice_ic) if (restart_format == 'pio_pnetcdf') iotype = PIO_IOTYPE_PNETCDF File%fh=-1 call ice_pio_init(mode='read', filename=trim(filename), File=File, iotype=iotype) - + call ice_pio_initdecomp(iodesc=iodesc2d, precision=8) call ice_pio_initdecomp(ndim3=ncat , iodesc=iodesc3d_ncat,remap=.true., precision=8) @@ -122,7 +122,7 @@ subroutine 
init_restart_read(ice_ic) ! call broadcast_scalar(time,master_task) ! call broadcast_scalar(time_forc,master_task) call broadcast_scalar(myear,master_task) - + istep1 = istep0 ! if runid is bering then need to correct npt for istep0 @@ -219,7 +219,7 @@ subroutine init_restart_write(filename_spec) restart_file(1:lenstr(restart_file)),'.', & myear,'-',mmonth,'-',mday,'-',msec end if - + if (restart_format(1:3) /= 'bin') filename = trim(filename) // '.nc' ! write pointer (path/file) @@ -230,7 +230,7 @@ subroutine init_restart_write(filename_spec) endif ! if (restart_format(1:3) == 'pio') then - + iotype = PIO_IOTYPE_NETCDF if (restart_format == 'pio_pnetcdf') iotype = PIO_IOTYPE_PNETCDF File%fh=-1 @@ -807,14 +807,14 @@ subroutine read_restart_field(nu,nrec,work,atype,vname,ndim3,diag, & write(nu_diag,*) ' min, max, sum =', amin, amax, asum, trim(vname) endif endif - + endif ! else ! call abort_ice(subname//"ERROR: Invalid restart_format: "//trim(restart_format)) ! endif ! restart_format end subroutine read_restart_field - + !======================================================================= ! Writes a single restart field. @@ -862,10 +862,10 @@ subroutine write_restart_field(nu,nrec,work,atype,vname,ndim3,diag) write(nu_diag,*)'Parallel restart file write: ',vname status = pio_inq_varid(File,trim(vname),vardesc) - + status = pio_inq_varndims(File, vardesc, ndims) - if (ndims==3) then + if (ndims==3) then call pio_write_darray(File, vardesc, iodesc3d_ncat,work(:,:,:,1:nblocks), & status, fillval=c0) elseif (ndims == 2) then @@ -937,7 +937,7 @@ subroutine define_rest_field(File, vname, dims) character(len=*), parameter :: subname = '(define_rest_field)' status = pio_def_var(File,trim(vname),pio_double,dims,vardesc) - + end subroutine define_rest_field !======================================================================= diff --git a/cicecore/drivers/direct/hadgem3/CICE.F90 b/cicecore/drivers/direct/hadgem3/CICE.F90 index b0176e801..fe322a04d 100644 --- a/cicecore/drivers/direct/hadgem3/CICE.F90 +++ b/cicecore/drivers/direct/hadgem3/CICE.F90 @@ -1,21 +1,21 @@ !======================================================================= ! Copyright (c) 2022, Triad National Security, LLC ! All rights reserved. -! +! ! Copyright 2022. Triad National Security, LLC. This software was -! produced under U.S. Government contract DE-AC52-06NA25396 for Los +! produced under U.S. Government contract DE-AC52-06NA25396 for Los ! Alamos National Laboratory (LANL), which is operated by Triad -! National Security, LLC for the U.S. Department of Energy. The U.S. -! Government has rights to use, reproduce, and distribute this software. -! NEITHER THE GOVERNMENT NOR TRIAD NATIONAL SECURITY, LLC MAKES ANY +! National Security, LLC for the U.S. Department of Energy. The U.S. +! Government has rights to use, reproduce, and distribute this software. +! NEITHER THE GOVERNMENT NOR TRIAD NATIONAL SECURITY, LLC MAKES ANY ! WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LIABILITY FOR THE USE OF -! THIS SOFTWARE. If software is modified to produce derivative works, -! such modified software should be clearly marked, so as not to confuse +! THIS SOFTWARE. If software is modified to produce derivative works, +! such modified software should be clearly marked, so as not to confuse ! it with the version available from LANL. ! ! The full license and distribution policy are available from ! https://github.com/CICE-Consortium -! +! !======================================================================= ! ! 
Main driver routine for CICE. Initializes and steps through the model. @@ -48,7 +48,7 @@ program icemodel call CICE_Run !----------------------------------------------------------------- - ! Finalize CICE + ! Finalize CICE !----------------------------------------------------------------- call CICE_Finalize diff --git a/cicecore/drivers/direct/hadgem3/CICE_InitMod.F90 b/cicecore/drivers/direct/hadgem3/CICE_InitMod.F90 index fb39375b4..87dc8d9a1 100644 --- a/cicecore/drivers/direct/hadgem3/CICE_InitMod.F90 +++ b/cicecore/drivers/direct/hadgem3/CICE_InitMod.F90 @@ -176,11 +176,11 @@ subroutine cice_init if (icepack_warnings_aborted()) call abort_ice(subname, & file=__FILE__,line= __LINE__) - if (tr_aero .or. tr_zaero) call faero_optics !initialize aerosol optical + if (tr_aero .or. tr_zaero) call faero_optics !initialize aerosol optical !property tables - ! Initialize shortwave components using swdn from previous timestep - ! if restarting. These components will be scaled to current forcing + ! Initialize shortwave components using swdn from previous timestep + ! if restarting. These components will be scaled to current forcing ! in prep_radiation. if (trim(runtype) == 'continue' .or. restart) & call init_shortwave ! initialize radiative transfer @@ -215,7 +215,7 @@ subroutine cice_init call init_flux_atm ! initialize atmosphere fluxes sent to coupler call init_flux_ocn ! initialize ocean fluxes sent to coupler - if (write_ic) call accum_hist(dt) ! write initial conditions + if (write_ic) call accum_hist(dt) ! write initial conditions end subroutine cice_init @@ -297,7 +297,7 @@ subroutine init_restart if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) - if (trim(runtype) == 'continue') then + if (trim(runtype) == 'continue') then ! start from core restart file call restartfile() ! given by pointer in ice_in call calendar() ! update time parameters @@ -308,17 +308,17 @@ subroutine init_restart ! call restartfile_v4 (ice_ic) ! CICE v4.1 binary restart file !!! uncomment if EAP restart data exists ! if (kdyn == 2) call read_restart_eap - endif + endif ! tracers - ! ice age tracer - if (tr_iage) then + ! ice age tracer + if (tr_iage) then if (trim(runtype) == 'continue') & restart_age = .true. if (restart_age) then call read_restart_age else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_age(trcrn(:,:,nt_iage,:,iblk)) enddo ! iblk endif @@ -329,7 +329,7 @@ subroutine init_restart if (restart_FY) then call read_restart_FY else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_FY(trcrn(:,:,nt_FY,:,iblk)) enddo ! iblk endif @@ -340,7 +340,7 @@ subroutine init_restart if (restart_lvl) then call read_restart_lvl else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_lvl(iblk,trcrn(:,:,nt_alvl,:,iblk), & trcrn(:,:,nt_vlvl,:,iblk)) enddo ! iblk @@ -354,7 +354,7 @@ subroutine init_restart if (restart_pond_cesm) then call read_restart_pond_cesm else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_meltponds_cesm(trcrn(:,:,nt_apnd,:,iblk), & trcrn(:,:,nt_hpnd,:,iblk)) enddo ! 
iblk @@ -368,7 +368,7 @@ subroutine init_restart if (restart_pond_lvl) then call read_restart_pond_lvl else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_meltponds_lvl(trcrn(:,:,nt_apnd,:,iblk), & trcrn(:,:,nt_hpnd,:,iblk), & trcrn(:,:,nt_ipnd,:,iblk), & @@ -383,7 +383,7 @@ subroutine init_restart if (restart_pond_topo) then call read_restart_pond_topo else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_meltponds_topo(trcrn(:,:,nt_apnd,:,iblk), & trcrn(:,:,nt_hpnd,:,iblk), & trcrn(:,:,nt_ipnd,:,iblk)) @@ -404,7 +404,7 @@ subroutine init_restart if (restart_aero) then call read_restart_aero else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_aerosol(trcrn(:,:,nt_aero:nt_aero+4*n_aero-1,:,iblk)) enddo ! iblk endif ! .not. restart_aero diff --git a/cicecore/drivers/direct/hadgem3/CICE_RunMod.F90 b/cicecore/drivers/direct/hadgem3/CICE_RunMod.F90 index c269ab382..91f7985bd 100644 --- a/cicecore/drivers/direct/hadgem3/CICE_RunMod.F90 +++ b/cicecore/drivers/direct/hadgem3/CICE_RunMod.F90 @@ -7,7 +7,7 @@ ! William H. Lipscomb, LANL ! ! 2006 ECH: moved exit timeLoop to prevent execution of unnecessary timestep -! 2006 ECH: Streamlined for efficiency +! 2006 ECH: Streamlined for efficiency ! 2006 ECH: Converted to free source form (F90) ! 2007 BPB: Modified Delta-Eddington shortwave interface ! 2008 ECH: moved ESMF code to its own driver @@ -46,14 +46,14 @@ subroutine CICE_Run use ice_calendar, only: stop_now, advance_timestep use ice_forcing, only: get_forcing_atmo, get_forcing_ocn, & get_wave_spec - use ice_forcing_bgc, only: get_forcing_bgc, get_atm_bgc, fzaero_data, & + use ice_forcing_bgc, only: get_forcing_bgc, get_atm_bgc, fzaero_data, & faero_default use ice_flux, only: init_flux_atm, init_flux_ocn use ice_timers, only: ice_timer_start, ice_timer_stop, & timer_couple, timer_step logical (kind=log_kind) :: & tr_aero, tr_zaero, skl_bgc, z_tracers, wave_spec, tr_fsd - + character(len=*), parameter :: subname = '(CICE_Run)' !-------------------------------------------------------------------- @@ -118,7 +118,7 @@ subroutine CICE_Run ! end of timestep loop !-------------------------------------------------------------------- - call ice_timer_stop(timer_step) ! end timestepping loop timer + call ice_timer_stop(timer_step) ! end timestepping loop timer end subroutine CICE_Run @@ -163,7 +163,7 @@ subroutine ice_step timer_hist, timer_readwrite integer (kind=int_kind) :: & - iblk , & ! block index + iblk , & ! block index k , & ! dynamics supercycling index ktherm ! thermodynamics is off when ktherm = -1 @@ -238,7 +238,7 @@ subroutine ice_step !----------------------------------------------------------------- ! thermodynamics and biogeochemistry !----------------------------------------------------------------- - + call step_therm1 (dt, iblk) ! vertical thermodynamics call biogeochemistry (dt, iblk) ! biogeochemistry call step_therm2 (dt, iblk) ! ice thickness distribution thermo @@ -341,7 +341,7 @@ subroutine ice_step if (tr_fsd) call write_restart_fsd if (tr_aero) call write_restart_aero if (solve_zsal .or. skl_bgc .or. z_tracers) & - call write_restart_bgc + call write_restart_bgc if (tr_brine) call write_restart_hbrine if (kdyn == 2) call write_restart_eap call final_restart @@ -350,7 +350,7 @@ subroutine ice_step call ice_timer_stop(timer_readwrite) ! reading/writing end subroutine ice_step - + !======================================================================= ! ! 
Prepare for coupling @@ -380,12 +380,12 @@ subroutine coupling_prep (iblk) use ice_step_mod, only: ocean_mixed_layer use ice_timers, only: timer_couple, ice_timer_start, ice_timer_stop - integer (kind=int_kind), intent(in) :: & - iblk ! block index + integer (kind=int_kind), intent(in) :: & + iblk ! block index ! local variables - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & ilo,ihi,jlo,jhi, & ! beginning and end of physical domain n , & ! thickness category index i,j , & ! horizontal indices @@ -466,7 +466,7 @@ subroutine coupling_prep (iblk) do j = jlo, jhi do i = ilo, ihi if (aicen(i,j,n,iblk) > puny) then - + alvdf(i,j,iblk) = alvdf(i,j,iblk) & + alvdfn(i,j,n,iblk)*aicen(i,j,n,iblk) alidf(i,j,iblk) = alidf(i,j,iblk) & @@ -491,7 +491,7 @@ subroutine coupling_prep (iblk) + apeffn(i,j,n,iblk)*aicen(i,j,n,iblk) snowfrac(i,j,iblk) = snowfrac(i,j,iblk) & ! for history + snowfracn(i,j,n,iblk)*aicen(i,j,n,iblk) - + endif ! aicen > puny enddo enddo @@ -521,8 +521,8 @@ subroutine coupling_prep (iblk) fsalt_ai (i,j,iblk) = fsalt (i,j,iblk) fhocn_ai (i,j,iblk) = fhocn (i,j,iblk) fswthru_ai(i,j,iblk) = fswthru(i,j,iblk) - fzsal_ai (i,j,iblk) = fzsal (i,j,iblk) - fzsal_g_ai(i,j,iblk) = fzsal_g(i,j,iblk) + fzsal_ai (i,j,iblk) = fzsal (i,j,iblk) + fzsal_g_ai(i,j,iblk) = fzsal_g(i,j,iblk) if (nbtrcr > 0) then do k = 1, nbtrcr @@ -543,7 +543,7 @@ subroutine coupling_prep (iblk) enddo !----------------------------------------------------------------- - ! Divide fluxes by ice area + ! Divide fluxes by ice area ! - the CESM coupler assumes fluxes are per unit ice area ! - also needed for global budget in diagnostics !----------------------------------------------------------------- @@ -568,21 +568,21 @@ subroutine coupling_prep (iblk) alvdf (:,:,iblk), alidf (:,:,iblk), & fzsal (:,:,iblk), fzsal_g (:,:,iblk), & flux_bio(:,:,1:nbtrcr,iblk)) - + !echmod - comment this out for efficiency, if .not. calc_Tsfc if (.not. calc_Tsfc) then !--------------------------------------------------------------- - ! If surface fluxes were provided, conserve these fluxes at ice - ! free points by passing to ocean. + ! If surface fluxes were provided, conserve these fluxes at ice + ! free points by passing to ocean. !--------------------------------------------------------------- - call sfcflux_to_ocn & + call sfcflux_to_ocn & (nx_block, ny_block, & tmask (:,:,iblk), aice_init(:,:,iblk), & fsurfn_f (:,:,:,iblk), flatn_f(:,:,:,iblk), & fresh (:,:,iblk), fhocn (:,:,iblk)) - endif + endif !echmod call ice_timer_stop(timer_couple) ! atm/ocn coupling @@ -592,10 +592,10 @@ end subroutine coupling_prep !======================================================================= ! ! If surface heat fluxes are provided to CICE instead of CICE calculating -! them internally (i.e. .not. calc_Tsfc), then these heat fluxes can +! them internally (i.e. .not. calc_Tsfc), then these heat fluxes can ! be provided at points which do not have ice. (This could be due to ! the heat fluxes being calculated on a lower resolution grid or the -! heat fluxes not recalculated at every CICE timestep.) At ice free points, +! heat fluxes not recalculated at every CICE timestep.) At ice free points, ! conserve energy and water by passing these fluxes to the ocean. ! ! author: A. McLaren, Met Office @@ -629,7 +629,7 @@ subroutine sfcflux_to_ocn(nx_block, ny_block, & ! local variables integer (kind=int_kind) :: & i, j, n ! horizontal indices - + real (kind=dbl_kind) :: & puny, & ! Lsub, & !
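! [editor's note, not part of the patch] The comment block above describes
! handing externally provided surface fluxes to the ocean at ice-free
! points. A minimal sketch of that idea, assuming the argument list shown
! in the hunk (tmask, aice_init, fsurfn_f, flatn_f, fresh, fhocn) and with
! Lsub/puny passed in after an icepack_query_parameters call; illustrative
! only, not the Met Office implementation:
   subroutine sfcflux_to_ocn_sketch(nx_block, ny_block, ncat, Lsub, puny, &
                                    tmask, aice_init, fsurfn_f, flatn_f,  &
                                    fresh, fhocn)
      use ice_kinds_mod, only: int_kind, dbl_kind, log_kind
      integer (kind=int_kind), intent(in) :: nx_block, ny_block, ncat
      real (kind=dbl_kind), intent(in) :: Lsub, puny ! latent heat of sublimation; small number
      logical (kind=log_kind), dimension(nx_block,ny_block), intent(in) :: tmask
      real (kind=dbl_kind), dimension(nx_block,ny_block), intent(in) :: aice_init
      real (kind=dbl_kind), dimension(nx_block,ny_block,ncat), intent(in) :: fsurfn_f, flatn_f
      real (kind=dbl_kind), dimension(nx_block,ny_block), intent(inout) :: fresh, fhocn
      integer (kind=int_kind) :: i, j, n
      do n = 1, ncat
         do j = 1, ny_block
            do i = 1, nx_block
               if (tmask(i,j) .and. aice_init(i,j) <= puny) then
                  ! ice-free ocean point: conserve the provided heat flux
                  fhocn(i,j) = fhocn(i,j) + fsurfn_f(i,j,n) + flatn_f(i,j,n)
                  ! and the water implied by the latent heat flux (sublimation)
                  fresh(i,j) = fresh(i,j) + flatn_f(i,j,n)/Lsub
               endif
            enddo
         enddo
      enddo
   end subroutine sfcflux_to_ocn_sketch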
@@ -656,7 +656,7 @@ subroutine sfcflux_to_ocn(nx_block, ny_block, & enddo ! j enddo ! n -#endif +#endif end subroutine sfcflux_to_ocn diff --git a/cicecore/drivers/direct/nemo_concepts/CICE_InitMod.F90 b/cicecore/drivers/direct/nemo_concepts/CICE_InitMod.F90 index fb39375b4..87dc8d9a1 100644 --- a/cicecore/drivers/direct/nemo_concepts/CICE_InitMod.F90 +++ b/cicecore/drivers/direct/nemo_concepts/CICE_InitMod.F90 @@ -176,11 +176,11 @@ subroutine cice_init if (icepack_warnings_aborted()) call abort_ice(subname, & file=__FILE__,line= __LINE__) - if (tr_aero .or. tr_zaero) call faero_optics !initialize aerosol optical + if (tr_aero .or. tr_zaero) call faero_optics !initialize aerosol optical !property tables - ! Initialize shortwave components using swdn from previous timestep - ! if restarting. These components will be scaled to current forcing + ! Initialize shortwave components using swdn from previous timestep + ! if restarting. These components will be scaled to current forcing ! in prep_radiation. if (trim(runtype) == 'continue' .or. restart) & call init_shortwave ! initialize radiative transfer @@ -215,7 +215,7 @@ subroutine cice_init call init_flux_atm ! initialize atmosphere fluxes sent to coupler call init_flux_ocn ! initialize ocean fluxes sent to coupler - if (write_ic) call accum_hist(dt) ! write initial conditions + if (write_ic) call accum_hist(dt) ! write initial conditions end subroutine cice_init @@ -297,7 +297,7 @@ subroutine init_restart if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) - if (trim(runtype) == 'continue') then + if (trim(runtype) == 'continue') then ! start from core restart file call restartfile() ! given by pointer in ice_in call calendar() ! update time parameters @@ -308,17 +308,17 @@ subroutine init_restart ! call restartfile_v4 (ice_ic) ! CICE v4.1 binary restart file !!! uncomment if EAP restart data exists ! if (kdyn == 2) call read_restart_eap - endif + endif ! tracers - ! ice age tracer - if (tr_iage) then + ! ice age tracer + if (tr_iage) then if (trim(runtype) == 'continue') & restart_age = .true. if (restart_age) then call read_restart_age else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_age(trcrn(:,:,nt_iage,:,iblk)) enddo ! iblk endif @@ -329,7 +329,7 @@ subroutine init_restart if (restart_FY) then call read_restart_FY else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_FY(trcrn(:,:,nt_FY,:,iblk)) enddo ! iblk endif @@ -340,7 +340,7 @@ subroutine init_restart if (restart_lvl) then call read_restart_lvl else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_lvl(iblk,trcrn(:,:,nt_alvl,:,iblk), & trcrn(:,:,nt_vlvl,:,iblk)) enddo ! iblk @@ -354,7 +354,7 @@ subroutine init_restart if (restart_pond_cesm) then call read_restart_pond_cesm else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_meltponds_cesm(trcrn(:,:,nt_apnd,:,iblk), & trcrn(:,:,nt_hpnd,:,iblk)) enddo ! 
iblk @@ -368,7 +368,7 @@ subroutine init_restart if (restart_pond_lvl) then call read_restart_pond_lvl else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_meltponds_lvl(trcrn(:,:,nt_apnd,:,iblk), & trcrn(:,:,nt_hpnd,:,iblk), & trcrn(:,:,nt_ipnd,:,iblk), & @@ -383,7 +383,7 @@ subroutine init_restart if (restart_pond_topo) then call read_restart_pond_topo else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_meltponds_topo(trcrn(:,:,nt_apnd,:,iblk), & trcrn(:,:,nt_hpnd,:,iblk), & trcrn(:,:,nt_ipnd,:,iblk)) @@ -404,7 +404,7 @@ subroutine init_restart if (restart_aero) then call read_restart_aero else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_aerosol(trcrn(:,:,nt_aero:nt_aero+4*n_aero-1,:,iblk)) enddo ! iblk endif ! .not. restart_aero diff --git a/cicecore/drivers/direct/nemo_concepts/CICE_RunMod.F90 b/cicecore/drivers/direct/nemo_concepts/CICE_RunMod.F90 index 272174fe7..ea6a65165 100644 --- a/cicecore/drivers/direct/nemo_concepts/CICE_RunMod.F90 +++ b/cicecore/drivers/direct/nemo_concepts/CICE_RunMod.F90 @@ -7,7 +7,7 @@ ! William H. Lipscomb, LANL ! ! 2006 ECH: moved exit timeLoop to prevent execution of unnecessary timestep -! 2006 ECH: Streamlined for efficiency +! 2006 ECH: Streamlined for efficiency ! 2006 ECH: Converted to free source form (F90) ! 2007 BPB: Modified Delta-Eddington shortwave interface ! 2008 ECH: moved ESMF code to its own driver @@ -46,14 +46,14 @@ subroutine CICE_Run use ice_calendar, only: stop_now, advance_timestep use ice_forcing, only: get_forcing_atmo, get_forcing_ocn, & get_wave_spec - use ice_forcing_bgc, only: get_forcing_bgc, get_atm_bgc, fzaero_data, & + use ice_forcing_bgc, only: get_forcing_bgc, get_atm_bgc, fzaero_data, & faero_default use ice_flux, only: init_flux_atm, init_flux_ocn use ice_timers, only: ice_timer_start, ice_timer_stop, & timer_couple, timer_step logical (kind=log_kind) :: & tr_aero, tr_zaero, skl_bgc, z_tracers, wave_spec, tr_fsd - + character(len=*), parameter :: subname = '(CICE_Run)' !-------------------------------------------------------------------- @@ -118,7 +118,7 @@ subroutine CICE_Run ! end of timestep loop !-------------------------------------------------------------------- - call ice_timer_stop(timer_step) ! end timestepping loop timer + call ice_timer_stop(timer_step) ! end timestepping loop timer end subroutine CICE_Run @@ -163,7 +163,7 @@ subroutine ice_step timer_hist, timer_readwrite integer (kind=int_kind) :: & - iblk , & ! block index + iblk , & ! block index k , & ! dynamics supercycling index ktherm ! thermodynamics is off when ktherm = -1 @@ -238,7 +238,7 @@ subroutine ice_step !----------------------------------------------------------------- ! thermodynamics and biogeochemistry !----------------------------------------------------------------- - + call step_therm1 (dt, iblk) ! vertical thermodynamics call biogeochemistry (dt, iblk) ! biogeochemistry call step_therm2 (dt, iblk) ! ice thickness distribution thermo @@ -341,7 +341,7 @@ subroutine ice_step if (tr_fsd) call write_restart_fsd if (tr_aero) call write_restart_aero if (solve_zsal .or. skl_bgc .or. z_tracers) & - call write_restart_bgc + call write_restart_bgc if (tr_brine) call write_restart_hbrine if (kdyn == 2) call write_restart_eap call final_restart @@ -350,7 +350,7 @@ subroutine ice_step call ice_timer_stop(timer_readwrite) ! reading/writing end subroutine ice_step - + !======================================================================= ! ! 
Prepare for coupling @@ -380,12 +380,12 @@ subroutine coupling_prep (iblk) use ice_step_mod, only: ocean_mixed_layer use ice_timers, only: timer_couple, ice_timer_start, ice_timer_stop - integer (kind=int_kind), intent(in) :: & - iblk ! block index + integer (kind=int_kind), intent(in) :: & + iblk ! block index ! local variables - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & ilo,ihi,jlo,jhi, & ! beginning and end of physical domain n , & ! thickness category index i,j , & ! horizontal indices @@ -466,7 +466,7 @@ subroutine coupling_prep (iblk) do j = jlo, jhi do i = ilo, ihi if (aicen(i,j,n,iblk) > puny) then - + alvdf(i,j,iblk) = alvdf(i,j,iblk) & + alvdfn(i,j,n,iblk)*aicen(i,j,n,iblk) alidf(i,j,iblk) = alidf(i,j,iblk) & @@ -491,7 +491,7 @@ subroutine coupling_prep (iblk) + apeffn(i,j,n,iblk)*aicen(i,j,n,iblk) snowfrac(i,j,iblk) = snowfrac(i,j,iblk) & ! for history + snowfracn(i,j,n,iblk)*aicen(i,j,n,iblk) - + endif ! aicen > puny enddo enddo @@ -521,8 +521,8 @@ subroutine coupling_prep (iblk) fsalt_ai (i,j,iblk) = fsalt (i,j,iblk) fhocn_ai (i,j,iblk) = fhocn (i,j,iblk) fswthru_ai(i,j,iblk) = fswthru(i,j,iblk) - fzsal_ai (i,j,iblk) = fzsal (i,j,iblk) - fzsal_g_ai(i,j,iblk) = fzsal_g(i,j,iblk) + fzsal_ai (i,j,iblk) = fzsal (i,j,iblk) + fzsal_g_ai(i,j,iblk) = fzsal_g(i,j,iblk) if (nbtrcr > 0) then do k = 1, nbtrcr @@ -543,7 +543,7 @@ subroutine coupling_prep (iblk) enddo !----------------------------------------------------------------- - ! Divide fluxes by ice area + ! Divide fluxes by ice area ! - the CESM coupler assumes fluxes are per unit ice area ! - also needed for global budget in diagnostics !----------------------------------------------------------------- @@ -570,21 +570,21 @@ subroutine coupling_prep (iblk) alvdf (:,:,iblk), alidf (:,:,iblk), & fzsal (:,:,iblk), fzsal_g (:,:,iblk), & flux_bio(:,:,1:nbtrcr,iblk)) - + !echmod - comment this out for efficiency, if .not. calc_Tsfc if (.not. calc_Tsfc) then !--------------------------------------------------------------- - ! If surface fluxes were provided, conserve these fluxes at ice - ! free points by passing to ocean. + ! If surface fluxes were provided, conserve these fluxes at ice + ! free points by passing to ocean. !--------------------------------------------------------------- - call sfcflux_to_ocn & + call sfcflux_to_ocn & (nx_block, ny_block, & tmask (:,:,iblk), aice_init(:,:,iblk), & fsurfn_f (:,:,:,iblk), flatn_f(:,:,:,iblk), & fresh (:,:,iblk), fhocn (:,:,iblk)) - endif + endif !echmod call ice_timer_stop(timer_couple) ! atm/ocn coupling @@ -594,10 +594,10 @@ end subroutine coupling_prep !======================================================================= ! ! If surface heat fluxes are provided to CICE instead of CICE calculating -! them internally (i.e. .not. calc_Tsfc), then these heat fluxes can +! them internally (i.e. .not. calc_Tsfc), then these heat fluxes can ! be provided at points which do not have ice. (This could be due to ! the heat fluxes being calculated on a lower resolution grid or the -! heat fluxes not recalculated at every CICE timestep.) At ice free points, +! heat fluxes not recalculated at every CICE timestep.) At ice free points, ! conserve energy and water by passing these fluxes to the ocean. ! ! author: A. McLaren, Met Office @@ -631,7 +631,7 @@ subroutine sfcflux_to_ocn(nx_block, ny_block, & ! local variables integer (kind=int_kind) :: & i, j, n ! horizontal indices - + real (kind=dbl_kind) :: & puny, & ! Lsub, & !
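! [editor's note, not part of the patch] The "Divide fluxes by ice area"
! step referenced in the hunk above rescales the accumulated ocean fluxes
! so the CESM coupler receives per-unit-ice-area values. A sketch of the
! scaling, not the actual scale_fluxes interface; the subroutine name is
! hypothetical, puny would come from icepack_query_parameters, and aice
! here is the ice area fraction saved before the step:
   subroutine scale_ocn_fluxes_sketch(nx_block, ny_block, puny, aice, &
                                      fresh, fsalt, fhocn)
      use ice_kinds_mod, only: int_kind, dbl_kind
      use ice_constants, only: c1
      integer (kind=int_kind), intent(in) :: nx_block, ny_block
      real (kind=dbl_kind), intent(in) :: puny ! small number
      real (kind=dbl_kind), dimension(nx_block,ny_block), intent(in)    :: aice
      real (kind=dbl_kind), dimension(nx_block,ny_block), intent(inout) :: fresh, fsalt, fhocn
      integer (kind=int_kind) :: i, j
      real (kind=dbl_kind) :: ar
      do j = 1, ny_block
         do i = 1, nx_block
            if (aice(i,j) > puny) then
               ar = c1 / aice(i,j)       ! grid-cell mean -> per unit ice area
               fresh(i,j) = fresh(i,j) * ar
               fsalt(i,j) = fsalt(i,j) * ar
               fhocn(i,j) = fhocn(i,j) * ar
            endif
         enddo
      enddo
   end subroutine scale_ocn_fluxes_sketch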
@@ -658,7 +658,7 @@ subroutine sfcflux_to_ocn(nx_block, ny_block, & enddo ! j enddo ! n -#endif +#endif end subroutine sfcflux_to_ocn diff --git a/cicecore/drivers/mct/cesm1/CICE_InitMod.F90 b/cicecore/drivers/mct/cesm1/CICE_InitMod.F90 index b33886954..cfc5bece9 100644 --- a/cicecore/drivers/mct/cesm1/CICE_InitMod.F90 +++ b/cicecore/drivers/mct/cesm1/CICE_InitMod.F90 @@ -193,11 +193,11 @@ subroutine cice_init(mpicom_ice) if (icepack_warnings_aborted()) call abort_ice(trim(subname), & file=__FILE__,line= __LINE__) - if (tr_aero .or. tr_zaero) call faero_optics !initialize aerosol optical + if (tr_aero .or. tr_zaero) call faero_optics !initialize aerosol optical !property tables - ! Initialize shortwave components using swdn from previous timestep - ! if restarting. These components will be scaled to current forcing + ! Initialize shortwave components using swdn from previous timestep + ! if restarting. These components will be scaled to current forcing ! in prep_radiation. if (trim(runtype) == 'continue' .or. restart) & call init_shortwave ! initialize radiative transfer @@ -241,7 +241,7 @@ subroutine cice_init(mpicom_ice) call init_flux_atm ! initialize atmosphere fluxes sent to coupler call init_flux_ocn ! initialize ocean fluxes sent to coupler - if (write_ic) call accum_hist(dt) ! write initial conditions + if (write_ic) call accum_hist(dt) ! write initial conditions if (my_task == master_task) then call ice_memusage_print(nu_diag,subname//':end') @@ -332,7 +332,7 @@ subroutine init_restart if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) - if (trim(runtype) == 'continue') then + if (trim(runtype) == 'continue') then ! start from core restart file call restartfile() ! given by pointer in ice_in call calendar() ! update time parameters @@ -343,17 +343,17 @@ subroutine init_restart ! call restartfile_v4 (ice_ic) ! CICE v4.1 binary restart file !!! uncomment if EAP restart data exists ! if (kdyn == 2) call read_restart_eap - endif + endif ! tracers - ! ice age tracer - if (tr_iage) then + ! ice age tracer + if (tr_iage) then if (trim(runtype) == 'continue') & restart_age = .true. if (restart_age) then call read_restart_age else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_age(trcrn(:,:,nt_iage,:,iblk)) enddo ! iblk endif @@ -364,7 +364,7 @@ subroutine init_restart if (restart_FY) then call read_restart_FY else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_FY(trcrn(:,:,nt_FY,:,iblk)) enddo ! iblk endif @@ -375,7 +375,7 @@ subroutine init_restart if (restart_lvl) then call read_restart_lvl else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_lvl(iblk,trcrn(:,:,nt_alvl,:,iblk), & trcrn(:,:,nt_vlvl,:,iblk)) enddo ! iblk @@ -389,7 +389,7 @@ subroutine init_restart if (restart_pond_cesm) then call read_restart_pond_cesm else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_meltponds_cesm(trcrn(:,:,nt_apnd,:,iblk), & trcrn(:,:,nt_hpnd,:,iblk)) enddo ! 
iblk @@ -403,7 +403,7 @@ subroutine init_restart if (restart_pond_lvl) then call read_restart_pond_lvl else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_meltponds_lvl(trcrn(:,:,nt_apnd,:,iblk), & trcrn(:,:,nt_hpnd,:,iblk), & trcrn(:,:,nt_ipnd,:,iblk), & @@ -418,7 +418,7 @@ subroutine init_restart if (restart_pond_topo) then call read_restart_pond_topo else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_meltponds_topo(trcrn(:,:,nt_apnd,:,iblk), & trcrn(:,:,nt_hpnd,:,iblk), & trcrn(:,:,nt_ipnd,:,iblk)) @@ -469,7 +469,7 @@ subroutine init_restart if (restart_aero) then call read_restart_aero else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_aerosol(trcrn(:,:,nt_aero:nt_aero+4*n_aero-1,:,iblk)) enddo ! iblk endif ! .not. restart_aero diff --git a/cicecore/drivers/mct/cesm1/CICE_RunMod.F90 b/cicecore/drivers/mct/cesm1/CICE_RunMod.F90 index 80ff3bd46..b96086c6d 100644 --- a/cicecore/drivers/mct/cesm1/CICE_RunMod.F90 +++ b/cicecore/drivers/mct/cesm1/CICE_RunMod.F90 @@ -7,7 +7,7 @@ ! William H. Lipscomb, LANL ! ! 2006 ECH: moved exit timeLoop to prevent execution of unnecessary timestep -! 2006 ECH: Streamlined for efficiency +! 2006 ECH: Streamlined for efficiency ! 2006 ECH: Converted to free source form (F90) ! 2007 BPB: Modified Delta-Eddington shortwave interface ! 2008 ECH: moved ESMF code to its own driver @@ -110,7 +110,7 @@ subroutine CICE_Run ! end of timestep loop !-------------------------------------------------------------------- - call ice_timer_stop(timer_step) ! end timestepping loop timer + call ice_timer_stop(timer_step) ! end timestepping loop timer end subroutine CICE_Run @@ -157,7 +157,7 @@ subroutine ice_step use ice_prescribed_mod integer (kind=int_kind) :: & - iblk , & ! block index + iblk , & ! block index k , & ! dynamics supercycling index ktherm ! thermodynamics is off when ktherm = -1 @@ -413,7 +413,7 @@ subroutine ice_step if (tr_iso) call write_restart_iso if (tr_aero) call write_restart_aero if (solve_zsal .or. skl_bgc .or. z_tracers) & - call write_restart_bgc + call write_restart_bgc if (tr_brine) call write_restart_hbrine if (kdyn == 2) call write_restart_eap call final_restart @@ -421,7 +421,7 @@ subroutine ice_step call ice_timer_stop(timer_readwrite) ! reading/writing end subroutine ice_step - + !======================================================================= ! ! Prepare for coupling @@ -456,12 +456,12 @@ subroutine coupling_prep (iblk) use ice_step_mod, only: ocean_mixed_layer use ice_timers, only: timer_couple, ice_timer_start, ice_timer_stop - integer (kind=int_kind), intent(in) :: & - iblk ! block index + integer (kind=int_kind), intent(in) :: & + iblk ! block index ! local variables - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & ilo,ihi,jlo,jhi, & ! beginning and end of physical domain n , & ! thickness category index i,j , & ! horizontal indices @@ -599,8 +599,8 @@ subroutine coupling_prep (iblk) fsalt_ai (i,j,iblk) = fsalt (i,j,iblk) fhocn_ai (i,j,iblk) = fhocn (i,j,iblk) fswthru_ai(i,j,iblk) = fswthru(i,j,iblk) - fzsal_ai (i,j,iblk) = fzsal (i,j,iblk) - fzsal_g_ai(i,j,iblk) = fzsal_g(i,j,iblk) + fzsal_ai (i,j,iblk) = fzsal (i,j,iblk) + fzsal_g_ai(i,j,iblk) = fzsal_g(i,j,iblk) if (nbtrcr > 0) then do k = 1, nbtrcr @@ -621,7 +621,7 @@ subroutine coupling_prep (iblk) enddo !----------------------------------------------------------------- - ! Divide fluxes by ice area + ! Divide fluxes by ice area ! - the CESM coupler assumes fluxes are per unit ice area ! 
- also needed for global budget in diagnostics !----------------------------------------------------------------- @@ -652,7 +652,7 @@ subroutine coupling_prep (iblk) fiso_evap=fiso_evap(:,:,:,iblk), & fiso_ocn =fiso_ocn (:,:,:,iblk), & Uref=Uref(:,:,iblk), wind=wind(:,:,iblk) ) - + if (nbtrcr > 0 .or. skl_bgc) then call bgcflux_ice_to_ocn (nx_block, ny_block, & flux_bio(:,:,1:nbtrcr,iblk), & @@ -669,16 +669,16 @@ subroutine coupling_prep (iblk) if (.not. calc_Tsfc) then !--------------------------------------------------------------- - ! If surface fluxes were provided, conserve these fluxes at ice - ! free points by passing to ocean. + ! If surface fluxes were provided, conserve these fluxes at ice + ! free points by passing to ocean. !--------------------------------------------------------------- - call sfcflux_to_ocn & + call sfcflux_to_ocn & (nx_block, ny_block, & tmask (:,:,iblk), aice_init(:,:,iblk), & fsurfn_f (:,:,:,iblk), flatn_f(:,:,:,iblk), & fresh (:,:,iblk), fhocn (:,:,iblk)) - endif + endif !echmod call ice_timer_stop(timer_couple,iblk) ! atm/ocn coupling @@ -688,10 +688,10 @@ end subroutine coupling_prep !======================================================================= ! ! If surface heat fluxes are provided to CICE instead of CICE calculating -! them internally (i.e. .not. calc_Tsfc), then these heat fluxes can +! them internally (i.e. .not. calc_Tsfc), then these heat fluxes can ! be provided at points which do not have ice. (This could be due to ! the heat fluxes being calculated on a lower resolution grid or the -! heat fluxes not recalculated at every CICE timestep.) At ice free points, +! heat fluxes not recalculated at every CICE timestep.) At ice free points, ! conserve energy and water by passing these fluxes to the ocean. ! ! author: A. McLaren, Met Office @@ -724,7 +724,7 @@ subroutine sfcflux_to_ocn(nx_block, ny_block, & ! local variables integer (kind=int_kind) :: & i, j, n ! horizontal indices - + real (kind=dbl_kind) :: & puny, & ! Lsub, & ! diff --git a/cicecore/drivers/mct/cesm1/CICE_copyright.txt b/cicecore/drivers/mct/cesm1/CICE_copyright.txt index 6eb3c9cca..14d97c481 100644 --- a/cicecore/drivers/mct/cesm1/CICE_copyright.txt +++ b/cicecore/drivers/mct/cesm1/CICE_copyright.txt @@ -1,17 +1,17 @@ ! Copyright (c) 2022, Triad National Security, LLC ! All rights reserved. -! +! ! Copyright 2022. Triad National Security, LLC. This software was -! produced under U.S. Government contract DE-AC52-06NA25396 for Los +! produced under U.S. Government contract DE-AC52-06NA25396 for Los ! Alamos National Laboratory (LANL), which is operated by Triad -! National Security, LLC for the U.S. Department of Energy. The U.S. -! Government has rights to use, reproduce, and distribute this software. -! NEITHER THE GOVERNMENT NOR TRIAD NATIONAL SECURITY, LLC MAKES ANY +! National Security, LLC for the U.S. Department of Energy. The U.S. +! Government has rights to use, reproduce, and distribute this software. +! NEITHER THE GOVERNMENT NOR TRIAD NATIONAL SECURITY, LLC MAKES ANY ! WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LIABILITY FOR THE USE OF -! THIS SOFTWARE. If software is modified to produce derivative works, -! such modified software should be clearly marked, so as not to confuse +! THIS SOFTWARE. If software is modified to produce derivative works, +! such modified software should be clearly marked, so as not to confuse ! it with the version available from LANL. ! ! The full license and distribution policy are available from !
https://github.com/CICE-Consortium -! +! diff --git a/cicecore/drivers/mct/cesm1/ice_comp_esmf.F90 b/cicecore/drivers/mct/cesm1/ice_comp_esmf.F90 index b0a78bfcd..454895410 100644 --- a/cicecore/drivers/mct/cesm1/ice_comp_esmf.F90 +++ b/cicecore/drivers/mct/cesm1/ice_comp_esmf.F90 @@ -56,7 +56,7 @@ module ice_comp_esmf use ice_timers use ice_kinds_mod, only : int_kind, dbl_kind, char_len_long, log_kind - use ice_boundary, only : ice_HaloUpdate + use ice_boundary, only : ice_HaloUpdate use ice_scam, only : scmlat, scmlon, single_column use ice_fileunits, only : nu_diag, inst_index, inst_name, inst_suffix, & release_all_fileunits @@ -89,7 +89,7 @@ module ice_comp_esmf ! ! !PRIVATE VARIABLES - integer (kind=int_kind) :: ICEID + integer (kind=int_kind) :: ICEID type(mct_gGrid) :: dom_i type(mct_gsMap) :: gsMap_i @@ -140,7 +140,7 @@ subroutine ice_init_esmf(comp, import_state, export_state, EClock, rc) ! ! !DESCRIPTION: ! Initialize thermodynamic ice model and obtain relevant atmospheric model -! arrays back from driver +! arrays back from driver ! ! !USES: @@ -168,7 +168,7 @@ subroutine ice_init_esmf(comp, import_state, export_state, EClock, rc) integer :: xoff,yoff integer :: nxg,nyg integer :: k, iblk - + character(len=256) :: drvarchdir ! driver archive directory character(len=32) :: starttype ! infodata start type integer :: start_ymd ! Start date (YYYYMMDD) @@ -207,7 +207,7 @@ subroutine ice_init_esmf(comp, import_state, export_state, EClock, rc) call ice_cpl_indices_set() - ! duplicate the mpi communicator from the current VM + ! duplicate the mpi communicator from the current VM call ESMF_VMGetCurrent(vm, rc=rc) if(rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT) @@ -218,7 +218,7 @@ subroutine ice_init_esmf(comp, import_state, export_state, EClock, rc) if(rc /= 0) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT) ! Initialize cice id - + call ESMF_AttributeGet(export_state, name="ID", value=ICEID, rc=rc) if (rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT) @@ -324,14 +324,14 @@ subroutine ice_init_esmf(comp, import_state, export_state, EClock, rc) ! use EClock to reset calendar information on initial start !--------------------------------------------------------------------------- - ! - on restart run + ! - on restart run ! - istep0, time and time_forc are read from restart file ! - istep1 is set to istep0 ! - idate is determined from time via the call to calendar (see below) - ! - on initial run + ! - on initial run ! - iyear, month and mday obtained from sync clock ! - time determined from iyear, month and mday - ! - istep0 and istep1 are set to 0 + ! - istep0 and istep1 are set to 0 call seq_timemgr_EClockGetData(EClock, & start_ymd=start_ymd, start_tod=start_tod, & @@ -384,7 +384,7 @@ subroutine ice_init_esmf(comp, import_state, export_state, EClock, rc) call calendar(time) ! update calendar info if (write_ic) call accum_hist(dt) ! write initial conditions - + !--------------------------------------------------------------------------- ! Initialize MCT attribute vectors and indices !--------------------------------------------------------------------------- @@ -413,12 +413,12 @@ subroutine ice_init_esmf(comp, import_state, export_state, EClock, rc) !----------------------------------------- ! 
Set arrayspec for dom, l2x and x2l !----------------------------------------- - + call ESMF_ArraySpecSet(arrayspec, rank=2, typekind=ESMF_TYPEKIND_R8, rc=rc) if(rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT) !----------------------------------------- - ! Create dom + ! Create dom !----------------------------------------- nfields = shr_string_listGetNum(trim(seq_flds_dom_fields)) @@ -430,11 +430,11 @@ subroutine ice_init_esmf(comp, import_state, export_state, EClock, rc) call ESMF_AttributeSet(dom, name="mct_names", value=trim(seq_flds_dom_fields), rc=rc) if(rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT) - ! Set values of dom + ! Set values of dom call ice_domain_esmf(dom) - !----------------------------------------- - ! Create i2x + !----------------------------------------- + ! Create i2x !----------------------------------------- ! 1d undistributed index of fields, 2d is packed data @@ -447,9 +447,9 @@ subroutine ice_init_esmf(comp, import_state, export_state, EClock, rc) call ESMF_AttributeSet(i2x, name="mct_names", value=trim(seq_flds_i2x_fields), rc=rc) if(rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT) - - !----------------------------------------- - ! Create x2i + + !----------------------------------------- + ! Create x2i !----------------------------------------- nfields = shr_string_listGetNum(trim(seq_flds_x2i_fields)) @@ -461,16 +461,16 @@ subroutine ice_init_esmf(comp, import_state, export_state, EClock, rc) call ESMF_AttributeSet(x2i, name="mct_names", value=trim(seq_flds_x2i_fields), rc=rc) if(rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT) - !----------------------------------------- - ! Add esmf arrays to import and export state !----------------------------------------- - + ! Add esmf arrays to import and export state + !----------------------------------------- + call ESMF_StateAdd(export_state, (/dom/), rc=rc) if(rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT) call ESMF_StateAdd(export_state, (/i2x/), rc=rc) if(rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT) - + call ESMF_StateAdd(import_state, (/x2i/), rc=rc) if(rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT) @@ -575,7 +575,7 @@ subroutine ice_init_esmf(comp, import_state, export_state, EClock, rc) ! Error check if ((tr_aero .and. .not. atm_aero) .or. (tr_zaero .and. .not. atm_aero)) then - write(nu_diag,*) 'ice_import ERROR: atm_aero must be set for tr_aero or tr_zaero' + write(nu_diag,*) 'ice_import ERROR: atm_aero must be set for tr_aero or tr_zaero' call shr_sys_abort() end if @@ -596,7 +596,7 @@ subroutine ice_init_esmf(comp, import_state, export_state, EClock, rc) ! write(shrlogunit,105) trim(subname)//' memory_write: model date = ',start_ymd,start_tod, & ! ' memory = ',msize0,' MB (highwater) ',mrss0,' MB (usage)' ! endif - + 105 format( A, 2i8, A, f10.2, A, f10.2, A) end subroutine ice_init_esmf @@ -668,7 +668,7 @@ subroutine ice_run_esmf(comp, import_state, export_state, EClock, rc) call shr_file_getLogUnit (shrlogunit) call shr_file_getLogLevel(shrloglev) call shr_file_setLogUnit (nu_diag) - + ! Determine time of next atmospheric shortwave calculation call ESMF_AttributeGet(export_state, name="nextsw_cday", value=nextsw_cday, rc=rc) @@ -706,7 +706,7 @@ subroutine ice_run_esmf(comp, import_state, export_state, EClock, rc) !------------------------------------------------------------------- ! 
get import state !------------------------------------------------------------------- - + call t_barrierf('cice_run_import_BARRIER',MPI_COMM_ICE) call t_startf ('cice_run_import') @@ -724,7 +724,7 @@ subroutine ice_run_esmf(comp, import_state, export_state, EClock, rc) endif call ice_timer_stop(timer_cplrecv) call t_stopf ('cice_run_import') - + !-------------------------------------------------------------------- ! timestep update !-------------------------------------------------------------------- @@ -732,9 +732,9 @@ subroutine ice_run_esmf(comp, import_state, export_state, EClock, rc) call CICE_Run() !----------------------------------------------------------------- - ! send export state to driver + ! send export state to driver !----------------------------------------------------------------- - + call t_barrierf('cice_run_export_BARRIER',MPI_COMM_ICE) call t_startf ('cice_run_export') call ice_timer_start(timer_cplsend) @@ -752,7 +752,7 @@ subroutine ice_run_esmf(comp, import_state, export_state, EClock, rc) endif call ice_timer_stop(timer_cplsend) call t_stopf ('cice_run_export') - + !-------------------------------------------------------------------- ! check that internal clock is in sync with master clock !-------------------------------------------------------------------- @@ -767,7 +767,7 @@ subroutine ice_run_esmf(comp, import_state, export_state, EClock, rc) call shr_sys_abort( SubName// & ":: Internal sea-ice clock not in sync with Sync Clock") end if - + ! reset shr logging to my original values call shr_file_setLogUnit (shrlogunit) @@ -776,9 +776,9 @@ subroutine ice_run_esmf(comp, import_state, export_state, EClock, rc) !------------------------------------------------------------------- ! stop timers and print timer info !------------------------------------------------------------------- - ! Need to have this logic here instead of in ice_final_esmf since + ! Need to have this logic here instead of in ice_final_esmf since ! the ice_final_esmf.F90 will still be called even in aqua-planet mode - ! Could put this logic in the driver - but it seems easier here + ! Could put this logic in the driver - but it seems easier here ! Need to stop this at the end of every run phase in a coupled run. call ice_timer_stop(timer_total) ! stop timing @@ -788,7 +788,7 @@ subroutine ice_run_esmf(comp, import_state, export_state, EClock, rc) call ice_timer_print_all(stats=.true.) ! print timing information call release_all_fileunits end if - + ! if(tod == 0) then ! call shr_get_memusage(msize,mrss) ! call shr_mpi_max(mrss, mrss0, MPI_COMM_ICE,trim(subname)//' mrss0') @@ -799,7 +799,7 @@ subroutine ice_run_esmf(comp, import_state, export_state, EClock, rc) ! endif ! 
endif call t_stopf ('cice_run_total') - + 105 format( A, 2i8, A, f10.2, A, f10.2, A) end subroutine ice_run_esmf @@ -881,12 +881,12 @@ function ice_distgrid_esmf(gsize) n=0 do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi - + do j = jlo, jhi do i = ilo, ihi n = n+1 @@ -902,12 +902,12 @@ function ice_distgrid_esmf(gsize) allocate(gindex(lsize)) n=0 do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi - + do j = jlo, jhi do i = ilo, ihi n = n+1 @@ -918,7 +918,7 @@ function ice_distgrid_esmf(gsize) enddo !i enddo !j enddo !iblk - + ice_distgrid_esmf = ESMF_DistGridCreate(arbSeqIndexList=gindex, rc=rc) if(rc /= ESMF_SUCCESS) call ESMF_Finalize(rc=rc, endflag=ESMF_END_ABORT) @@ -969,17 +969,17 @@ subroutine ice_domain_esmf( dom ) fptr(:,:) = -9999.0_R8 n=0 do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi - + do j = jlo, jhi do i = ilo, ihi n = n+1 - fptr(klon, n) = TLON(i,j,iblk)*rad_to_deg - fptr(klat, n) = TLAT(i,j,iblk)*rad_to_deg + fptr(klon, n) = TLON(i,j,iblk)*rad_to_deg + fptr(klat, n) = TLAT(i,j,iblk)*rad_to_deg fptr(karea, n) = tarea(i,j,iblk)/(radius*radius) fptr(kmask, n) = real(nint(hm(i,j,iblk)),kind=dbl_kind) if (trim(grid_type) == 'latlon') then diff --git a/cicecore/drivers/mct/cesm1/ice_comp_mct.F90 b/cicecore/drivers/mct/cesm1/ice_comp_mct.F90 index d663d0f97..a1d1a2ad1 100644 --- a/cicecore/drivers/mct/cesm1/ice_comp_mct.F90 +++ b/cicecore/drivers/mct/cesm1/ice_comp_mct.F90 @@ -53,7 +53,7 @@ module ice_comp_mct use ice_timers use ice_kinds_mod, only : int_kind, dbl_kind, char_len_long, log_kind - use ice_boundary, only : ice_HaloUpdate + use ice_boundary, only : ice_HaloUpdate use ice_scam, only : scmlat, scmlon, single_column use ice_fileunits, only : nu_diag, inst_index, inst_name, inst_suffix, & release_all_fileunits @@ -90,7 +90,7 @@ module ice_comp_mct ! ! !PRIVATE VARIABLES - integer (kind=int_kind) :: ICEID + integer (kind=int_kind) :: ICEID !--- for coupling on other grid from gridcpl_file --- type(mct_gsMap) :: gsMap_iloc ! local gsmaps @@ -115,7 +115,7 @@ subroutine ice_init_mct( EClock, cdata_i, x2i_i, i2x_i, NLFilename ) ! ! !DESCRIPTION: ! Initialize thermodynamic ice model and obtain relevant atmospheric model -! arrays back from driver +! arrays back from driver ! ! !USES: @@ -139,7 +139,7 @@ subroutine ice_init_mct( EClock, cdata_i, x2i_i, i2x_i, NLFilename ) integer :: xoff,yoff integer :: nxg,nyg integer :: k, iblk - + type(mct_gsMap) :: gsmap_extend ! local gsmaps character(len=256) :: drvarchdir ! driver archive directory @@ -240,7 +240,7 @@ subroutine ice_init_mct( EClock, cdata_i, x2i_i, i2x_i, NLFilename ) scmlat = -999. scmlon = -999. - call seq_infodata_GetData( infodata, case_name=runid , & + call seq_infodata_GetData( infodata, case_name=runid , & single_column=single_column ,scmlat=scmlat,scmlon=scmlon) call seq_infodata_GetData( infodata, start_type=starttype) @@ -296,13 +296,13 @@ subroutine ice_init_mct( EClock, cdata_i, x2i_i, i2x_i, NLFilename ) ! use EClock to reset calendar information on initial start !--------------------------------------------------------------------------- - ! 
- on restart run + ! - on restart run ! - istep0, time and time_forc are read from restart file ! - istep1 is set to istep0 ! - date information is determined from restart - ! - on initial run + ! - on initial run ! - myear, mmonth, mday, msec obtained from sync clock - ! - istep0 and istep1 are set to 0 + ! - istep0 and istep1 are set to 0 call seq_timemgr_EClockGetData(EClock, & start_ymd=start_ymd, start_tod=start_tod, & @@ -352,7 +352,7 @@ subroutine ice_init_mct( EClock, cdata_i, x2i_i, i2x_i, NLFilename ) call calendar ! update calendar info if (write_ic) call accum_hist(dt) ! write initial conditions - + !--------------------------------------------------------------------------- ! Initialize MCT attribute vectors and indices !--------------------------------------------------------------------------- @@ -362,22 +362,22 @@ subroutine ice_init_mct( EClock, cdata_i, x2i_i, i2x_i, NLFilename ) ! Initialize ice gsMap if (trim(gridcpl_file) == 'unknown_gridcpl_file') then - call ice_SetGSMap_mct( MPI_COMM_ICE, ICEID, GSMap_ice ) + call ice_SetGSMap_mct( MPI_COMM_ICE, ICEID, GSMap_ice ) lsize = mct_gsMap_lsize(gsMap_ice, MPI_COMM_ICE) call ice_domain_mct( lsize, gsMap_ice, dom_i ) other_cplgrid = .false. nxg = nx_global nyg = ny_global else - call ice_SetGSMap_mct( MPI_COMM_ICE, ICEID, GSMap_iloc ) + call ice_SetGSMap_mct( MPI_COMM_ICE, ICEID, GSMap_iloc ) lsize_loc = mct_gsMap_lsize(gsMap_iloc, MPI_COMM_ICE) call ice_domain_mct( lsize_loc, gsMap_iloc, dom_iloc ) - + call ice_setcoupling_mct(MPI_COMM_ICE, ICEID, gsmap_ice, dom_i) call ice_coffset_mct(xoff,yoff,gsmap_iloc,dom_iloc,gsmap_ice,dom_i,MPI_COMM_ICE) call mct_gsmap_clean(gsmap_ice) call mct_gGrid_clean(dom_i) - + call ice_SetGSMap_mct( MPI_COMM_ICE, ICEID, gsmap_extend, xoff, yoff, nxcpl, nycpl) if (lsize_loc /= mct_gsmap_lsize(gsmap_extend,MPI_COMM_ICE)) then write(nu_diag,*) subname,' :: gsmap_extend extended ',lsize_loc, & @@ -398,7 +398,7 @@ subroutine ice_init_mct( EClock, cdata_i, x2i_i, i2x_i, NLFilename ) call mct_aVect_init(i2x_iloc, rList=seq_flds_i2x_fields, lsize=lsize_loc) call mct_aVect_zero(i2x_iloc) call mct_gsmap_clean(gsmap_extend) - + other_cplgrid = .true. nxg = nxcpl nyg = nycpl @@ -409,7 +409,7 @@ subroutine ice_init_mct( EClock, cdata_i, x2i_i, i2x_i, NLFilename ) call mct_aVect_init(x2i_i, rList=seq_flds_x2i_fields, lsize=lsize) call mct_aVect_zero(x2i_i) - call mct_aVect_init(i2x_i, rList=seq_flds_i2x_fields, lsize=lsize) + call mct_aVect_init(i2x_i, rList=seq_flds_i2x_fields, lsize=lsize) call mct_aVect_zero(i2x_i) !----------------------------------------------------------------- @@ -448,7 +448,7 @@ subroutine ice_init_mct( EClock, cdata_i, x2i_i, i2x_i, NLFilename ) ! Error check if ((tr_aero .or. tr_zaero) .and. .not. atm_aero) then - write(nu_diag,*) 'ice_import ERROR: atm_aero must be set for tr_aero/tr_zaero' + write(nu_diag,*) 'ice_import ERROR: atm_aero must be set for tr_aero/tr_zaero' call shr_sys_abort() end if @@ -469,7 +469,7 @@ subroutine ice_init_mct( EClock, cdata_i, x2i_i, i2x_i, NLFilename ) ! write(shrlogunit,105) trim(subname)//': memory_write: model date = ',start_ymd,start_tod, & ! ' memory = ',msize0,' MB (highwater) ',mrss0,' MB (usage)' ! 
endif - + 105 format( A, 2i8, A, f10.2, A, f10.2, A) end subroutine ice_init_mct @@ -514,7 +514,7 @@ subroutine ice_run_mct( EClock, cdata_i, x2i_i, i2x_i ) integer :: lbnum integer :: n, myearp type(mct_gGrid) , pointer :: dom_i - type(seq_infodata_type), pointer :: infodata + type(seq_infodata_type), pointer :: infodata type(mct_gsMap) , pointer :: gsMap_i real(r8) :: eccen, obliqr, lambm0, mvelpp character(len=char_len_long) :: fname @@ -542,7 +542,7 @@ subroutine ice_run_mct( EClock, cdata_i, x2i_i, i2x_i ) call shr_file_getLogUnit (shrlogunit) call shr_file_getLogLevel(shrloglev) call shr_file_setLogUnit (nu_diag) - + call seq_cdata_setptrs(cdata_i, infodata=infodata, dom=dom_i, & gsMap=gsMap_i) @@ -577,7 +577,7 @@ subroutine ice_run_mct( EClock, cdata_i, x2i_i, i2x_i ) !------------------------------------------------------------------- ! get import state !------------------------------------------------------------------- - + call t_barrierf('cice_run_import_BARRIER',MPI_COMM_ICE) call t_startf ('cice_run_import') call ice_timer_start(timer_cplrecv) @@ -589,7 +589,7 @@ subroutine ice_run_mct( EClock, cdata_i, x2i_i, i2x_i ) endif call ice_timer_stop(timer_cplrecv) call t_stopf ('cice_run_import') - + !-------------------------------------------------------------------- ! timestep update !-------------------------------------------------------------------- @@ -597,9 +597,9 @@ subroutine ice_run_mct( EClock, cdata_i, x2i_i, i2x_i ) call CICE_Run() !----------------------------------------------------------------- - ! send export state to driver + ! send export state to driver !----------------------------------------------------------------- - + call t_barrierf('cice_run_export_BARRIER',MPI_COMM_ICE) call t_startf ('cice_run_export') call ice_timer_start(timer_cplsend) @@ -612,7 +612,7 @@ subroutine ice_run_mct( EClock, cdata_i, x2i_i, i2x_i ) endif call ice_timer_stop(timer_cplsend) call t_stopf ('cice_run_export') - + !-------------------------------------------------------------------- ! check that internal clock is in sync with master clock !-------------------------------------------------------------------- @@ -627,7 +627,7 @@ subroutine ice_run_mct( EClock, cdata_i, x2i_i, i2x_i ) call shr_sys_abort( SubName// & ":: Internal sea-ice clock not in sync with Sync Clock") end if - + ! reset shr logging to my original values call shr_file_setLogUnit (shrlogunit) @@ -636,9 +636,9 @@ subroutine ice_run_mct( EClock, cdata_i, x2i_i, i2x_i ) !------------------------------------------------------------------- ! stop timers and print timer info !------------------------------------------------------------------- - ! Need to have this logic here instead of in ice_final_mct since + ! Need to have this logic here instead of in ice_final_mct since ! the ice_final_mct.F90 will still be called even in aqua-planet mode - ! Could put this logic in the driver - but it seems easier here + ! Could put this logic in the driver - but it seems easier here ! Need to stop this at the end of every run phase in a coupled run. call ice_timer_stop(timer_total) ! stop timing @@ -648,7 +648,7 @@ subroutine ice_run_mct( EClock, cdata_i, x2i_i, i2x_i ) call ice_timer_print_all(stats=.true.) ! print timing information call release_all_fileunits end if - + ! if(tod == 0) then ! call shr_get_memusage(msize,mrss) ! call shr_mpi_max(mrss, mrss0, MPI_COMM_ICE,trim(subname)//' mrss0') @@ -659,7 +659,7 @@ subroutine ice_run_mct( EClock, cdata_i, x2i_i, i2x_i ) ! endif ! 
endif call t_stopf ('cice_run_total') - + 105 format( A, 2i8, A, f10.2, A, f10.2, A) end subroutine ice_run_mct @@ -754,12 +754,12 @@ subroutine ice_SetGSMap_mct( mpicom, ID, gsMap_ice, xoff, yoff, nxgin, nygin ) n=0 do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi - + do j = jlo, jhi do i = ilo, ihi n = n+1 @@ -771,12 +771,12 @@ subroutine ice_SetGSMap_mct( mpicom, ID, gsMap_ice, xoff, yoff, nxgin, nygin ) allocate(gindex(lsize),stat=ier) n=0 do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi - + do j = jlo, jhi do i = ilo, ihi n = n+1 @@ -787,7 +787,7 @@ subroutine ice_SetGSMap_mct( mpicom, ID, gsMap_ice, xoff, yoff, nxgin, nygin ) enddo !i enddo !j enddo !iblk - + call mct_gsMap_init( gsMap_ice, gindex, mpicom, ID, lsize, gsize ) deallocate(gindex) @@ -802,7 +802,7 @@ subroutine ice_domain_mct( lsize, gsMap_i, dom_i ) ! integer , intent(in) :: lsize type(mct_gsMap), intent(in) :: gsMap_i - type(mct_ggrid), intent(inout) :: dom_i + type(mct_ggrid), intent(inout) :: dom_i ! ! Local Variables ! @@ -824,7 +824,7 @@ subroutine ice_domain_mct( lsize, gsMap_i, dom_i ) call mct_gGrid_init( GGrid=dom_i, CoordChars=trim(seq_flds_dom_coord), & OtherChars=trim(seq_flds_dom_other), lsize=lsize ) call mct_aVect_zero(dom_i%data) - ! + ! allocate(data(lsize)) ! ! Determine global gridpoint number attribute, GlobGridNum, which is set automatically by MCT @@ -835,63 +835,63 @@ subroutine ice_domain_mct( lsize, gsMap_i, dom_i ) ! Determine domain (numbering scheme is: West to East and South to North to South pole) ! Initialize attribute vector with special value ! - data(:) = -9999.0_R8 - call mct_gGrid_importRAttr(dom_i,"lat" ,data,lsize) - call mct_gGrid_importRAttr(dom_i,"lon" ,data,lsize) - call mct_gGrid_importRAttr(dom_i,"area" ,data,lsize) - call mct_gGrid_importRAttr(dom_i,"aream",data,lsize) - data(:) = 0.0_R8 - call mct_gGrid_importRAttr(dom_i,"mask",data,lsize) - call mct_gGrid_importRAttr(dom_i,"frac",data,lsize) + data(:) = -9999.0_R8 + call mct_gGrid_importRAttr(dom_i,"lat" ,data,lsize) + call mct_gGrid_importRAttr(dom_i,"lon" ,data,lsize) + call mct_gGrid_importRAttr(dom_i,"area" ,data,lsize) + call mct_gGrid_importRAttr(dom_i,"aream",data,lsize) + data(:) = 0.0_R8 + call mct_gGrid_importRAttr(dom_i,"mask",data,lsize) + call mct_gGrid_importRAttr(dom_i,"frac",data,lsize) ! ! Fill in correct values for domain components ! 
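! A minimal sketch of the gather-and-import idiom repeated in the hunks
! below (names as in this file; illustrative only, not code added by this
! patch). Each domain attribute is filled by packing the physical cells of
! every block into a 1-D buffer and importing it into the MCT gGrid:
!
!    data(:) = -9999.0_R8                       ! special value = unfilled
!    n = 0
!    do iblk = 1, nblocks                       ! pack physical cells only
!       this_block = get_block(blocks_ice(iblk),iblk)
!       do j = this_block%jlo, this_block%jhi
!       do i = this_block%ilo, this_block%ihi
!          n = n + 1
!          data(n) = TLON(i,j,iblk)*rad_to_deg  ! or TLAT, tarea, hm, ...
!       enddo
!       enddo
!    enddo
!    call mct_gGrid_importRattr(dom_i,"lon",data,lsize)
!
! The whitespace-only changes that follow do not alter this logic.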
- data(:) = -9999.0_R8 + data(:) = -9999.0_R8 n=0 do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi - + do j = jlo, jhi do i = ilo, ihi n = n+1 - data(n) = TLON(i,j,iblk)*rad_to_deg + data(n) = TLON(i,j,iblk)*rad_to_deg enddo !i enddo !j enddo !iblk - call mct_gGrid_importRattr(dom_i,"lon",data,lsize) + call mct_gGrid_importRattr(dom_i,"lon",data,lsize) - data(:) = -9999.0_R8 + data(:) = -9999.0_R8 n=0 do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi - + do j = jlo, jhi do i = ilo, ihi n = n+1 - data(n) = TLAT(i,j,iblk)*rad_to_deg + data(n) = TLAT(i,j,iblk)*rad_to_deg enddo !i enddo !j enddo !iblk - call mct_gGrid_importRattr(dom_i,"lat",data,lsize) + call mct_gGrid_importRattr(dom_i,"lat",data,lsize) - data(:) = -9999.0_R8 + data(:) = -9999.0_R8 n=0 do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi - + do j = jlo, jhi do i = ilo, ihi n = n+1 @@ -899,17 +899,17 @@ subroutine ice_domain_mct( lsize, gsMap_i, dom_i ) enddo !i enddo !j enddo !iblk - call mct_gGrid_importRattr(dom_i,"area",data,lsize) + call mct_gGrid_importRattr(dom_i,"area",data,lsize) data(:) = 0.0_R8 n=0 do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi - + do j = jlo, jhi do i = ilo, ihi n = n+1 @@ -917,17 +917,17 @@ subroutine ice_domain_mct( lsize, gsMap_i, dom_i ) enddo !i enddo !j enddo !iblk - call mct_gGrid_importRattr(dom_i,"mask",data,lsize) + call mct_gGrid_importRattr(dom_i,"mask",data,lsize) data(:) = 0.0_R8 n=0 do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi - + do j = jlo, jhi do i = ilo, ihi n = n+1 @@ -939,7 +939,7 @@ subroutine ice_domain_mct( lsize, gsMap_i, dom_i ) enddo !i enddo !j enddo !iblk - call mct_gGrid_importRattr(dom_i,"frac",data,lsize) + call mct_gGrid_importRattr(dom_i,"frac",data,lsize) deallocate(data) deallocate(idata) @@ -948,7 +948,7 @@ end subroutine ice_domain_mct !======================================================================= - subroutine ice_setdef_mct( i2x_i ) + subroutine ice_setdef_mct( i2x_i ) !----------------------------------------------------- type(mct_aVect) , intent(inout) :: i2x_i @@ -1196,7 +1196,7 @@ subroutine ice_setcoupling_mct(mpicom_i, ICEID, gsmap_i, dom_i, gsmap_base) start(1) = 1 pe_loc(1) = 0 - do n = 2,npes + do n = 2,npes pe_loc(n) = n-1 start(n) = start(n-1) + length(n-1) enddo @@ -1231,14 +1231,14 @@ subroutine ice_setcoupling_mct(mpicom_i, ICEID, gsmap_i, dom_i, gsmap_base) ! 
Initialize attribute vector with special value allocate(data(lsize)) - data(:) = -9999.0_R8 - call mct_gGrid_importRAttr(dom_i,"lat" ,data,lsize) - call mct_gGrid_importRAttr(dom_i,"lon" ,data,lsize) - call mct_gGrid_importRAttr(dom_i,"area" ,data,lsize) - call mct_gGrid_importRAttr(dom_i,"aream",data,lsize) - data(:) = 0.0_R8 - call mct_gGrid_importRAttr(dom_i,"mask",data,lsize) - call mct_gGrid_importRAttr(dom_i,"frac",data,lsize) + data(:) = -9999.0_R8 + call mct_gGrid_importRAttr(dom_i,"lat" ,data,lsize) + call mct_gGrid_importRAttr(dom_i,"lon" ,data,lsize) + call mct_gGrid_importRAttr(dom_i,"area" ,data,lsize) + call mct_gGrid_importRAttr(dom_i,"aream",data,lsize) + data(:) = 0.0_R8 + call mct_gGrid_importRAttr(dom_i,"mask",data,lsize) + call mct_gGrid_importRAttr(dom_i,"frac",data,lsize) deallocate(data) ! Read domain arrays diff --git a/cicecore/drivers/mct/cesm1/ice_cpl_indices.F90 b/cicecore/drivers/mct/cesm1/ice_cpl_indices.F90 index 9e549a5ca..6c04271d2 100644 --- a/cicecore/drivers/mct/cesm1/ice_cpl_indices.F90 +++ b/cicecore/drivers/mct/cesm1/ice_cpl_indices.F90 @@ -1,5 +1,5 @@ module ice_cpl_indices - + use seq_flds_mod use mct_mod @@ -7,25 +7,25 @@ module ice_cpl_indices public ! By default make data private - ! ice -> drv + ! ice -> drv integer :: index_i2x_Si_ifrac ! fractional ice coverage wrt ocean integer :: index_i2x_Si_snowh ! snow height (m) - integer :: index_i2x_Si_t ! temperature - integer :: index_i2x_Si_tref ! 2m reference temperature - integer :: index_i2x_Si_qref ! 2m reference specific humidity + integer :: index_i2x_Si_t ! temperature + integer :: index_i2x_Si_tref ! 2m reference temperature + integer :: index_i2x_Si_qref ! 2m reference specific humidity integer :: index_i2x_Si_logz0 ! surface roughness length (m) - integer :: index_i2x_Si_avsdr ! albedo: visible, direct - integer :: index_i2x_Si_avsdf ! albedo: near ir, direct - integer :: index_i2x_Si_anidr ! albedo: visible, diffuse - integer :: index_i2x_Si_anidf ! albedo: near ir, diffuse + integer :: index_i2x_Si_avsdr ! albedo: visible, direct + integer :: index_i2x_Si_avsdf ! albedo: near ir, direct + integer :: index_i2x_Si_anidr ! albedo: visible, diffuse + integer :: index_i2x_Si_anidf ! albedo: near ir, diffuse integer :: index_i2x_Si_u10 ! 10m wind - integer :: index_i2x_Faii_lwup ! upward longwave heat flux - integer :: index_i2x_Faii_lat ! latent heat flux - integer :: index_i2x_Faii_sen ! sensible heat flux - integer :: index_i2x_Faii_evap ! evaporation water flux - integer :: index_i2x_Faii_taux ! wind stress, zonal - integer :: index_i2x_Faii_tauy ! wind stress, meridional + integer :: index_i2x_Faii_lwup ! upward longwave heat flux + integer :: index_i2x_Faii_lat ! latent heat flux + integer :: index_i2x_Faii_sen ! sensible heat flux + integer :: index_i2x_Faii_evap ! evaporation water flux + integer :: index_i2x_Faii_taux ! wind stress, zonal + integer :: index_i2x_Faii_tauy ! wind stress, meridional integer :: index_i2x_Faii_swnet ! sw: net integer :: index_i2x_Fioi_swpen ! sw: net penetrating ice integer :: index_i2x_Fioi_melth ! heat flux from melting ice (<0) @@ -76,14 +76,14 @@ module ice_cpl_indices integer :: index_x2i_So_dhdx ! ocn surface slope, zonal integer :: index_x2i_So_dhdy ! ocn surface slope, meridional integer :: index_x2i_Faxa_lwdn ! downward lw heat flux - integer :: index_x2i_Faxa_rain ! prec: liquid - integer :: index_x2i_Faxa_snow ! prec: frozen + integer :: index_x2i_Faxa_rain ! prec: liquid + integer :: index_x2i_Faxa_snow ! 
prec: frozen integer :: index_x2i_Faxa_swndr ! sw: nir direct downward integer :: index_x2i_Faxa_swvdr ! sw: vis direct downward integer :: index_x2i_Faxa_swndf ! sw: nir diffuse downward integer :: index_x2i_Faxa_swvdf ! sw: vis diffuse downward integer :: index_x2i_Faxa_swnet ! sw: net - integer :: index_x2i_Fioo_q ! ocn freeze or melt heat + integer :: index_x2i_Fioo_q ! ocn freeze or melt heat integer :: index_x2i_Faxa_bcphidry ! flux: Black Carbon hydrophilic dry deposition integer :: index_x2i_Faxa_bcphodry ! flux: Black Carbon hydrophobic dry deposition integer :: index_x2i_Faxa_bcphiwet ! flux: Black Carbon hydrophilic wet deposition diff --git a/cicecore/drivers/mct/cesm1/ice_import_export.F90 b/cicecore/drivers/mct/cesm1/ice_import_export.F90 index 3acf9bdaa..7ac4f0bb7 100644 --- a/cicecore/drivers/mct/cesm1/ice_import_export.F90 +++ b/cicecore/drivers/mct/cesm1/ice_import_export.F90 @@ -30,7 +30,7 @@ module ice_import_export use ice_domain_size , only: nx_global, ny_global, block_size_x, block_size_y, max_blocks use ice_grid , only: tlon, tlat, tarea, tmask, anglet, hm use ice_grid , only: grid_type, grid_average_X2Y - use ice_boundary , only: ice_HaloUpdate + use ice_boundary , only: ice_HaloUpdate use ice_communicate , only: my_task, master_task, MPI_COMM_ICE, get_num_procs use ice_calendar , only: istep, istep1, diagfreq use ice_fileunits , only: nu_diag @@ -66,11 +66,11 @@ subroutine ice_import( x2i ) integer,parameter :: nflds=17,nfldv=6,nfldb=27 real (kind=dbl_kind),allocatable :: aflds(:,:,:,:) real (kind=dbl_kind) :: workx, worky - real (kind=dbl_kind) :: MIN_RAIN_TEMP, MAX_SNOW_TEMP + real (kind=dbl_kind) :: MIN_RAIN_TEMP, MAX_SNOW_TEMP character(len=char_len) :: tfrz_option logical (kind=log_kind) :: modal_aero, z_tracers, skl_bgc logical (kind=log_kind) :: tr_aero, tr_iage, tr_FY, tr_pond - logical (kind=log_kind) :: tr_lvl, tr_zaero, tr_bgc_Nit + logical (kind=log_kind) :: tr_lvl, tr_zaero, tr_bgc_Nit real (kind=dbl_kind) :: tffresh character(len=*), parameter :: subname = '(ice_import)' !----------------------------------------------------- @@ -102,7 +102,7 @@ subroutine ice_import( x2i ) n=0 do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo @@ -250,7 +250,7 @@ subroutine ice_import( x2i ) deallocate(aflds) !------------------------------------------------------- - ! Set aerosols from coupler + ! Set aerosols from coupler !------------------------------------------------------- allocate(aflds(nx_block,ny_block,nfldb,nblocks)) @@ -258,7 +258,7 @@ subroutine ice_import( x2i ) n=0 do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo @@ -310,7 +310,7 @@ subroutine ice_import( x2i ) aflds(i,j,7,iblk) = x2i(index_x2i_So_doc, n) * p5 ! 
split evenly for now aflds(i,j,8,iblk) = x2i(index_x2i_So_doc, n) * p5 !x2i(index_x2i_So_doc2, n) aflds(i,j,9,iblk) = c0 - aflds(i,j,10,iblk) = c0 !x2i(index_x2i_So_dic, n) + aflds(i,j,10,iblk) = c0 !x2i(index_x2i_So_dic, n) aflds(i,j,11,iblk) = x2i(index_x2i_So_don, n) aflds(i,j,12,iblk) = x2i(index_x2i_So_no3, n) aflds(i,j,13,iblk) = x2i(index_x2i_So_sio3, n) @@ -322,12 +322,12 @@ subroutine ice_import( x2i ) aflds(i,j,19,iblk) = c0 !x2i(index_x2i_So_fep2, n) aflds(i,j,20,iblk) = x2i(index_x2i_So_fed, n) aflds(i,j,21,iblk) = c0 !x2i(index_x2i_So_fed2, n) - aflds(i,j,22,iblk) = c0 !x2i(index_x2i_So_zaer1, n) - aflds(i,j,23,iblk) = c0 !x2i(index_x2i_So_zaer2, n) - aflds(i,j,24,iblk) = c0 !x2i(index_x2i_So_zaer3, n) - aflds(i,j,25,iblk) = c0 !x2i(index_x2i_So_zaer4, n) - aflds(i,j,26,iblk) = c0 !x2i(index_x2i_So_zaer5, n) - aflds(i,j,27,iblk) = c0 !x2i(index_x2i_So_zaer6, n) + aflds(i,j,22,iblk) = c0 !x2i(index_x2i_So_zaer1, n) + aflds(i,j,23,iblk) = c0 !x2i(index_x2i_So_zaer2, n) + aflds(i,j,24,iblk) = c0 !x2i(index_x2i_So_zaer3, n) + aflds(i,j,25,iblk) = c0 !x2i(index_x2i_So_zaer4, n) + aflds(i,j,26,iblk) = c0 !x2i(index_x2i_So_zaer5, n) + aflds(i,j,27,iblk) = c0 !x2i(index_x2i_So_zaer6, n) endif enddo enddo @@ -346,7 +346,7 @@ subroutine ice_import( x2i ) do i = 1,nx_block faero_atm(i,j,1,iblk) = aflds(i,j,1,iblk) faero_atm(i,j,2,iblk) = aflds(i,j,2,iblk) - faero_atm(i,j,3,iblk) = aflds(i,j,3,iblk) + faero_atm(i,j,3,iblk) = aflds(i,j,3,iblk) enddo !i enddo !j enddo !iblk @@ -357,7 +357,7 @@ subroutine ice_import( x2i ) do iblk = 1, nblocks do j = 1,ny_block do i = 1,nx_block - algalN(i,j,1,iblk) = aflds(i,j,4,iblk) + algalN(i,j,1,iblk) = aflds(i,j,4,iblk) algalN(i,j,2,iblk) = aflds(i,j,5,iblk) algalN(i,j,3,iblk) = aflds(i,j,6,iblk) doc(i,j,1,iblk) = aflds(i,j,7,iblk) @@ -409,16 +409,16 @@ subroutine ice_import( x2i ) do i = 1,nx_block ! ocean - workx = uocn (i,j,iblk) ! currents, m/s + workx = uocn (i,j,iblk) ! currents, m/s worky = vocn (i,j,iblk) - uocn(i,j,iblk) = workx*cos(ANGLET(i,j,iblk)) & ! convert to POP grid + uocn(i,j,iblk) = workx*cos(ANGLET(i,j,iblk)) & ! convert to POP grid + worky*sin(ANGLET(i,j,iblk)) vocn(i,j,iblk) = worky*cos(ANGLET(i,j,iblk)) & - workx*sin(ANGLET(i,j,iblk)) workx = ss_tltx (i,j,iblk) ! sea sfc tilt, m/m worky = ss_tlty (i,j,iblk) - ss_tltx(i,j,iblk) = workx*cos(ANGLET(i,j,iblk)) & ! convert to POP grid + ss_tltx(i,j,iblk) = workx*cos(ANGLET(i,j,iblk)) & ! convert to POP grid + worky*sin(ANGLET(i,j,iblk)) ss_tlty(i,j,iblk) = worky*cos(ANGLET(i,j,iblk)) & - workx*sin(ANGLET(i,j,iblk)) @@ -430,7 +430,7 @@ subroutine ice_import( x2i ) #endif if (tfrz_option == 'minus1p8') then - Tf (i,j,iblk) = -1.8_dbl_kind + Tf (i,j,iblk) = -1.8_dbl_kind elseif (tfrz_option == 'linear_salt') then Tf (i,j,iblk) = -0.0544_r8*sss(i,j,iblk) ! THIS IS THE ORIGINAL POP FORMULA elseif (tfrz_option == 'mushy') then @@ -463,7 +463,7 @@ subroutine ice_import( x2i ) !$OMP END PARALLEL DO call t_stopf ('cice_imp_ocn') - ! Interpolate ocean dynamics variables from T-cell centers to + ! Interpolate ocean dynamics variables from T-cell centers to ! U-cell centers. if (.not.prescribed_ice) then @@ -487,7 +487,7 @@ subroutine ice_import( x2i ) ! atmosphere workx = uatm(i,j,iblk) ! wind velocity, m/s - worky = vatm(i,j,iblk) + worky = vatm(i,j,iblk) uatm (i,j,iblk) = workx*cos(ANGLET(i,j,iblk)) & ! convert to POP grid + worky*sin(ANGLET(i,j,iblk)) ! note uatm, vatm, wind vatm (i,j,iblk) = worky*cos(ANGLET(i,j,iblk)) & ! 
are on the T-grid here @@ -506,7 +506,7 @@ end subroutine ice_import !=============================================================================== - subroutine ice_export( i2x ) + subroutine ice_export( i2x ) !----------------------------------------------------- ! @@ -514,7 +514,7 @@ subroutine ice_export( i2x ) real(r8), intent(inout) :: i2x(:,:) ! ! Local Variables - integer :: i, j, iblk, n, ij + integer :: i, j, iblk, n, ij integer :: ilo, ihi, jlo, jhi !beginning and end of physical domain integer (kind=int_kind) :: icells ! number of ocean/ice cells integer (kind=int_kind), dimension (nx_block*ny_block) :: indxi ! compressed indices in i @@ -532,7 +532,7 @@ subroutine ice_export( i2x ) workx, worky ! tmps for converting grid real (kind=dbl_kind) :: & - vonkar, zref, iceruf, tffresh + vonkar, zref, iceruf, tffresh type(block) :: this_block ! block information for current block integer :: icnt,icnt1,iblk1,icnt1sum,icnt1max ! gridcell and block counters @@ -614,7 +614,7 @@ subroutine ice_export( i2x ) icnt1 = 0 iblk1 = 0 do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo @@ -630,12 +630,12 @@ subroutine ice_export( i2x ) if ( tmask(i,j,iblk)) i2x(:,n) = c0 - !-------states-------------------- - i2x(index_i2x_Si_ifrac ,n) = ailohi(i,j,iblk) + !-------states-------------------- + i2x(index_i2x_Si_ifrac ,n) = ailohi(i,j,iblk) if ( tmask(i,j,iblk) .and. ailohi(i,j,iblk) > c0 ) then icnt = icnt + 1 - !-------states-------------------- + !-------states-------------------- i2x(index_i2x_Si_t ,n) = Tsrf(i,j,iblk) i2x(index_i2x_Si_avsdr ,n) = alvdr(i,j,iblk) i2x(index_i2x_Si_anidr ,n) = alidr(i,j,iblk) @@ -659,17 +659,17 @@ subroutine ice_export( i2x ) endif !--- a/i fluxes computed by ice - i2x(index_i2x_Faii_taux ,n) = tauxa(i,j,iblk) - i2x(index_i2x_Faii_tauy ,n) = tauya(i,j,iblk) - i2x(index_i2x_Faii_lat ,n) = flat(i,j,iblk) - i2x(index_i2x_Faii_sen ,n) = fsens(i,j,iblk) - i2x(index_i2x_Faii_lwup ,n) = flwout(i,j,iblk) - i2x(index_i2x_Faii_evap ,n) = evap(i,j,iblk) + i2x(index_i2x_Faii_taux ,n) = tauxa(i,j,iblk) + i2x(index_i2x_Faii_tauy ,n) = tauya(i,j,iblk) + i2x(index_i2x_Faii_lat ,n) = flat(i,j,iblk) + i2x(index_i2x_Faii_sen ,n) = fsens(i,j,iblk) + i2x(index_i2x_Faii_lwup ,n) = flwout(i,j,iblk) + i2x(index_i2x_Faii_evap ,n) = evap(i,j,iblk) i2x(index_i2x_Faii_swnet,n) = fswabs(i,j,iblk) !--- i/o fluxes computed by ice i2x(index_i2x_Fioi_melth,n) = fhocn(i,j,iblk) - i2x(index_i2x_Fioi_swpen,n) = fswthru(i,j,iblk) ! hf from melting + i2x(index_i2x_Fioi_swpen,n) = fswthru(i,j,iblk) ! hf from melting i2x(index_i2x_Fioi_meltw,n) = fresh(i,j,iblk) ! h2o flux from melting ??? i2x(index_i2x_Fioi_salt ,n) = fsalt(i,j,iblk) ! salt flux from melting ??? i2x(index_i2x_Fioi_taux ,n) = tauxo(i,j,iblk) ! stress : i/o zonal ??? 
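(A note on the ANGLET rotations seen in the ice_import hunks above: vectors
such as (uocn,vocn), (ss_tltx,ss_tlty) and (uatm,vatm) arrive on true
east/north axes and are rotated into the local POP grid orientation at T
points. A minimal self-contained sketch of that rotation follows; the helper
name is hypothetical and is not code added by this patch, and dbl_kind is
assumed to come from ice_kinds_mod as elsewhere in this file.)

      elemental subroutine rotate_to_grid(ue, vn, angT, ux, vy)
         use ice_kinds_mod, only: dbl_kind
         ! rotate a true-east/north vector (ue,vn) through the T-cell grid
         ! angle angT (radians, ANGLET) into grid-relative components (ux,vy)
         real (kind=dbl_kind), intent(in)  :: ue, vn, angT
         real (kind=dbl_kind), intent(out) :: ux, vy
         ux = ue*cos(angT) + vn*sin(angT)
         vy = vn*cos(angT) - ue*sin(angT)
      end subroutine rotate_to_grid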
@@ -680,18 +680,18 @@ subroutine ice_export( i2x ) if (index_i2x_Fioi_diat > 0) i2x(index_i2x_Fioi_diat ,n) = falgalN(i,j,1,iblk) * R_C2N(1) if (index_i2x_Fioi_sp > 0) i2x(index_i2x_Fioi_sp ,n) = falgalN(i,j,2,iblk) * R_C2N(2) if (index_i2x_Fioi_phaeo > 0) i2x(index_i2x_Fioi_phaeo ,n) = falgalN(i,j,3,iblk) * R_C2N(3) - if (index_i2x_Fioi_doc > 0) i2x(index_i2x_Fioi_doc ,n) = fdoc(i,j,1,iblk) + fdoc(i,j,2,iblk) - if (index_i2x_Fioi_doc2 > 0) i2x(index_i2x_Fioi_doc2 ,n) = c0 !fdoc(i,j,2,iblk) + if (index_i2x_Fioi_doc > 0) i2x(index_i2x_Fioi_doc ,n) = fdoc(i,j,1,iblk) + fdoc(i,j,2,iblk) + if (index_i2x_Fioi_doc2 > 0) i2x(index_i2x_Fioi_doc2 ,n) = c0 !fdoc(i,j,2,iblk) if (index_i2x_Fioi_doc3 > 0) i2x(index_i2x_Fioi_doc3 ,n) = c0 !fdoc(i,j,3,iblk) if (index_i2x_Fioi_dic > 0) i2x(index_i2x_Fioi_dic ,n) = c0 !fdic(i,j,1,iblk) - if (index_i2x_Fioi_don > 0) i2x(index_i2x_Fioi_don ,n) = fdon(i,j,1,iblk) - if (index_i2x_Fioi_no3 > 0) i2x(index_i2x_Fioi_no3 ,n) = fnit(i,j,iblk) - if (index_i2x_Fioi_sio3 > 0) i2x(index_i2x_Fioi_sio3 ,n) = fsil(i,j,iblk) - if (index_i2x_Fioi_nh4 > 0) i2x(index_i2x_Fioi_nh4 ,n) = famm(i,j,iblk) - if (index_i2x_Fioi_dms > 0) i2x(index_i2x_Fioi_dms ,n) = fdms(i,j,iblk) - if (index_i2x_Fioi_dmspp > 0) i2x(index_i2x_Fioi_dmspp ,n) = c0 - if (index_i2x_Fioi_dmsp > 0) i2x(index_i2x_Fioi_dmsp ,n) = fdmsp(i,j,iblk) - if (index_i2x_Fioi_donr > 0) i2x(index_i2x_Fioi_donr ,n) = fhum(i,j,iblk) + if (index_i2x_Fioi_don > 0) i2x(index_i2x_Fioi_don ,n) = fdon(i,j,1,iblk) + if (index_i2x_Fioi_no3 > 0) i2x(index_i2x_Fioi_no3 ,n) = fnit(i,j,iblk) + if (index_i2x_Fioi_sio3 > 0) i2x(index_i2x_Fioi_sio3 ,n) = fsil(i,j,iblk) + if (index_i2x_Fioi_nh4 > 0) i2x(index_i2x_Fioi_nh4 ,n) = famm(i,j,iblk) + if (index_i2x_Fioi_dms > 0) i2x(index_i2x_Fioi_dms ,n) = fdms(i,j,iblk) + if (index_i2x_Fioi_dmspp > 0) i2x(index_i2x_Fioi_dmspp ,n) = c0 + if (index_i2x_Fioi_dmsp > 0) i2x(index_i2x_Fioi_dmsp ,n) = fdmsp(i,j,iblk) + if (index_i2x_Fioi_donr > 0) i2x(index_i2x_Fioi_donr ,n) = fhum(i,j,iblk) ! convert from umol Fe/m^3 to mmol Fe/m^3 if (index_i2x_Fioi_fep1 > 0) i2x(index_i2x_Fioi_fep1 ,n) = c0 !ffep(i,j,1,iblk) / 1000.0_dbl_kind if (index_i2x_Fioi_fep2 > 0) i2x(index_i2x_Fioi_fep2 ,n) = c0 !ffep(i,j,2,iblk) / 1000.0_dbl_kind diff --git a/cicecore/drivers/mct/cesm1/ice_prescribed_mod.F90 b/cicecore/drivers/mct/cesm1/ice_prescribed_mod.F90 index 0868ef2fa..78b7d15c4 100644 --- a/cicecore/drivers/mct/cesm1/ice_prescribed_mod.F90 +++ b/cicecore/drivers/mct/cesm1/ice_prescribed_mod.F90 @@ -1,5 +1,5 @@ !=================================================================== -!BOP +!BOP ! ! !MODULE: ice_prescribed_mod - Prescribed Ice Model ! @@ -19,7 +19,7 @@ ! 2005-Apr-19 - B. Kauffman, J. Schramm, M. Vertenstein, NCAR - design ! ! !INTERFACE: ---------------------------------------------------------- - + module ice_prescribed_mod ! !USES: @@ -72,7 +72,7 @@ module ice_prescribed_mod integer(SHR_KIND_IN),parameter :: nFilesMaximum = 400 ! max number of files integer(kind=int_kind) :: stream_year_first ! first year in stream to use integer(kind=int_kind) :: stream_year_last ! last year in stream to use - integer(kind=int_kind) :: model_year_align ! align stream_year_first + integer(kind=int_kind) :: model_year_align ! align stream_year_first ! with this model year character(len=char_len_long) :: stream_fldVarName @@ -88,7 +88,7 @@ module ice_prescribed_mod type(shr_strdata_type) :: sdat ! prescribed data stream character(len=char_len_long) :: fldList ! 
list of fields in data stream - real(kind=dbl_kind), allocatable :: ice_cov(:,:,:) ! ice cover + real(kind=dbl_kind), allocatable :: ice_cov(:,:,:) ! ice cover ! real (kind=dbl_kind), parameter :: & ! cp_sno = 0.0_dbl_kind & ! specific heat of snow (J/kg/K) @@ -109,13 +109,13 @@ module ice_prescribed_mod ! ! !IROUTINE: ice_prescribed_init - prescribed ice initialization ! -! !INTERFACE: +! !INTERFACE: subroutine ice_prescribed_init(compid, gsmap, dom) use mpi ! MPI Fortran module use shr_pio_mod, only : shr_pio_getiotype, shr_pio_getiosys ! !DESCRIPTION: -! Prescribed ice initialization - needed to -! work with new shr_strdata module derived type +! Prescribed ice initialization - needed to +! work with new shr_strdata module derived type ! ! !REVISION HISTORY: ! 2009-Oct-12 - M. Vertenstein @@ -130,7 +130,7 @@ subroutine ice_prescribed_init(compid, gsmap, dom) !EOP !----- Local ------ integer(kind=int_kind) :: nml_error ! namelist i/o error flag - integer(kind=int_kind) :: n, nFile, ierr + integer(kind=int_kind) :: n, nFile, ierr character(len=8) :: fillalgo character(len=*), parameter :: subname = '(ice_prescribed_init)' character(*),parameter :: F00 = "(4a)" @@ -227,9 +227,9 @@ subroutine ice_prescribed_init(compid, gsmap, dom) if (my_task == master_task) then write(nu_diag,*) ' ' write(nu_diag,*) 'This is the prescribed ice coverage option.' - write(nu_diag,*) ' stream_year_first = ',stream_year_first - write(nu_diag,*) ' stream_year_last = ',stream_year_last - write(nu_diag,*) ' model_year_align = ',model_year_align + write(nu_diag,*) ' stream_year_first = ',stream_year_first + write(nu_diag,*) ' stream_year_last = ',stream_year_last + write(nu_diag,*) ' model_year_align = ',model_year_align write(nu_diag,*) ' stream_fldVarName = ',trim(stream_fldVarName) do n = 1,nFile write(nu_diag,*) ' stream_fldFileName = ',trim(stream_fldFileName(n)),n @@ -280,7 +280,7 @@ subroutine ice_prescribed_init(compid, gsmap, dom) hin_max(1) = 999._dbl_kind end if end subroutine ice_prescribed_init - + !======================================================================= !BOP =================================================================== ! @@ -316,7 +316,7 @@ subroutine ice_prescribed_run(mDateIn, secIn) logical, save :: first_time = .true. character(len=*), parameter :: subname = '(ice_prescribed_run)' character(*),parameter :: F00 = "(a,2g20.13)" - + !------------------------------------------------------------------------ ! Interpolate to new ice coverage !------------------------------------------------------------------------ @@ -327,16 +327,16 @@ subroutine ice_prescribed_run(mDateIn, secIn) allocate(ice_cov(nx_block,ny_block,max_blocks)) endif - ice_cov(:,:,:) = c0 ! This initializes ghost cells as well + ice_cov(:,:,:) = c0 ! This initializes ghost cells as well n=0 do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi - + do j = jlo, jhi do i = ilo, ihi n = n+1 @@ -384,11 +384,11 @@ end subroutine ice_prescribed_run ! 2001-May - B. P. Briegleb - Original version ! ! !INTERFACE: ------------------------------------------------------------------ - + subroutine ice_prescribed_phys ! !USES: - + use ice_flux use ice_state use ice_arrays_column, only : hin_max @@ -396,9 +396,9 @@ subroutine ice_prescribed_phys use ice_dyn_evp implicit none - + ! 
!INPUT/OUTPUT PARAMETERS: - + !EOP !----- Local ------ @@ -411,12 +411,12 @@ subroutine ice_prescribed_phys real(kind=dbl_kind) :: slope ! diff in underlying ocean tmp and ice surface tmp real(kind=dbl_kind) :: Ti ! ice level temperature real(kind=dbl_kind) :: Tmlt ! ice level melt temperature - real(kind=dbl_kind) :: qin_save(nilyr) + real(kind=dbl_kind) :: qin_save(nilyr) real(kind=dbl_kind) :: qsn_save(nslyr) real(kind=dbl_kind) :: hi ! ice prescribed (hemispheric) ice thickness real(kind=dbl_kind) :: hs ! snow thickness real(kind=dbl_kind) :: zn ! normalized ice thickness - real(kind=dbl_kind) :: salin(nilyr) ! salinity (ppt) + real(kind=dbl_kind) :: salin(nilyr) ! salinity (ppt) real(kind=dbl_kind) :: rad_to_deg, pi, puny real(kind=dbl_kind) :: rhoi, rhos, cp_ice, cp_ocn, lfresh, depressT @@ -444,11 +444,11 @@ subroutine ice_prescribed_phys ! aicen(:,:,:,:) = c0 ! vicen(:,:,:,:) = c0 ! eicen(:,:,:,:) = c0 - + ! do nc=1,ncat ! trcrn(:,:,nt_Tsfc,nc,:) = Tf(:,:,:) ! enddo - + !----------------------------------------------------------------- ! Set ice cover over land to zero, not sure if this should be ! be done earier, before time/spatial interp?????? @@ -502,8 +502,8 @@ subroutine ice_prescribed_phys endif aicen(i,j,nc,iblk) = ice_cov(i,j,iblk) - vicen(i,j,nc,iblk) = hi*aicen(i,j,nc,iblk) - vsnon(i,j,nc,iblk) = hs*aicen(i,j,nc,iblk) + vicen(i,j,nc,iblk) = hi*aicen(i,j,nc,iblk) + vsnon(i,j,nc,iblk) = hs*aicen(i,j,nc,iblk) !--------------------------------------------------------- ! make linear temp profile and compute enthalpy @@ -564,7 +564,7 @@ subroutine ice_prescribed_phys trcr_base = trcr_base(1:ntrcr,:), & n_trcr_strata = n_trcr_strata(1:ntrcr), & nt_strata = nt_strata(1:ntrcr,:)) - + enddo ! i enddo ! j enddo ! iblk diff --git a/cicecore/drivers/nuopc/cmeps/CICE_RunMod.F90 b/cicecore/drivers/nuopc/cmeps/CICE_RunMod.F90 index 22234d27f..c68583648 100644 --- a/cicecore/drivers/nuopc/cmeps/CICE_RunMod.F90 +++ b/cicecore/drivers/nuopc/cmeps/CICE_RunMod.F90 @@ -231,7 +231,7 @@ subroutine ice_step !----------------------------------------------------------------- ! thermodynamics and biogeochemistry !----------------------------------------------------------------- - + call step_therm1 (dt, iblk) ! vertical thermodynamics if (debug_model) then plabeld = 'post step_therm1' @@ -401,7 +401,7 @@ subroutine ice_step call ice_timer_stop(timer_readwrite) ! reading/writing end subroutine ice_step - + !======================================================================= ! ! Prepare for coupling @@ -634,7 +634,7 @@ subroutine coupling_prep (iblk) fiso_evap=fiso_evap(:,:,:,iblk), & fiso_ocn =fiso_ocn (:,:,:,iblk), & Uref=Uref(:,:,iblk), wind=wind(:,:,iblk) ) - + !----------------------------------------------------------------- ! Define ice-ocean bgc fluxes !----------------------------------------------------------------- @@ -710,7 +710,7 @@ subroutine sfcflux_to_ocn(nx_block, ny_block, & ! local variables integer (kind=int_kind) :: & i, j, n ! horizontal indices - + real (kind=dbl_kind) :: & puny, & ! Lsub, & ! diff --git a/cicecore/drivers/nuopc/cmeps/CICE_copyright.txt b/cicecore/drivers/nuopc/cmeps/CICE_copyright.txt index 6eb3c9cca..14d97c481 100644 --- a/cicecore/drivers/nuopc/cmeps/CICE_copyright.txt +++ b/cicecore/drivers/nuopc/cmeps/CICE_copyright.txt @@ -1,17 +1,17 @@ ! Copyright (c) 2022, Triad National Security, LLC ! All rights reserved. -! +! ! Copyright 2022. Triad National Security, LLC. This software was -! produced under U.S. Government contract DE-AC52-06NA25396 for Los +! 
produced under U.S. Government contract DE-AC52-06NA25396 for Los ! Alamos National Laboratory (LANL), which is operated by Triad -! National Security, LLC for the U.S. Department of Energy. The U.S. -! Government has rights to use, reproduce, and distribute this software. -! NEITHER THE GOVERNMENT NOR TRIAD NATIONAL SECURITY, LLC MAKES ANY +! National Security, LLC for the U.S. Department of Energy. The U.S. +! Government has rights to use, reproduce, and distribute this software. +! NEITHER THE GOVERNMENT NOR TRIAD NATIONAL SECURITY, LLC MAKES ANY ! WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LIABILITY FOR THE USE OF -! THIS SOFTWARE. If software is modified to produce derivative works, -! such modified software should be clearly marked, so as not to confuse +! THIS SOFTWARE. If software is modified to produce derivative works, +! such modified software should be clearly marked, so as not to confuse ! it with the version available from LANL. ! ! The full license and distribution policy are available from ! https://github.com/CICE-Consortium -! +! diff --git a/cicecore/drivers/nuopc/cmeps/ice_mesh_mod.F90 b/cicecore/drivers/nuopc/cmeps/ice_mesh_mod.F90 index 56287feb1..0b1b9349c 100644 --- a/cicecore/drivers/nuopc/cmeps/ice_mesh_mod.F90 +++ b/cicecore/drivers/nuopc/cmeps/ice_mesh_mod.F90 @@ -202,7 +202,7 @@ subroutine ice_mesh_set_distgrid(localpet, npes, distgrid, rc) deallocate(gindex) end subroutine ice_mesh_set_distgrid - + !======================================================================= subroutine ice_mesh_setmask_from_maskfile(ice_maskfile, ice_mesh, rc) @@ -429,7 +429,7 @@ subroutine ice_mesh_create_scolumn(scol_lon, scol_lat, ice_mesh, rc) ! Allocate module variable ocn_gridcell_frac allocate(ocn_gridcell_frac(nx_block,ny_block,max_blocks)) ocn_gridcell_frac(:,:,:) = scol_frac - + end subroutine ice_mesh_create_scolumn !=============================================================================== diff --git a/cicecore/drivers/nuopc/cmeps/ice_prescribed_mod.F90 b/cicecore/drivers/nuopc/cmeps/ice_prescribed_mod.F90 index dc40177d8..84973e9dd 100644 --- a/cicecore/drivers/nuopc/cmeps/ice_prescribed_mod.F90 +++ b/cicecore/drivers/nuopc/cmeps/ice_prescribed_mod.F90 @@ -175,7 +175,7 @@ subroutine ice_prescribed_init(clock, mesh, rc) end do write(nu_diag,*) ' ' endif - + ! initialize sdat call shr_strdata_init_from_inline(sdat, & my_task = my_task, & diff --git a/cicecore/drivers/nuopc/dmi/CICE.F90 b/cicecore/drivers/nuopc/dmi/CICE.F90 index d6a28c3ba..79dd06fca 100644 --- a/cicecore/drivers/nuopc/dmi/CICE.F90 +++ b/cicecore/drivers/nuopc/dmi/CICE.F90 @@ -1,21 +1,21 @@ !======================================================================= ! Copyright (c) 2022, Triad National Security, LLC ! All rights reserved. -! +! ! Copyright 2022. Triad National Security, LLC. This software was -! produced under U.S. Government contract DE-AC52-06NA25396 for Los +! produced under U.S. Government contract DE-AC52-06NA25396 for Los ! Alamos National Laboratory (LANL), which is operated by Triad -! National Security, LLC for the U.S. Department of Energy. The U.S. -! Government has rights to use, reproduce, and distribute this software. -! NEITHER THE GOVERNMENT NOR TRIAD NATIONAL SECURITY, LLC MAKES ANY +! National Security, LLC for the U.S. Department of Energy. The U.S. +! Government has rights to use, reproduce, and distribute this software. +! NEITHER THE GOVERNMENT NOR TRIAD NATIONAL SECURITY, LLC MAKES ANY ! WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LIABILITY FOR THE USE OF -! 
THIS SOFTWARE. If software is modified to produce derivative works, -! such modified software should be clearly marked, so as not to confuse +! THIS SOFTWARE. If software is modified to produce derivative works, +! such modified software should be clearly marked, so as not to confuse ! it with the version available from LANL. ! ! The full license and distribution policy are available from ! https://github.com/CICE-Consortium -! +! !======================================================================= ! ! Main driver routine for CICE. Initializes and steps through the model. @@ -49,7 +49,7 @@ program icemodel call CICE_Run !----------------------------------------------------------------- - ! Finalize CICE + ! Finalize CICE !----------------------------------------------------------------- call CICE_Finalize diff --git a/cicecore/drivers/nuopc/dmi/CICE_InitMod.F90 b/cicecore/drivers/nuopc/dmi/CICE_InitMod.F90 index 2c90061af..78d462d4c 100644 --- a/cicecore/drivers/nuopc/dmi/CICE_InitMod.F90 +++ b/cicecore/drivers/nuopc/dmi/CICE_InitMod.F90 @@ -196,11 +196,11 @@ subroutine cice_init(mpi_comm) if (icepack_warnings_aborted()) call abort_ice(trim(subname), & file=__FILE__,line= __LINE__) - if (tr_aero .or. tr_zaero) call faero_optics !initialize aerosol optical + if (tr_aero .or. tr_zaero) call faero_optics !initialize aerosol optical !property tables - ! Initialize shortwave components using swdn from previous timestep - ! if restarting. These components will be scaled to current forcing + ! Initialize shortwave components using swdn from previous timestep + ! if restarting. These components will be scaled to current forcing ! in prep_radiation. if (trim(runtype) == 'continue' .or. restart) & call init_shortwave ! initialize radiative transfer @@ -252,7 +252,7 @@ subroutine cice_init(mpi_comm) call init_flux_atm ! initialize atmosphere fluxes sent to coupler call init_flux_ocn ! initialize ocean fluxes sent to coupler - if (write_ic) call accum_hist(dt) ! write initial conditions + if (write_ic) call accum_hist(dt) ! write initial conditions end subroutine cice_init @@ -339,7 +339,7 @@ subroutine init_restart if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) - if (trim(runtype) == 'continue') then + if (trim(runtype) == 'continue') then ! start from core restart file call restartfile() ! given by pointer in ice_in call calendar() ! update time parameters @@ -350,17 +350,17 @@ subroutine init_restart ! call restartfile_v4 (ice_ic) ! CICE v4.1 binary restart file !!! uncomment if EAP restart data exists ! if (kdyn == 2) call read_restart_eap - endif + endif ! tracers - ! ice age tracer - if (tr_iage) then + ! ice age tracer + if (tr_iage) then if (trim(runtype) == 'continue') & restart_age = .true. if (restart_age) then call read_restart_age else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_age(trcrn(:,:,nt_iage,:,iblk)) enddo ! iblk endif @@ -371,7 +371,7 @@ subroutine init_restart if (restart_FY) then call read_restart_FY else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_FY(trcrn(:,:,nt_FY,:,iblk)) enddo ! iblk endif @@ -382,7 +382,7 @@ subroutine init_restart if (restart_lvl) then call read_restart_lvl else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_lvl(iblk,trcrn(:,:,nt_alvl,:,iblk), & trcrn(:,:,nt_vlvl,:,iblk)) enddo ! 
iblk @@ -396,7 +396,7 @@ subroutine init_restart if (restart_pond_cesm) then call read_restart_pond_cesm else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_meltponds_cesm(trcrn(:,:,nt_apnd,:,iblk), & trcrn(:,:,nt_hpnd,:,iblk)) enddo ! iblk @@ -410,7 +410,7 @@ subroutine init_restart if (restart_pond_lvl) then call read_restart_pond_lvl else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_meltponds_lvl(trcrn(:,:,nt_apnd,:,iblk), & trcrn(:,:,nt_hpnd,:,iblk), & trcrn(:,:,nt_ipnd,:,iblk), & @@ -425,7 +425,7 @@ subroutine init_restart if (restart_pond_topo) then call read_restart_pond_topo else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_meltponds_topo(trcrn(:,:,nt_apnd,:,iblk), & trcrn(:,:,nt_hpnd,:,iblk), & trcrn(:,:,nt_ipnd,:,iblk)) @@ -476,7 +476,7 @@ subroutine init_restart if (restart_aero) then call read_restart_aero else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_aerosol(trcrn(:,:,nt_aero:nt_aero+4*n_aero-1,:,iblk)) enddo ! iblk endif ! .not. restart_aero diff --git a/cicecore/drivers/nuopc/dmi/CICE_RunMod.F90 b/cicecore/drivers/nuopc/dmi/CICE_RunMod.F90 index 00c527da0..6e799723e 100644 --- a/cicecore/drivers/nuopc/dmi/CICE_RunMod.F90 +++ b/cicecore/drivers/nuopc/dmi/CICE_RunMod.F90 @@ -7,7 +7,7 @@ ! William H. Lipscomb, LANL ! ! 2006 ECH: moved exit timeLoop to prevent execution of unnecessary timestep -! 2006 ECH: Streamlined for efficiency +! 2006 ECH: Streamlined for efficiency ! 2006 ECH: Converted to free source form (F90) ! 2007 BPB: Modified Delta-Eddington shortwave interface ! 2008 ECH: moved ESMF code to its own driver @@ -131,7 +131,7 @@ subroutine CICE_Run(stop_now_cpl) ! end of timestep loop !-------------------------------------------------------------------- - call ice_timer_stop(timer_step) ! end timestepping loop timer + call ice_timer_stop(timer_step) ! end timestepping loop timer end subroutine CICE_Run @@ -175,7 +175,7 @@ subroutine ice_step timer_hist, timer_readwrite integer (kind=int_kind) :: & - iblk , & ! block index + iblk , & ! block index k , & ! dynamics supercycling index ktherm ! thermodynamics is off when ktherm = -1 @@ -257,7 +257,7 @@ subroutine ice_step !----------------------------------------------------------------- ! thermodynamics and biogeochemistry !----------------------------------------------------------------- - + call step_therm1 (dt, iblk) ! vertical thermodynamics if (debug_model) then @@ -422,7 +422,7 @@ subroutine ice_step if (tr_iso) call write_restart_iso if (tr_aero) call write_restart_aero if (solve_zsal .or. skl_bgc .or. z_tracers) & - call write_restart_bgc + call write_restart_bgc if (tr_brine) call write_restart_hbrine if (kdyn == 2) call write_restart_eap call final_restart @@ -431,7 +431,7 @@ subroutine ice_step call ice_timer_stop(timer_readwrite) ! reading/writing end subroutine ice_step - + !======================================================================= ! ! Prepare for coupling @@ -466,12 +466,12 @@ subroutine coupling_prep (iblk) use ice_step_mod, only: ocean_mixed_layer use ice_timers, only: timer_couple, ice_timer_start, ice_timer_stop - integer (kind=int_kind), intent(in) :: & - iblk ! block index + integer (kind=int_kind), intent(in) :: & + iblk ! block index ! local variables - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & ilo,ihi,jlo,jhi, & ! beginning and end of physical domain n , & ! thickness category index i,j , & ! 
horizontal indices @@ -607,8 +607,8 @@ subroutine coupling_prep (iblk) fsalt_ai (i,j,iblk) = fsalt (i,j,iblk) fhocn_ai (i,j,iblk) = fhocn (i,j,iblk) fswthru_ai(i,j,iblk) = fswthru(i,j,iblk) - fzsal_ai (i,j,iblk) = fzsal (i,j,iblk) - fzsal_g_ai(i,j,iblk) = fzsal_g(i,j,iblk) + fzsal_ai (i,j,iblk) = fzsal (i,j,iblk) + fzsal_g_ai(i,j,iblk) = fzsal_g(i,j,iblk) if (nbtrcr > 0) then do k = 1, nbtrcr @@ -629,7 +629,7 @@ subroutine coupling_prep (iblk) enddo !----------------------------------------------------------------- - ! Divide fluxes by ice area + ! Divide fluxes by ice area ! - the CESM coupler assumes fluxes are per unit ice area ! - also needed for global budget in diagnostics !----------------------------------------------------------------- @@ -659,22 +659,22 @@ subroutine coupling_prep (iblk) Qref_iso =Qref_iso (:,:,:,iblk), & fiso_evap=fiso_evap(:,:,:,iblk), & fiso_ocn =fiso_ocn (:,:,:,iblk)) - + #ifdef CICE_IN_NEMO !echmod - comment this out for efficiency, if .not. calc_Tsfc if (.not. calc_Tsfc) then !--------------------------------------------------------------- - ! If surface fluxes were provided, conserve these fluxes at ice - ! free points by passing to ocean. + ! If surface fluxes were provided, conserve these fluxes at ice + ! free points by passing to ocean. !--------------------------------------------------------------- - call sfcflux_to_ocn & + call sfcflux_to_ocn & (nx_block, ny_block, & tmask (:,:,iblk), aice_init(:,:,iblk), & fsurfn_f (:,:,:,iblk), flatn_f(:,:,:,iblk), & fresh (:,:,iblk), fhocn (:,:,iblk)) - endif + endif !echmod #endif call ice_timer_stop(timer_couple,iblk) ! atm/ocn coupling @@ -686,10 +686,10 @@ end subroutine coupling_prep !======================================================================= ! ! If surface heat fluxes are provided to CICE instead of CICE calculating -! them internally (i.e. .not. calc_Tsfc), then these heat fluxes can +! them internally (i.e. .not. calc_Tsfc), then these heat fluxes can ! be provided at points which do not have ice. (This is could be due to ! the heat fluxes being calculated on a lower resolution grid or the -! heat fluxes not recalculated at every CICE timestep.) At ice free points, +! heat fluxes not recalculated at every CICE timestep.) At ice free points, ! conserve energy and water by passing these fluxes to the ocean. ! ! author: A. McLaren, Met Office @@ -722,7 +722,7 @@ subroutine sfcflux_to_ocn(nx_block, ny_block, & ! local variables integer (kind=int_kind) :: & i, j, n ! horizontal indices - + real (kind=dbl_kind) :: & puny, & ! Lsub, & ! diff --git a/cicecore/drivers/nuopc/dmi/cice_cap.info b/cicecore/drivers/nuopc/dmi/cice_cap.info index 202207c38..0ec1dea5a 100644 --- a/cicecore/drivers/nuopc/dmi/cice_cap.info +++ b/cicecore/drivers/nuopc/dmi/cice_cap.info @@ -22,7 +22,7 @@ module cice_cap use ice_state use CICE_RunMod use CICE_InitMod - use CICE_FinalMod + use CICE_FinalMod !end cice specific use ESMF use NUOPC @@ -34,11 +34,11 @@ module cice_cap model_label_Finalize => label_Finalize implicit none - + private - + public SetServices - + ! type cice_internalstate_type ! 
end type @@ -167,7 +167,7 @@ module cice_cap call ESMF_LogWrite(info, ESMF_LOGMSG_INFO, rc=dbrc) end subroutine - + !----------------------------------------------------------------------------- subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) @@ -196,7 +196,7 @@ module cice_cap integer :: lbnd(2),ubnd(2) type(block) :: this_block type(ESMF_DELayout) :: delayout - real(ESMF_KIND_R8), pointer :: tarray(:,:) + real(ESMF_KIND_R8), pointer :: tarray(:,:) real(ESMF_KIND_R8), pointer :: coordXcenter(:,:) real(ESMF_KIND_R8), pointer :: coordYcenter(:,:) real(ESMF_KIND_R8), pointer :: coordXcorner(:,:) @@ -215,7 +215,7 @@ module cice_cap ! created can wrap on the data pointers in internal part of CICE write(tmpstr,'(a,2i8)') subname//' ice nx,ny = ',nx_global,ny_global call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO, rc=dbrc) - + ! distgrid = ESMF_DistGridCreate(minIndex=(/1,1/), maxIndex=(/nx_global,ny_global/), & ! regDecomp=(/2,2/), rc=rc) @@ -307,9 +307,9 @@ module cice_cap rc = rc) if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_GridAddCoord(gridIn, staggerLoc=ESMF_STAGGERLOC_CENTER, rc=rc) + call ESMF_GridAddCoord(gridIn, staggerLoc=ESMF_STAGGERLOC_CENTER, rc=rc) if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return - call ESMF_GridAddCoord(gridIn, staggerLoc=ESMF_STAGGERLOC_CORNER, rc=rc) + call ESMF_GridAddCoord(gridIn, staggerLoc=ESMF_STAGGERLOC_CORNER, rc=rc) if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, line=__LINE__, file=__FILE__)) return call ESMF_GridAddItem(gridIn, itemFlag=ESMF_GRIDITEM_MASK, itemTypeKind=ESMF_TYPEKIND_I4, & staggerLoc=ESMF_STAGGERLOC_CENTER, rc=rc) @@ -341,7 +341,7 @@ module cice_cap if (lbnd(1) /= 1 .or. lbnd(2) /= 1 .or. ubnd(1) /= ihi-ilo+1 .or. ubnd(2) /= jhi-jlo+1) then write(tmpstr,'(a,5i8)') subname//' iblk bnds ERROR ' call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO, line=__LINE__, file=__FILE__, rc=dbrc) - write(tmpstr,'(a,4i8)') subname//' iblk center bnds 2',ihi, ilo, jhi,jlo + write(tmpstr,'(a,4i8)') subname//' iblk center bnds 2',ihi, ilo, jhi,jlo call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO, line=__LINE__, file=__FILE__, rc=dbrc) rc = ESMF_FAILURE return @@ -460,14 +460,14 @@ module cice_cap call ESMF_LogWrite(info, ESMF_LOGMSG_INFO, line=__LINE__, file=__FILE__, rc=dbrc) end subroutine - + !----------------------------------------------------------------------------- ! CICE model uses same clock as parent gridComp subroutine SetClock(gcomp, rc) type(ESMF_GridComp) :: gcomp integer, intent(out) :: rc - + ! local variables type(ESMF_Clock) :: clock type(ESMF_TimeInterval) :: stabilityTimeStep, timestep @@ -493,10 +493,10 @@ module cice_cap line=__LINE__, & file=__FILE__)) & return ! bail out - + ! initialize internal clock ! here: parent Clock and stability timeStep determine actual model timeStep - call ESMF_TimeIntervalSet(stabilityTimeStep, s=nint(dt), rc=rc) + call ESMF_TimeIntervalSet(stabilityTimeStep, s=nint(dt), rc=rc) if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & line=__LINE__, & file=__FILE__)) & @@ -506,7 +506,7 @@ module cice_cap line=__LINE__, & file=__FILE__)) & return ! bail out - + end subroutine !----------------------------------------------------------------------------- @@ -514,7 +514,7 @@ module cice_cap subroutine ModelAdvance(gcomp, rc) type(ESMF_GridComp) :: gcomp integer, intent(out) :: rc - + ! 
local variables type(ESMF_Clock) :: clock type(ESMF_State) :: importState, exportState @@ -539,7 +539,7 @@ module cice_cap call ESMF_LogWrite(info, ESMF_LOGMSG_INFO, rc=dbrc) import_slice = import_slice + 1 export_slice = export_slice + 1 - + ! query the Component for its clock, importState and exportState call NUOPC_ModelGet(gcomp, modelClock=clock, importState=importState, & exportState=exportState, rc=rc) @@ -549,27 +549,27 @@ module cice_cap return ! bail out ! HERE THE MODEL ADVANCES: currTime -> currTime + timeStep - + ! Because of the way that the internal Clock was set in SetClock(), ! its timeStep is likely smaller than the parent timeStep. As a consequence - ! the time interval covered by a single parent timeStep will result in + ! the time interval covered by a single parent timeStep will result in ! multiple calls to the ModelAdvance() routine. Every time the currTime ! will come in by one internal timeStep advanced. This goes until the ! stopTime of the internal Clock has been reached. - + call ESMF_ClockPrint(clock, options="currTime", & preString="------>Advancing CICE from: ", rc=rc) if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & line=__LINE__, & file=__FILE__)) & return ! bail out - + call ESMF_ClockGet(clock, currTime=currTime, timeStep=timeStep, rc=rc) if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & line=__LINE__, & file=__FILE__)) & return ! bail out - + call ESMF_TimePrint(currTime + timeStep, & preString="--------------------------------> to: ", rc=rc) if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & @@ -611,14 +611,14 @@ module cice_cap write(info,*) subname,' --- run phase 4 called --- ',rc call ESMF_LogWrite(info, ESMF_LOGMSG_INFO, rc=dbrc) if(profile_memory) call ESMF_VMLogMemInfo("Leaving CICE Model_ADVANCE: ") - end subroutine + end subroutine subroutine cice_model_finalize(gcomp, rc) ! input arguments type(ESMF_GridComp) :: gcomp integer, intent(out) :: rc - + ! local variables type(ESMF_Clock) :: clock type(ESMF_Time) :: currTime @@ -707,7 +707,7 @@ module cice_cap integer :: npet, nx, ny, pet, elb(2), eub(2), clb(2), cub(2), tlb(2), tub(2) type(ESMF_VM) :: vm character(len=*),parameter :: subname='(cice_cap:CICE_RealizeFields)' - + rc = ESMF_SUCCESS do i = 1, nfields @@ -734,7 +734,7 @@ module cice_cap file=__FILE__)) & return ! bail out endif - + if (NUOPC_IsConnected(state, fieldName=field_defs(i)%shortname)) then call NUOPC_Realize(state, field=field, rc=rc) if (ESMF_LogFoundError(rcToCheck=rc, msg=ESMF_LOGERR_PASSTHRU, & @@ -814,15 +814,15 @@ module cice_cap call fld_list_add(fldsToIce_num, fldsToIce, "freezing_melting_potential" ,"1" , "will provide") call fld_list_add(fldsToIce_num, fldsToIce, "mixed_layer_depth" ,"m" , "will provide") ! fields for export - call fld_list_add(fldsFrIce_num, fldsFrIce, "sea_ice_fraction" ,"1" , "will provide") - call fld_list_add(fldsFrIce_num, fldsFrIce, "stress_on_ocn_ice_zonal" ,"1" , "will provide") - call fld_list_add(fldsFrIce_num, fldsFrIce, "stress_on_ocn_ice_merid" ,"1" , "will provide") - call fld_list_add(fldsFrIce_num, fldsFrIce, "sea_ice_temperature" ,"1" , "will provide") + call fld_list_add(fldsFrIce_num, fldsFrIce, "sea_ice_fraction" ,"1" , "will provide") + call fld_list_add(fldsFrIce_num, fldsFrIce, "stress_on_ocn_ice_zonal" ,"1" , "will provide") + call fld_list_add(fldsFrIce_num, fldsFrIce, "stress_on_ocn_ice_merid" ,"1" , "will provide") + call fld_list_add(fldsFrIce_num, fldsFrIce, "sea_ice_temperature" ,"1" , "will provide") ! 
call fld_list_add(fldsFrIce_num, fldsFrIce, "ice_mask" ,"1" , "will provide") - call fld_list_add(fldsFrIce_num, fldsFrIce, "mean_sw_pen_to_ocn" ,"1" , "will provide") + call fld_list_add(fldsFrIce_num, fldsFrIce, "mean_sw_pen_to_ocn" ,"1" , "will provide") call fld_list_add(fldsFrIce_num, fldsFrIce, "mean_fresh_water_to_ocean_rate" ,"1" , "will provide") - call fld_list_add(fldsFrIce_num, fldsFrIce, "mean_salt_rate" ,"1" , "will provide") - call fld_list_add(fldsFrIce_num, fldsFrIce, "net_heat_flx_to_ocn" ,"1" , "will provide") + call fld_list_add(fldsFrIce_num, fldsFrIce, "mean_salt_rate" ,"1" , "will provide") + call fld_list_add(fldsFrIce_num, fldsFrIce, "net_heat_flx_to_ocn" ,"1" , "will provide") call fld_list_add(fldsFrIce_num, fldsFrIce, "mean_ice_volume" ,"1" , "will provide") call fld_list_add(fldsFrIce_num, fldsFrIce, "mean_snow_volume" ,"1" , "will provide") @@ -885,9 +885,9 @@ module cice_cap real(kind=ESMF_KIND_R8), pointer :: dataPtr_ocncm(:,:,:) real(kind=ESMF_KIND_R8), pointer :: dataPtr_fmpot(:,:,:) real(kind=ESMF_KIND_R8), pointer :: dataPtr_mld(:,:,:) - integer :: ilo,ihi,jlo,jhi + integer :: ilo,ihi,jlo,jhi integer :: i,j,iblk,n,i1,i2,j1,j2 - real(kind=ESMF_KIND_R8) :: ue, vn, AngT_s + real(kind=ESMF_KIND_R8) :: ue, vn, AngT_s type(block) :: this_block character(len=*),parameter :: subname='(cice_cap:CICE_Import)' @@ -921,7 +921,7 @@ module cice_cap j1 = j - jlo + 1 sss (i,j,iblk) = dataPtr_sss (i1,j1,iblk) ! sea surface salinity (maybe for mushy layer) sst (i,j,iblk) = dataPtr_sst (i1,j1,iblk) - Tffresh ! sea surface temp (may not be needed?) - + frzmlt (i,j,iblk) = dataPtr_fmpot (i1,j1,iblk) ue = dataPtr_ocncz (i1,j1,iblk) vn = dataPtr_ocncm (i1,j1,iblk) @@ -969,7 +969,7 @@ module cice_cap integer :: ilo,ihi,jlo,jhi integer :: i,j,iblk,n,i1,i2,j1,j2 real(kind=ESMF_KIND_R8) :: ui, vj, angT - + type(block) :: this_block character(len=*),parameter :: subname='(cice_cap:CICE_Export)' !TODO clean up fields @@ -1035,7 +1035,7 @@ module cice_cap ! call ESMF_LogWrite(trim(tmpstr), ESMF_LOGMSG_INFO, rc=dbrc) - + end subroutine end module cice_cap diff --git a/cicecore/drivers/standalone/cice/CICE.F90 b/cicecore/drivers/standalone/cice/CICE.F90 index d6a28c3ba..79dd06fca 100644 --- a/cicecore/drivers/standalone/cice/CICE.F90 +++ b/cicecore/drivers/standalone/cice/CICE.F90 @@ -1,21 +1,21 @@ !======================================================================= ! Copyright (c) 2022, Triad National Security, LLC ! All rights reserved. -! +! ! Copyright 2022. Triad National Security, LLC. This software was -! produced under U.S. Government contract DE-AC52-06NA25396 for Los +! produced under U.S. Government contract DE-AC52-06NA25396 for Los ! Alamos National Laboratory (LANL), which is operated by Triad -! National Security, LLC for the U.S. Department of Energy. The U.S. -! Government has rights to use, reproduce, and distribute this software. -! NEITHER THE GOVERNMENT NOR TRIAD NATIONAL SECURITY, LLC MAKES ANY +! National Security, LLC for the U.S. Department of Energy. The U.S. +! Government has rights to use, reproduce, and distribute this software. +! NEITHER THE GOVERNMENT NOR TRIAD NATIONAL SECURITY, LLC MAKES ANY ! WARRANTY, EXPRESS OR IMPLIED, OR ASSUMES ANY LIABILITY FOR THE USE OF -! THIS SOFTWARE. If software is modified to produce derivative works, -! such modified software should be clearly marked, so as not to confuse +! THIS SOFTWARE. If software is modified to produce derivative works, +! such modified software should be clearly marked, so as not to confuse ! 
it with the version available from LANL. ! ! The full license and distribution policy are available from ! https://github.com/CICE-Consortium -! +! !======================================================================= ! ! Main driver routine for CICE. Initializes and steps through the model. @@ -49,7 +49,7 @@ program icemodel call CICE_Run !----------------------------------------------------------------- - ! Finalize CICE + ! Finalize CICE !----------------------------------------------------------------- call CICE_Finalize diff --git a/cicecore/drivers/standalone/cice/CICE_FinalMod.F90 b/cicecore/drivers/standalone/cice/CICE_FinalMod.F90 index 28811c3cd..a8b074883 100644 --- a/cicecore/drivers/standalone/cice/CICE_FinalMod.F90 +++ b/cicecore/drivers/standalone/cice/CICE_FinalMod.F90 @@ -30,7 +30,6 @@ module CICE_FinalMod subroutine CICE_Finalize - use ice_restart_shared, only: runid use ice_timers, only: ice_timer_stop, ice_timer_print_all, timer_total, & timer_stats diff --git a/cicecore/drivers/standalone/cice/CICE_InitMod.F90 b/cicecore/drivers/standalone/cice/CICE_InitMod.F90 index 7208da481..07a151a01 100644 --- a/cicecore/drivers/standalone/cice/CICE_InitMod.F90 +++ b/cicecore/drivers/standalone/cice/CICE_InitMod.F90 @@ -66,7 +66,7 @@ subroutine cice_init floe_binwidth, c_fsd_range use ice_state, only: alloc_state use ice_flux_bgc, only: alloc_flux_bgc - use ice_calendar, only: dt, dt_dyn, istep, istep1, write_ic, & + use ice_calendar, only: dt, dt_dyn, write_ic, & init_calendar, advance_timestep, calc_timesteps use ice_communicate, only: init_communicate, my_task, master_task use ice_diagnostics, only: init_diags @@ -190,11 +190,11 @@ subroutine cice_init if (icepack_warnings_aborted()) call abort_ice(trim(subname), & file=__FILE__,line= __LINE__) - if (tr_aero .or. tr_zaero) call faero_optics !initialize aerosol optical + if (tr_aero .or. tr_zaero) call faero_optics !initialize aerosol optical !property tables - ! Initialize shortwave components using swdn from previous timestep - ! if restarting. These components will be scaled to current forcing + ! Initialize shortwave components using swdn from previous timestep + ! if restarting. These components will be scaled to current forcing ! in prep_radiation. if (trim(runtype) == 'continue' .or. restart) & call init_shortwave ! initialize radiative transfer @@ -243,7 +243,7 @@ subroutine cice_init call init_flux_atm ! initialize atmosphere fluxes sent to coupler call init_flux_ocn ! initialize ocean fluxes sent to coupler - if (write_ic) call accum_hist(dt) ! write initial conditions + if (write_ic) call accum_hist(dt) ! write initial conditions if (my_task == master_task) then call ice_memusage_print(nu_diag,subname//':end') @@ -334,7 +334,7 @@ subroutine init_restart if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) - if (trim(runtype) == 'continue') then + if (trim(runtype) == 'continue') then ! start from core restart file call restartfile() ! given by pointer in ice_in call calendar() ! update time parameters @@ -345,17 +345,17 @@ subroutine init_restart ! call restartfile_v4 (ice_ic) ! CICE v4.1 binary restart file !!! uncomment if EAP restart data exists ! if (kdyn == 2) call read_restart_eap - endif + endif ! tracers - ! ice age tracer - if (tr_iage) then + ! ice age tracer + if (tr_iage) then if (trim(runtype) == 'continue') & restart_age = .true. 
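         ! (Sketch of the pattern repeated for each tracer in init_restart,
         !  with X standing for a generic tracer name; illustrative only:
         !     if (trim(runtype) == 'continue') restart_X = .true.
         !     if (restart_X) then
         !        call read_restart_X
         !     else
         !        do iblk = 1, nblocks
         !           call init_X(trcrn(:,:,nt_X,:,iblk))
         !        enddo
         !     endif
         !  restart_X, read_restart_X, init_X and nt_X are placeholders.)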
if (restart_age) then call read_restart_age else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_age(trcrn(:,:,nt_iage,:,iblk)) enddo ! iblk endif @@ -366,7 +366,7 @@ subroutine init_restart if (restart_FY) then call read_restart_FY else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_FY(trcrn(:,:,nt_FY,:,iblk)) enddo ! iblk endif @@ -377,7 +377,7 @@ subroutine init_restart if (restart_lvl) then call read_restart_lvl else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_lvl(iblk,trcrn(:,:,nt_alvl,:,iblk), & trcrn(:,:,nt_vlvl,:,iblk)) enddo ! iblk @@ -391,7 +391,7 @@ subroutine init_restart if (restart_pond_cesm) then call read_restart_pond_cesm else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_meltponds_cesm(trcrn(:,:,nt_apnd,:,iblk), & trcrn(:,:,nt_hpnd,:,iblk)) enddo ! iblk @@ -405,7 +405,7 @@ subroutine init_restart if (restart_pond_lvl) then call read_restart_pond_lvl else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_meltponds_lvl(trcrn(:,:,nt_apnd,:,iblk), & trcrn(:,:,nt_hpnd,:,iblk), & trcrn(:,:,nt_ipnd,:,iblk), & @@ -420,7 +420,7 @@ subroutine init_restart if (restart_pond_topo) then call read_restart_pond_topo else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_meltponds_topo(trcrn(:,:,nt_apnd,:,iblk), & trcrn(:,:,nt_hpnd,:,iblk), & trcrn(:,:,nt_ipnd,:,iblk)) @@ -471,7 +471,7 @@ subroutine init_restart if (restart_aero) then call read_restart_aero else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_aerosol(trcrn(:,:,nt_aero:nt_aero+4*n_aero-1,:,iblk)) enddo ! iblk endif ! .not. restart_aero diff --git a/cicecore/drivers/standalone/cice/CICE_RunMod.F90 b/cicecore/drivers/standalone/cice/CICE_RunMod.F90 index 5547ba765..00c7921d1 100644 --- a/cicecore/drivers/standalone/cice/CICE_RunMod.F90 +++ b/cicecore/drivers/standalone/cice/CICE_RunMod.F90 @@ -7,7 +7,7 @@ ! William H. Lipscomb, LANL ! ! 2006 ECH: moved exit timeLoop to prevent execution of unnecessary timestep -! 2006 ECH: Streamlined for efficiency +! 2006 ECH: Streamlined for efficiency ! 2006 ECH: Converted to free source form (F90) ! 2007 BPB: Modified Delta-Eddington shortwave interface ! 2008 ECH: moved ESMF code to its own driver @@ -45,7 +45,7 @@ module CICE_RunMod subroutine CICE_Run - use ice_calendar, only: istep, istep1, dt, stop_now, advance_timestep + use ice_calendar, only: dt, stop_now, advance_timestep use ice_forcing, only: get_forcing_atmo, get_forcing_ocn, & get_wave_spec use ice_forcing_bgc, only: get_forcing_bgc, get_atm_bgc, & @@ -125,7 +125,7 @@ subroutine CICE_Run ! end of timestep loop !-------------------------------------------------------------------- - call ice_timer_stop(timer_step) ! end timestepping loop timer + call ice_timer_stop(timer_step) ! end timestepping loop timer end subroutine CICE_Run @@ -169,7 +169,7 @@ subroutine ice_step timer_hist, timer_readwrite integer (kind=int_kind) :: & - iblk , & ! block index + iblk , & ! block index k , & ! dynamics supercycling index ktherm ! thermodynamics is off when ktherm = -1 @@ -418,7 +418,7 @@ subroutine ice_step if (tr_iso) call write_restart_iso if (tr_aero) call write_restart_aero if (solve_zsal .or. skl_bgc .or. z_tracers) & - call write_restart_bgc + call write_restart_bgc if (tr_brine) call write_restart_hbrine if (kdyn == 2) call write_restart_eap call final_restart @@ -426,7 +426,7 @@ subroutine ice_step call ice_timer_stop(timer_readwrite) ! reading/writing end subroutine ice_step - + !======================================================================= ! ! 
Prepare for coupling @@ -461,12 +461,12 @@ subroutine coupling_prep (iblk) use ice_step_mod, only: ocean_mixed_layer use ice_timers, only: timer_couple, ice_timer_start, ice_timer_stop - integer (kind=int_kind), intent(in) :: & - iblk ! block index + integer (kind=int_kind), intent(in) :: & + iblk ! block index ! local variables - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & ilo,ihi,jlo,jhi, & ! beginning and end of physical domain n , & ! thickness category index i,j , & ! horizontal indices @@ -602,8 +602,8 @@ subroutine coupling_prep (iblk) fsalt_ai (i,j,iblk) = fsalt (i,j,iblk) fhocn_ai (i,j,iblk) = fhocn (i,j,iblk) fswthru_ai(i,j,iblk) = fswthru(i,j,iblk) - fzsal_ai (i,j,iblk) = fzsal (i,j,iblk) - fzsal_g_ai(i,j,iblk) = fzsal_g(i,j,iblk) + fzsal_ai (i,j,iblk) = fzsal (i,j,iblk) + fzsal_g_ai(i,j,iblk) = fzsal_g(i,j,iblk) if (nbtrcr > 0) then do k = 1, nbtrcr @@ -624,7 +624,7 @@ subroutine coupling_prep (iblk) enddo !----------------------------------------------------------------- - ! Divide fluxes by ice area + ! Divide fluxes by ice area ! - the CESM coupler assumes fluxes are per unit ice area ! - also needed for global budget in diagnostics !----------------------------------------------------------------- @@ -654,22 +654,22 @@ subroutine coupling_prep (iblk) Qref_iso =Qref_iso (:,:,:,iblk), & fiso_evap=fiso_evap(:,:,:,iblk), & fiso_ocn =fiso_ocn (:,:,:,iblk)) - + #ifdef CICE_IN_NEMO !echmod - comment this out for efficiency, if .not. calc_Tsfc if (.not. calc_Tsfc) then !--------------------------------------------------------------- - ! If surface fluxes were provided, conserve these fluxes at ice - ! free points by passing to ocean. + ! If surface fluxes were provided, conserve these fluxes at ice + ! free points by passing to ocean. !--------------------------------------------------------------- - call sfcflux_to_ocn & + call sfcflux_to_ocn & (nx_block, ny_block, & tmask (:,:,iblk), aice_init(:,:,iblk), & fsurfn_f (:,:,:,iblk), flatn_f(:,:,:,iblk), & fresh (:,:,iblk), fhocn (:,:,iblk)) - endif + endif !echmod #endif call ice_timer_stop(timer_couple,iblk) ! atm/ocn coupling @@ -681,10 +681,10 @@ end subroutine coupling_prep !======================================================================= ! ! If surface heat fluxes are provided to CICE instead of CICE calculating -! them internally (i.e. .not. calc_Tsfc), then these heat fluxes can +! them internally (i.e. .not. calc_Tsfc), then these heat fluxes can ! be provided at points which do not have ice. (This could be due to ! the heat fluxes being calculated on a lower resolution grid or the -! heat fluxes not recalculated at every CICE timestep.) At ice free points, +! heat fluxes not recalculated at every CICE timestep.) At ice free points, ! conserve energy and water by passing these fluxes to the ocean. ! ! author: A. McLaren, Met Office @@ -717,7 +717,7 @@ subroutine sfcflux_to_ocn(nx_block, ny_block, & ! local variables integer (kind=int_kind) :: & i, j, n ! horizontal indices - + real (kind=dbl_kind) :: & puny, & ! Lsub, & ! diff --git a/cicecore/drivers/unittest/gridavgchk/CICE_InitMod.F90 b/cicecore/drivers/unittest/gridavgchk/CICE_InitMod.F90 index a252bc1b7..ad355d783 100644 --- a/cicecore/drivers/unittest/gridavgchk/CICE_InitMod.F90 +++ b/cicecore/drivers/unittest/gridavgchk/CICE_InitMod.F90 @@ -181,11 +181,11 @@ subroutine cice_init if (icepack_warnings_aborted()) call abort_ice(trim(subname), & file=__FILE__,line= __LINE__) - if (tr_aero .or. 
tr_zaero) call faero_optics !initialize aerosol optical + if (tr_aero .or. tr_zaero) call faero_optics !initialize aerosol optical !property tables - ! Initialize shortwave components using swdn from previous timestep - ! if restarting. These components will be scaled to current forcing + ! Initialize shortwave components using swdn from previous timestep + ! if restarting. These components will be scaled to current forcing ! in prep_radiation. if (trim(runtype) == 'continue' .or. restart) & call init_shortwave ! initialize radiative transfer @@ -222,7 +222,7 @@ subroutine cice_init call init_flux_atm ! initialize atmosphere fluxes sent to coupler call init_flux_ocn ! initialize ocean fluxes sent to coupler - if (write_ic) call accum_hist(dt) ! write initial conditions + if (write_ic) call accum_hist(dt) ! write initial conditions end subroutine cice_init @@ -305,7 +305,7 @@ subroutine init_restart if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) - if (trim(runtype) == 'continue') then + if (trim(runtype) == 'continue') then ! start from core restart file call restartfile() ! given by pointer in ice_in call calendar() ! update time parameters @@ -316,17 +316,17 @@ subroutine init_restart ! call restartfile_v4 (ice_ic) ! CICE v4.1 binary restart file !!! uncomment if EAP restart data exists ! if (kdyn == 2) call read_restart_eap - endif + endif ! tracers - ! ice age tracer - if (tr_iage) then + ! ice age tracer + if (tr_iage) then if (trim(runtype) == 'continue') & restart_age = .true. if (restart_age) then call read_restart_age else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_age(trcrn(:,:,nt_iage,:,iblk)) enddo ! iblk endif @@ -337,7 +337,7 @@ subroutine init_restart if (restart_FY) then call read_restart_FY else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_FY(trcrn(:,:,nt_FY,:,iblk)) enddo ! iblk endif @@ -348,7 +348,7 @@ subroutine init_restart if (restart_lvl) then call read_restart_lvl else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_lvl(iblk,trcrn(:,:,nt_alvl,:,iblk), & trcrn(:,:,nt_vlvl,:,iblk)) enddo ! iblk @@ -362,7 +362,7 @@ subroutine init_restart if (restart_pond_cesm) then call read_restart_pond_cesm else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_meltponds_cesm(trcrn(:,:,nt_apnd,:,iblk), & trcrn(:,:,nt_hpnd,:,iblk)) enddo ! iblk @@ -376,7 +376,7 @@ subroutine init_restart if (restart_pond_lvl) then call read_restart_pond_lvl else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_meltponds_lvl(trcrn(:,:,nt_apnd,:,iblk), & trcrn(:,:,nt_hpnd,:,iblk), & trcrn(:,:,nt_ipnd,:,iblk), & @@ -391,7 +391,7 @@ subroutine init_restart if (restart_pond_topo) then call read_restart_pond_topo else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_meltponds_topo(trcrn(:,:,nt_apnd,:,iblk), & trcrn(:,:,nt_hpnd,:,iblk), & trcrn(:,:,nt_ipnd,:,iblk)) @@ -414,7 +414,7 @@ subroutine init_restart if (restart_iso) then call read_restart_iso else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_isotope(trcrn(:,:,nt_isosno:nt_isosno+n_iso-1,:,iblk), & trcrn(:,:,nt_isoice:nt_isoice+n_iso-1,:,iblk)) enddo ! iblk @@ -426,7 +426,7 @@ subroutine init_restart if (restart_aero) then call read_restart_aero else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_aerosol(trcrn(:,:,nt_aero:nt_aero+4*n_aero-1,:,iblk)) enddo ! iblk endif ! .not. 
restart_aero diff --git a/cicecore/drivers/unittest/gridavgchk/gridavgchk.F90 b/cicecore/drivers/unittest/gridavgchk/gridavgchk.F90 index 5a4b3d54e..bd7ed3165 100644 --- a/cicecore/drivers/unittest/gridavgchk/gridavgchk.F90 +++ b/cicecore/drivers/unittest/gridavgchk/gridavgchk.F90 @@ -8,7 +8,7 @@ program gridavgchk ! There are lots of issues here ! areas (T, U, N, E) are not locally conservative, affect X2YF ! X2YF is unmasked which can create havoc in U2T type directions - ! X2YS is masked but there can be no active cells to average (for instance, + ! X2YS is masked but there can be no active cells to average (for instance, ! single gridcell wide channels U2T where result is zero) ! land block elimination can lead to missing data on halo ! This test tries to deal with all these things.... @@ -36,7 +36,7 @@ program gridavgchk integer(int_kind) :: i, j, n, ib, ie, jb, je, iblock integer(int_kind) :: iglob, jglob integer(int_kind) :: blockID, numBlocks - type (block) :: this_block + type (block) :: this_block real(dbl_kind) ,allocatable :: array1x(:,:,:), array1y(:,:,:) ! input real(dbl_kind) ,allocatable :: arraysx(:,:,:), arraysy(:,:,:) ! extra input for NE2T, NE2U diff --git a/cicecore/drivers/unittest/optargs/optargs.F90 b/cicecore/drivers/unittest/optargs/optargs.F90 index 14c738d47..4acf7ac9f 100644 --- a/cicecore/drivers/unittest/optargs/optargs.F90 +++ b/cicecore/drivers/unittest/optargs/optargs.F90 @@ -193,7 +193,7 @@ program optargs computeD = .true. ierrV = oa_A + oa_B + oa_C + oa_D Ai1 = 7. - B = 9. + B = 9. Ci1 = 7. Di1 = 12; Di2=3. resultV = 49. @@ -205,7 +205,7 @@ program optargs computeD = .true. ierrV = oa_A + oa_B + oa_D Ai1 = 10. - B = 11. + B = 11. Di1 = 12; Di2=3. resultV = 40. call oa_layer1(Ao=Ao,B=B,Co=Co,Do=Do,Ai1=Ai1,Ci1=Ci1,Di1=Di1,Di2=Di2,ierr=ierr) diff --git a/cicecore/drivers/unittest/sumchk/CICE_InitMod.F90 b/cicecore/drivers/unittest/sumchk/CICE_InitMod.F90 index a252bc1b7..ad355d783 100644 --- a/cicecore/drivers/unittest/sumchk/CICE_InitMod.F90 +++ b/cicecore/drivers/unittest/sumchk/CICE_InitMod.F90 @@ -181,11 +181,11 @@ subroutine cice_init if (icepack_warnings_aborted()) call abort_ice(trim(subname), & file=__FILE__,line= __LINE__) - if (tr_aero .or. tr_zaero) call faero_optics !initialize aerosol optical + if (tr_aero .or. tr_zaero) call faero_optics !initialize aerosol optical !property tables - ! Initialize shortwave components using swdn from previous timestep - ! if restarting. These components will be scaled to current forcing + ! Initialize shortwave components using swdn from previous timestep + ! if restarting. These components will be scaled to current forcing ! in prep_radiation. if (trim(runtype) == 'continue' .or. restart) & call init_shortwave ! initialize radiative transfer @@ -222,7 +222,7 @@ subroutine cice_init call init_flux_atm ! initialize atmosphere fluxes sent to coupler call init_flux_ocn ! initialize ocean fluxes sent to coupler - if (write_ic) call accum_hist(dt) ! write initial conditions + if (write_ic) call accum_hist(dt) ! write initial conditions end subroutine cice_init @@ -305,7 +305,7 @@ subroutine init_restart if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) - if (trim(runtype) == 'continue') then + if (trim(runtype) == 'continue') then ! start from core restart file call restartfile() ! given by pointer in ice_in call calendar() ! update time parameters @@ -316,17 +316,17 @@ subroutine init_restart ! call restartfile_v4 (ice_ic) ! CICE v4.1 binary restart file !!! 
uncomment if EAP restart data exists ! if (kdyn == 2) call read_restart_eap - endif + endif ! tracers - ! ice age tracer - if (tr_iage) then + ! ice age tracer + if (tr_iage) then if (trim(runtype) == 'continue') & restart_age = .true. if (restart_age) then call read_restart_age else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_age(trcrn(:,:,nt_iage,:,iblk)) enddo ! iblk endif @@ -337,7 +337,7 @@ subroutine init_restart if (restart_FY) then call read_restart_FY else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_FY(trcrn(:,:,nt_FY,:,iblk)) enddo ! iblk endif @@ -348,7 +348,7 @@ subroutine init_restart if (restart_lvl) then call read_restart_lvl else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_lvl(iblk,trcrn(:,:,nt_alvl,:,iblk), & trcrn(:,:,nt_vlvl,:,iblk)) enddo ! iblk @@ -362,7 +362,7 @@ subroutine init_restart if (restart_pond_cesm) then call read_restart_pond_cesm else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_meltponds_cesm(trcrn(:,:,nt_apnd,:,iblk), & trcrn(:,:,nt_hpnd,:,iblk)) enddo ! iblk @@ -376,7 +376,7 @@ subroutine init_restart if (restart_pond_lvl) then call read_restart_pond_lvl else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_meltponds_lvl(trcrn(:,:,nt_apnd,:,iblk), & trcrn(:,:,nt_hpnd,:,iblk), & trcrn(:,:,nt_ipnd,:,iblk), & @@ -391,7 +391,7 @@ subroutine init_restart if (restart_pond_topo) then call read_restart_pond_topo else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_meltponds_topo(trcrn(:,:,nt_apnd,:,iblk), & trcrn(:,:,nt_hpnd,:,iblk), & trcrn(:,:,nt_ipnd,:,iblk)) @@ -414,7 +414,7 @@ subroutine init_restart if (restart_iso) then call read_restart_iso else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_isotope(trcrn(:,:,nt_isosno:nt_isosno+n_iso-1,:,iblk), & trcrn(:,:,nt_isoice:nt_isoice+n_iso-1,:,iblk)) enddo ! iblk @@ -426,7 +426,7 @@ subroutine init_restart if (restart_aero) then call read_restart_aero else - do iblk = 1, nblocks + do iblk = 1, nblocks call init_aerosol(trcrn(:,:,nt_aero:nt_aero+4*n_aero-1,:,iblk)) enddo ! iblk endif ! .not. restart_aero diff --git a/cicecore/drivers/unittest/sumchk/sumchk.F90 b/cicecore/drivers/unittest/sumchk/sumchk.F90 index f314959cb..aba435b0e 100644 --- a/cicecore/drivers/unittest/sumchk/sumchk.F90 +++ b/cicecore/drivers/unittest/sumchk/sumchk.F90 @@ -1,7 +1,7 @@ program sumchk - ! This tests the CICE ice_global_reductions infrastructure by + ! This tests the CICE ice_global_reductions infrastructure by ! using CICE_InitMod (from the standalone model) to read/initialize ! a CICE grid/configuration. Then methods in ice_global_reductions ! are verified using hardwired inputs with known outputs. @@ -28,7 +28,7 @@ program sumchk integer(int_kind) :: i, j, k, l, m, n, iblock, ib, ie, jb, je integer(int_kind) :: blockID, numBlocks - type (block) :: this_block + type (block) :: this_block real(dbl_kind) ,allocatable :: arrayA(:,:,:),arrayB(:,:,:),arrayC(:,:,:) integer(int_kind),allocatable :: arrayiA(:,:,:),arrayiB(:,:,:) diff --git a/cicecore/shared/ice_arrays_column.F90 b/cicecore/shared/ice_arrays_column.F90 index dbad4292c..c9e8be8db 100644 --- a/cicecore/shared/ice_arrays_column.F90 +++ b/cicecore/shared/ice_arrays_column.F90 @@ -1,7 +1,7 @@ !======================================================================= ! Grid-dependent arrays needed for column package -! These were originally module variables in modules that became part of +! These were originally module variables in modules that became part of ! the column package ! author: Elizabeth C. 
Hunke, LANL @@ -94,9 +94,9 @@ module ice_arrays_column ! albedo components for history real (kind=dbl_kind), & dimension (:,:,:,:), allocatable, public :: & - albicen, & ! bare ice - albsnon, & ! snow - albpndn, & ! pond + albicen, & ! bare ice + albsnon, & ! snow + albpndn, & ! pond apeffn ! effective pond area used for radiation calculation real (kind=dbl_kind), dimension (:,:,:,:), allocatable, public :: & @@ -128,12 +128,12 @@ module ice_arrays_column ! aerosol optical properties -> band | ! v aerosol ! for combined dust category, use category 4 properties - real (kind=dbl_kind), dimension(:,:), allocatable, public :: & + real (kind=dbl_kind), dimension(:,:), allocatable, public :: & kaer_tab, & ! aerosol mass extinction cross section (m2/kg) waer_tab, & ! aerosol single scatter albedo (fraction) gaer_tab ! aerosol asymmetry parameter (cos(theta)) - real (kind=dbl_kind), dimension(:,:), allocatable, public :: & + real (kind=dbl_kind), dimension(:,:), allocatable, public :: & kaer_bc_tab, & ! BC mass extinction cross section (m2/kg) waer_bc_tab, & ! BC single scatter albedo (fraction) gaer_bc_tab ! BC aerosol asymmetry parameter (cos(theta)) @@ -146,7 +146,7 @@ module ice_arrays_column real (kind=dbl_kind), dimension (:), allocatable, public :: & bgrid , & ! biology nondimensional vertical grid points igrid , & ! biology vertical interface points - cgrid , & ! CICE vertical coordinate + cgrid , & ! CICE vertical coordinate icgrid , & ! interface grid for CICE (shortwave variable) swgrid ! grid for ice tracers used in dEdd scheme @@ -187,21 +187,21 @@ module ice_arrays_column ! 2*icepack_max_algae + icepack_max_doc + 7 + icepack_max_dic + icepack_max_don + icepack_max_fe ! Fep(1:icepack_max_fe) = 2*icepack_max_algae + icepack_max_doc + 8 + icepack_max_dic + icepack_max_don + icepack_max_fe: ! 2*icepack_max_algae + icepack_max_doc + 7 + icepack_max_dic + icepack_max_don + 2*icepack_max_fe - ! zaero(1:icepack_max_aero) = 2*icepack_max_algae + icepack_max_doc + 8 + icepack_max_dic + icepack_max_don + 2*icepack_max_fe: + ! zaero(1:icepack_max_aero) = 2*icepack_max_algae + icepack_max_doc + 8 + icepack_max_dic + icepack_max_don + 2*icepack_max_fe: ! 2*icepack_max_algae + icepack_max_doc + 7 + icepack_max_dic + icepack_max_don + 2*icepack_max_fe ! + icepack_max_aero ! humic == 2*icepack_max_algae + icepack_max_doc + 8 + icepack_max_dic + icepack_max_don + 2*icepack_max_fe - ! + icepack_max_aero + ! + icepack_max_aero integer (kind=int_kind), dimension(:,:,:,:), allocatable, public :: & - algal_peak ! vertical location of algal maximum, 0 if no maximum + algal_peak ! vertical location of algal maximum, 0 if no maximum - real (kind=dbl_kind), & + real (kind=dbl_kind), & dimension (:,:,:,:,:), allocatable, public :: & Zoo ! N losses accumulated in timestep (ie. zooplankton/bacteria) ! mmol/m^3 - real (kind=dbl_kind), & + real (kind=dbl_kind), & dimension (:,:,:,:), allocatable, public :: & dhbr_top , & ! brine top change dhbr_bot ! brine bottom change @@ -214,7 +214,7 @@ module ice_arrays_column real (kind=dbl_kind), & dimension (:,:,:,:,:), allocatable, public :: & - bphi , & ! porosity of layers + bphi , & ! porosity of layers bTiz ! layer temperatures interpolated on bio grid (C) real (kind=dbl_kind), & @@ -222,23 +222,23 @@ module ice_arrays_column darcy_V ! darcy velocity positive up (m/s) real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & - zsal_tot , & ! Total ice salinity in per grid cell (g/m^2) - chl_net , & ! Total chla (mg chla/m^2) per grid cell - NO_net ! 
Total nitrate per grid cell + zsal_tot , & ! Total ice salinity in per grid cell (g/m^2) + chl_net , & ! Total chla (mg chla/m^2) per grid cell + NO_net ! Total nitrate per grid cell logical (kind=log_kind), dimension (:,:,:), allocatable, public :: & - Rayleigh_criteria ! .true. means Ra_c was reached + Rayleigh_criteria ! .true. means Ra_c was reached real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & Rayleigh_real ! .true. = c1, .false. = c0 - real (kind=dbl_kind), & + real (kind=dbl_kind), & dimension (:,:,:,:), allocatable, public :: & sice_rho ! avg sea ice density (kg/m^3) ! ech: diagnostic only? - real (kind=dbl_kind), & + real (kind=dbl_kind), & dimension (:,:,:,:), allocatable, public :: & - fzsaln, & ! category fzsal(kg/m^2/s) + fzsaln, & ! category fzsal(kg/m^2/s) fzsaln_g ! salt flux from gravity drainage only real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & @@ -249,26 +249,26 @@ module ice_arrays_column zfswin ! Shortwave flux into layers interpolated on bio grid (W/m^2) real (kind=dbl_kind), dimension (:,:,:,:,:), allocatable, public :: & - iDi , & ! igrid Diffusivity (m^2/s) - iki ! Ice permeability (m^2) + iDi , & ! igrid Diffusivity (m^2/s) + iki ! Ice permeability (m^2) real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & upNO , & ! nitrate uptake rate (mmol/m^2/d) times aice upNH ! ammonium uptake rate (mmol/m^2/d) times aice - + real (kind=dbl_kind), & dimension(:,:,:,:,:), allocatable, public :: & - trcrn_sw ! bgc tracers active in the delta-Eddington shortwave + trcrn_sw ! bgc tracers active in the delta-Eddington shortwave ! calculation on the shortwave grid (swgrid) real (kind=dbl_kind), & dimension (:,:,:,:), allocatable, public :: & - ice_bio_net , & ! depth integrated tracer (mmol/m^2) + ice_bio_net , & ! depth integrated tracer (mmol/m^2) snow_bio_net ! depth integrated snow tracer (mmol/m^2) logical (kind=log_kind), public :: & oceanmixed_ice, & ! if true, use internal ocean mixed layer - restore_bgc ! + restore_bgc ! character(char_len), public :: & fe_data_type ! 'default', 'clim' @@ -280,7 +280,7 @@ module ice_arrays_column optics_file, & ! modal aero optics file optics_file_fieldname ! modal aero optics file fieldname - real (kind=dbl_kind), dimension(:), allocatable, public :: & + real (kind=dbl_kind), dimension(:), allocatable, public :: & R_C2N_DON ! carbon to nitrogen mole ratio of DON pool real (kind=dbl_kind), dimension(:), allocatable, public :: & @@ -353,11 +353,11 @@ subroutine alloc_arrays_column grow_net (nx_block,ny_block,max_blocks), & ! Specific growth rate (/s) per grid cell PP_net (nx_block,ny_block,max_blocks), & ! Total production (mg C/m^2/s) per grid cell hbri (nx_block,ny_block,max_blocks), & ! brine height, area-averaged for comparison with hi (m) - zsal_tot (nx_block,ny_block,max_blocks), & ! Total ice salinity in per grid cell (g/m^2) - chl_net (nx_block,ny_block,max_blocks), & ! Total chla (mg chla/m^2) per grid cell - NO_net (nx_block,ny_block,max_blocks), & ! Total nitrate per grid cell + zsal_tot (nx_block,ny_block,max_blocks), & ! Total ice salinity in per grid cell (g/m^2) + chl_net (nx_block,ny_block,max_blocks), & ! Total chla (mg chla/m^2) per grid cell + NO_net (nx_block,ny_block,max_blocks), & ! Total nitrate per grid cell Rayleigh_criteria & - (nx_block,ny_block,max_blocks), & ! .true. means Ra_c was reached + (nx_block,ny_block,max_blocks), & ! .true. means Ra_c was reached Rayleigh_real(nx_block,ny_block,max_blocks), & ! .true. = c1, .false. 
= c0 fzsal (nx_block,ny_block,max_blocks), & ! Total flux of salt to ocean at time step for conservation fzsal_g (nx_block,ny_block,max_blocks), & ! Total gravity drainage flux @@ -371,9 +371,9 @@ subroutine alloc_arrays_column alidrn (nx_block,ny_block,ncat,max_blocks), & ! near-ir direct albedo (fraction) alvdfn (nx_block,ny_block,ncat,max_blocks), & ! visible diffuse albedo (fraction) alidfn (nx_block,ny_block,ncat,max_blocks), & ! near-ir diffuse albedo (fraction) - albicen (nx_block,ny_block,ncat,max_blocks), & ! bare ice - albsnon (nx_block,ny_block,ncat,max_blocks), & ! snow - albpndn (nx_block,ny_block,ncat,max_blocks), & ! pond + albicen (nx_block,ny_block,ncat,max_blocks), & ! bare ice + albsnon (nx_block,ny_block,ncat,max_blocks), & ! snow + albpndn (nx_block,ny_block,ncat,max_blocks), & ! pond apeffn (nx_block,ny_block,ncat,max_blocks), & ! effective pond area used for radiation calculation snowfracn (nx_block,ny_block,ncat,max_blocks), & ! Category snow fraction used in radiation fswsfcn (nx_block,ny_block,ncat,max_blocks), & ! SW absorbed at ice/snow surface (W m-2) @@ -385,21 +385,21 @@ subroutine alloc_arrays_column fswintn (nx_block,ny_block,ncat,max_blocks), & ! SW absorbed in ice interior, below surface (W m-2) first_ice_real & (nx_block,ny_block,ncat,max_blocks), & ! .true. = c1, .false. = c0 - first_ice (nx_block,ny_block,ncat,max_blocks), & ! distinguishes ice that disappears (e.g. melts) and reappears (e.g. transport) + first_ice (nx_block,ny_block,ncat,max_blocks), & ! distinguishes ice that disappears (melts) and reappears (transport) dhbr_top (nx_block,ny_block,ncat,max_blocks), & ! brine top change dhbr_bot (nx_block,ny_block,ncat,max_blocks), & ! brine bottom change darcy_V (nx_block,ny_block,ncat,max_blocks), & ! darcy velocity positive up (m/s) sice_rho (nx_block,ny_block,ncat,max_blocks), & ! avg sea ice density (kg/m^3) ! ech: diagnostic only? - fzsaln (nx_block,ny_block,ncat,max_blocks), & ! category fzsal(kg/m^2/s) + fzsaln (nx_block,ny_block,ncat,max_blocks), & ! category fzsal(kg/m^2/s) fzsaln_g (nx_block,ny_block,ncat,max_blocks), & ! salt flux from gravity drainage only Iswabsn (nx_block,ny_block,nilyr,ncat,max_blocks), & ! SW radiation absorbed in ice layers (W m-2) Sswabsn (nx_block,ny_block,nslyr,ncat,max_blocks), & ! SW radiation absorbed in snow layers (W m-2) fswpenln (nx_block,ny_block,nilyr+1,ncat,max_blocks), & ! visible SW entering ice layers (W m-2) Zoo (nx_block,ny_block,nblyr+1,ncat,max_blocks), & ! N losses accumulated in timestep (ie. zooplankton/bacteria) zfswin (nx_block,ny_block,nblyr+1,ncat,max_blocks), & ! Shortwave flux into layers interpolated on bio grid (W/m^2) - iDi (nx_block,ny_block,nblyr+1,ncat,max_blocks), & ! igrid Diffusivity (m^2/s) - iki (nx_block,ny_block,nblyr+1,ncat,max_blocks), & ! Ice permeability (m^2) - bphi (nx_block,ny_block,nblyr+2,ncat,max_blocks), & ! porosity of layers + iDi (nx_block,ny_block,nblyr+1,ncat,max_blocks), & ! igrid Diffusivity (m^2/s) + iki (nx_block,ny_block,nblyr+1,ncat,max_blocks), & ! Ice permeability (m^2) + bphi (nx_block,ny_block,nblyr+2,ncat,max_blocks), & ! porosity of layers bTiz (nx_block,ny_block,nblyr+2,ncat,max_blocks), & ! layer temperatures interpolated on bio grid (C) stat=ierr) if (ierr/=0) call abort_ice(subname//': Out of Memory1') @@ -409,9 +409,9 @@ subroutine alloc_arrays_column fbio_snoice (nx_block,ny_block,max_nbtrcr,max_blocks), & ! fluxes from snow to ice fbio_atmice (nx_block,ny_block,max_nbtrcr,max_blocks), & ! 
fluxes from atm to ice ocean_bio_all(nx_block,ny_block,max_nbtrcr,max_blocks), & ! fixed order, all values even for tracers false - ice_bio_net (nx_block,ny_block,max_nbtrcr,max_blocks), & ! depth integrated tracer (mmol/m^2) + ice_bio_net (nx_block,ny_block,max_nbtrcr,max_blocks), & ! depth integrated tracer (mmol/m^2) snow_bio_net (nx_block,ny_block,max_nbtrcr,max_blocks), & ! depth integrated snow tracer (mmol/m^2) - algal_peak (nx_block,ny_block,max_algae ,max_blocks), & ! vertical location of algal maximum, 0 if no maximum + algal_peak (nx_block,ny_block,max_algae ,max_blocks), & ! vertical location of algal maximum, 0 if no maximum stat=ierr) if (ierr/=0) call abort_ice(subname//': Out of Memory2') @@ -420,7 +420,7 @@ subroutine alloc_arrays_column c_hi_range(ncat) , & ! bgrid(nblyr+2) , & ! biology nondimensional vertical grid points igrid(nblyr+1) , & ! biology vertical interface points - cgrid(nilyr+1) , & ! CICE vertical coordinate + cgrid(nilyr+1) , & ! CICE vertical coordinate icgrid(nilyr+1) , & ! interface grid for CICE (shortwave variable) swgrid(nilyr+1) , & ! grid for ice tracers used in dEdd scheme stat=ierr) diff --git a/cicecore/shared/ice_calendar.F90 b/cicecore/shared/ice_calendar.F90 index 7684fef67..ad1a87b4c 100644 --- a/cicecore/shared/ice_calendar.F90 +++ b/cicecore/shared/ice_calendar.F90 @@ -4,7 +4,7 @@ ! ! Authors: Elizabeth C. Hunke, LANL ! Tony Craig, NCAR -! Craig MacLachlan, UK Met Office +! Craig MacLachlan, UK Met Office ! ! 2006 ECH: Removed 'w' option for history; added 'h' and histfreq_n. ! Converted to free form source (F90). @@ -199,7 +199,7 @@ subroutine init_calendar hour=0 ! computed in calendar, but needs some reasonable initial value istep1 = istep0 ! number of steps at current timestep ! real (dumped) or imagined (use to set calendar) - idate0 = (myear)*10000 + mmonth*100 + mday ! date (yyyymmdd) + idate0 = (myear)*10000 + mmonth*100 + mday ! date (yyyymmdd) stop_now = 0 ! end program execution if stop_now=1 dt_dyn = dt/real(ndtd,kind=dbl_kind) ! dynamics et al timestep force_restart_now = .false. @@ -385,7 +385,7 @@ subroutine calendar() call abort_ice(subname//'ERROR: model year too large') endif - idate = (myear)*10000 + mmonth*100 + mday ! date (yyyymmdd) + idate = (myear)*10000 + mmonth*100 + mday ! date (yyyymmdd) yday = daycal(mmonth) + mday ! day of the year hour = int(msec/seconds_per_hour) @@ -638,7 +638,6 @@ integer function compute_days_between(year0,month0,day0,year1,month1,day1) integer (kind=int_kind), intent(in) :: day1 ! end day ! Internal variable - logical (kind=log_kind) :: isleap ! Leap year logical integer (kind=int_kind) :: nday0, nday1 character(len=*),parameter :: subname='(compute_days_between)' @@ -911,7 +910,7 @@ subroutine calendar_time2date(atimesecs,ayear,amon,aday,asec,year_ref,mon_ref,da tday = 1 tsec = 0 - ! add initial seconds to timesecs and treat lsec_ref as zero + ! add initial seconds to timesecs and treat lsec_ref as zero ltimesecs = atimesecs + real(lsec_ref,kind=dbl_kind) ! first estimate of tyear diff --git a/cicecore/shared/ice_constants.F90 b/cicecore/shared/ice_constants.F90 index c49732e35..f2da2ef9d 100644 --- a/cicecore/shared/ice_constants.F90 +++ b/cicecore/shared/ice_constants.F90 @@ -1,7 +1,7 @@ !======================================================================= ! ! This module defines a variety of physical and numerical constants -! used throughout the ice model +! used throughout the ice model ! ! author Elizabeth C. 
Hunke, LANL @@ -33,7 +33,7 @@ module ice_constants real (kind=dbl_kind), public :: & shlat = 30.0_dbl_kind ,&! artificial masking edge (deg) nhlat = -30.0_dbl_kind ! artificial masking edge (deg) - + !----------------------------------------------------------------- ! numbers used outside the column package !----------------------------------------------------------------- @@ -91,12 +91,12 @@ module ice_constants ! location of fields for staggered grids !----------------------------------------------------------------- - integer (int_kind), parameter, public :: & - field_loc_unknown = 0, & - field_loc_noupdate = -1, & - field_loc_center = 1, & - field_loc_NEcorner = 2, & - field_loc_Nface = 3, & + integer (int_kind), parameter, public :: & + field_loc_unknown = 0, & + field_loc_noupdate = -1, & + field_loc_center = 1, & + field_loc_NEcorner = 2, & + field_loc_Nface = 3, & field_loc_Eface = 4, & field_loc_Wface = 5 @@ -105,11 +105,11 @@ module ice_constants ! changes of direction across tripole boundary !----------------------------------------------------------------- - integer (int_kind), parameter, public :: & - field_type_unknown = 0, & - field_type_noupdate = -1, & - field_type_scalar = 1, & - field_type_vector = 2, & + integer (int_kind), parameter, public :: & + field_type_unknown = 0, & + field_type_noupdate = -1, & + field_type_scalar = 1, & + field_type_vector = 2, & field_type_angle = 3 !----------------------------------------------------------------- @@ -138,9 +138,10 @@ subroutine ice_init_constants( & omega_in , & ! angular velocity of earth (rad/sec) radius_in , & ! earth radius (m) spval_dbl_in , & ! special value (double precision) - spval_in , & ! special value for netCDF output shlat_in , & ! artificial masking edge (deg) nhlat_in ! artificial masking edge (deg) + real (kind=real_kind), intent(in), optional :: & + spval_in ! special value for netCDF output character(len=*),parameter :: subname='(ice_init_constants)' diff --git a/cicecore/shared/ice_distribution.F90 b/cicecore/shared/ice_distribution.F90 index 1a23b63be..0f3f6c198 100644 --- a/cicecore/shared/ice_distribution.F90 +++ b/cicecore/shared/ice_distribution.F90 @@ -61,7 +61,7 @@ function create_distribution(dist_type, nprocs, work_per_block) ! by calling the appropriate subroutine based on distribution type ! requested. Currently three distributions are supported: ! 2-d Cartesian distribution (cartesian), a load-balanced -! distribution using a rake algorithm based on an input amount of work +! distribution using a rake algorithm based on an input amount of work ! per block, and a space-filling-curve algorithm. character (*), intent(in) :: & @@ -180,14 +180,6 @@ subroutine create_local_block_ids(block_ids, distribution) do n=1,size(distribution%blockLocation) if (distribution%blockLocation(n) == my_task+1) then block_ids(distribution%blockLocalID(n)) = n - - if (debug_blocks .and. my_task == master_task) then - write(nu_diag,'(2a,3i8)') & - subname,' block id, proc, local_block: ', & - block_ids(distribution%blockLocalID(n)), & - distribution%blockLocation(n), & - distribution%blockLocalID(n) - endif endif end do endif @@ -597,7 +589,7 @@ function create_distrb_cart(nprocs, workPerBlock) result(newDistrb) newDistrb%nprocs = nprocs call proc_decomposition(nprocs, nprocsX, nprocsY) - + !---------------------------------------------------------------------- ! @@ -639,7 +631,7 @@ function create_distrb_cart(nprocs, workPerBlock) result(newDistrb) do j=1,nprocsY do i=1,nprocsX - processor = (j-1)*nprocsX + i ! 
number the processors + processor = (j-1)*nprocsX + i ! number the processors ! left to right, bot to top is = (i-1)*numBlocksXPerProc + 1 ! starting block in i @@ -783,7 +775,7 @@ function create_distrb_rake(nprocs, workPerBlock) result(newDistrb) !---------------------------------------------------------------------- dist = create_distrb_cart(nprocs, workPerBlock) - + !---------------------------------------------------------------------- ! ! if the number of blocks is close to the number of processors, @@ -909,7 +901,7 @@ function create_distrb_rake(nprocs, workPerBlock) result(newDistrb) call ice_distributionRake (workTmp, procTmp, workPerBlock, & priority, dist) end do - + deallocate(workTmp, procTmp, stat=istat) if (istat > 0) then call abort_ice( & @@ -1092,7 +1084,7 @@ function create_distrb_roundrobin(nprocs, workPerBlock) result(newDistrb) integer (int_kind), dimension(:), allocatable :: & proc_tmp ! temp processor id - + character(len=*),parameter :: subname='(create_distrb_roundrobin)' !---------------------------------------------------------------------- @@ -1143,7 +1135,7 @@ function create_distrb_roundrobin(nprocs, workPerBlock) result(newDistrb) do j=1,nblocks_y do i=1,nblocks_x - + globalID = globalID + 1 if (workPerBlock(globalID) /= 0) then @@ -1199,7 +1191,7 @@ function create_distrb_roundrobin(nprocs, workPerBlock) result(newDistrb) !---------------------------------------------------------------------- end function create_distrb_roundrobin - + !*********************************************************************** function create_distrb_spiralcenter(nprocs, workPerBlock) result(newDistrb) @@ -1237,7 +1229,7 @@ function create_distrb_spiralcenter(nprocs, workPerBlock) result(newDistrb) blocklist ! temp block ordered list integer (int_kind), dimension(:,:), allocatable :: & blockchk ! temp block check array - + character(len=*),parameter :: subname='(create_distrb_spiralcenter)' !---------------------------------------------------------------------- @@ -1424,7 +1416,7 @@ function create_distrb_spiralcenter(nprocs, workPerBlock) result(newDistrb) !---------------------------------------------------------------------- end function create_distrb_spiralcenter - + !*********************************************************************** function create_distrb_wghtfile(nprocs, workPerBlock) result(newDistrb) @@ -1461,7 +1453,7 @@ function create_distrb_wghtfile(nprocs, workPerBlock) result(newDistrb) proc_tmp ! temp processor id logical (log_kind) :: up ! direction of pe counting - + character(len=*),parameter :: subname='(create_distrb_wghtfile)' !---------------------------------------------------------------------- @@ -1590,7 +1582,7 @@ function create_distrb_wghtfile(nprocs, workPerBlock) result(newDistrb) !---------------------------------------------------------------------- end function create_distrb_wghtfile - + !*********************************************************************** function create_distrb_sectrobin(nprocs, workPerBlock) result(newDistrb) @@ -1628,7 +1620,7 @@ function create_distrb_sectrobin(nprocs, workPerBlock) result(newDistrb) logical (log_kind), dimension(:), allocatable :: & bfree ! map of assigned blocks - + integer (int_kind) :: cnt, blktogether, i2 integer (int_kind) :: totblocks, nchunks logical (log_kind) :: keepgoing @@ -1704,7 +1696,7 @@ function create_distrb_sectrobin(nprocs, workPerBlock) result(newDistrb) blktogether = max(1,nint(float(totblocks)/float(6*nprocs))) ! 
write(nu_diag,*) 'ice_distrb_sectrobin totblocks = ',totblocks,nblocks_y*nblocks_x - + !------------------------------ ! southern group of blocks ! weave back and forth in i vs j @@ -1897,7 +1889,7 @@ function create_distrb_sectrobin(nprocs, workPerBlock) result(newDistrb) !---------------------------------------------------------------------- end function create_distrb_sectrobin - + !*********************************************************************** function create_distrb_sectcart(nprocs, workPerBlock) result(newDistrb) @@ -1933,7 +1925,7 @@ function create_distrb_sectcart(nprocs, workPerBlock) result(newDistrb) integer (int_kind), dimension(:), allocatable :: & proc_tmp ! temp processor id - + integer (int_kind) :: n character(len=*),parameter :: subname='(create_distrb_sectcart)' @@ -1997,7 +1989,7 @@ function create_distrb_sectcart(nprocs, workPerBlock) result(newDistrb) cnt = 0 do j2=1,nblocks_y do i2=1,nblocks_x/2 - + if (n == 1) then i = i2 j = j2 @@ -2066,7 +2058,7 @@ function create_distrb_sectcart(nprocs, workPerBlock) result(newDistrb) !---------------------------------------------------------------------- end function create_distrb_sectcart - + !********************************************************************** function create_distrb_spacecurve(nprocs,work_per_block) @@ -2400,7 +2392,7 @@ subroutine ice_distributionRake (procWork, procID, blockWork, & integer (int_kind) :: & i, n, &! dummy loop indices np1, &! n+1 corrected for cyclical wrap - iproc, inext, &! processor ids for current and next + iproc, inext, &! processor ids for current and next nprocs, numBlocks, &! number of blocks, processors lastPriority, &! priority for most recent block minPriority, &! minimum priority diff --git a/cicecore/shared/ice_domain_size.F90 b/cicecore/shared/ice_domain_size.F90 index 56381b986..999a35f48 100644 --- a/cicecore/shared/ice_domain_size.F90 +++ b/cicecore/shared/ice_domain_size.F90 @@ -51,11 +51,11 @@ module ice_domain_size !*** values for the parameter below. A value higher than !*** necessary will not cause the code to fail, but will !*** allocate more memory than is necessary. A value that - !*** is too low will cause the code to exit. + !*** is too low will cause the code to exit. !*** A good initial guess is found using !*** max_blocks = (nx_global/block_size_x)*(ny_global/block_size_y)/ !*** num_procs - + !======================================================================= end module ice_domain_size diff --git a/cicecore/shared/ice_fileunits.F90 b/cicecore/shared/ice_fileunits.F90 index ccb518807..1854dda64 100644 --- a/cicecore/shared/ice_fileunits.F90 +++ b/cicecore/shared/ice_fileunits.F90 @@ -84,7 +84,7 @@ module ice_fileunits nu_diag_set = .false. ! flag to indicate whether nu_diag is already set integer (kind=int_kind), public :: & - ice_IOUnitsMinUnit = 11, & ! do not use unit numbers below + ice_IOUnitsMinUnit = 11, & ! do not use unit numbers below ice_IOUnitsMaxUnit = 99 ! or above, set by setup_nml logical (kind=log_kind), dimension(:), allocatable :: & @@ -101,8 +101,8 @@ module ice_fileunits !======================================================================= -! This routine grabs needed unit numbers. -! nu_diag is set to 6 (stdout) but may be reset later by the namelist. +! This routine grabs needed unit numbers. +! nu_diag is set to 6 (stdout) but may be reset later by the namelist. ! nu_nml is obtained separately. 
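! A minimal usage sketch of this unit management (illustrative only; it
! assumes the module's public get_fileunit/release_fileunit interfaces,
! and 'nu_example' and the file name are hypothetical):
!
!   integer (kind=int_kind) :: nu_example
!   call get_fileunit(nu_example)      ! reserve a free unit between
!                                      ! ice_IOUnitsMinUnit and ice_IOUnitsMaxUnit
!   open (nu_example, file='example_in', status='old')
!   ! ... read the file ...
!   close(nu_example)
!   call release_fileunit(nu_example)  ! return the unit to the pool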
subroutine init_fileunits @@ -203,7 +203,7 @@ end subroutine get_fileunit !======================================================================= -! This routine releases unit numbers at the end of a run. +! This routine releases unit numbers at the end of a run. subroutine release_all_fileunits diff --git a/cicecore/shared/ice_init_column.F90 b/cicecore/shared/ice_init_column.F90 index 89a378948..5339aa6ec 100644 --- a/cicecore/shared/ice_init_column.F90 +++ b/cicecore/shared/ice_init_column.F90 @@ -10,7 +10,7 @@ module ice_init_column use ice_blocks, only: nx_block, ny_block use ice_constants use ice_communicate, only: my_task, master_task, ice_barrier - use ice_domain_size, only: ncat, max_blocks + use ice_domain_size, only: ncat, max_blocks use ice_domain_size, only: nblyr, nilyr, nslyr use ice_domain_size, only: n_aero, n_zaero, n_algae use ice_domain_size, only: n_doc, n_dic, n_don @@ -270,12 +270,12 @@ subroutine init_shortwave Iswabsn(:,:,:,:,iblk) = c0 Sswabsn(:,:,:,:,iblk) = c0 - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi - + do j = 1, ny_block ! can be jlo, jhi do i = 1, nx_block ! can be ilo, ihi @@ -397,7 +397,7 @@ subroutine init_shortwave l_print_point=l_print_point, & initonly = .true.) endif - + !----------------------------------------------------------------- ! Define aerosol tracer on shortwave grid !----------------------------------------------------------------- @@ -414,7 +414,7 @@ subroutine init_shortwave enddo ! j !----------------------------------------------------------------- - ! Aggregate albedos + ! Aggregate albedos ! Match loop order in coupling_prep for same order of operations !----------------------------------------------------------------- @@ -528,7 +528,7 @@ end subroutine init_FY ! Initialize ice lvl tracers (call prior to reading restart data) - subroutine init_lvl(iblk, alvl, vlvl) + subroutine init_lvl(iblk, alvl, vlvl) use ice_constants, only: c0, c1 use ice_arrays_column, only: ffracn, dhsn @@ -599,7 +599,7 @@ subroutine init_meltponds_topo(apnd, hpnd, ipnd) apnd(:,:,:) = c0 hpnd(:,:,:) = c0 ipnd(:,:,:) = c0 - + end subroutine init_meltponds_topo !======================================================================= @@ -751,7 +751,7 @@ end subroutine init_aerosol ! Initialize vertical profile for biogeochemistry - subroutine init_bgc() + subroutine init_bgc() use ice_arrays_column, only: zfswin, trcrn_sw, & ocean_bio_all, ice_bio_net, snow_bio_net, & @@ -781,15 +781,15 @@ subroutine init_bgc() logical (kind=log_kind) :: & RayleighC , & solve_zsal - + type (block) :: & this_block ! block information for current block real(kind=dbl_kind), allocatable :: & trcrn_bgc(:,:) - + real(kind=dbl_kind), dimension(nilyr,ncat) :: & - sicen + sicen real(kind=dbl_kind) :: & RayleighR @@ -814,13 +814,13 @@ subroutine init_bgc() allocate(trcrn_bgc(ntrcr,ncat)) - bphi(:,:,:,:,:) = c0 ! initial porosity for no ice + bphi(:,:,:,:,:) = c0 ! initial porosity for no ice iDi (:,:,:,:,:) = c0 ! interface diffusivity bTiz(:,:,:,:,:) = c0 ! initial bio grid ice temperature iki (:,:,:,:,:) = c0 ! permeability ocean_bio_all(:,:,:,:) = c0 - ice_bio_net (:,:,:,:) = c0 ! integrated ice tracer conc (mmol/m^2 or mg/m^2) + ice_bio_net (:,:,:,:) = c0 ! integrated ice tracer conc (mmol/m^2 or mg/m^2) snow_bio_net (:,:,:,:) = c0 ! integrated snow tracer conc (mmol/m^2 or mg/m^2) zfswin (:,:,:,:,:) = c0 ! 
shortwave flux on bio grid trcrn_sw (:,:,:,:,:) = c0 ! tracers active in the shortwave calculation @@ -831,20 +831,20 @@ subroutine init_bgc() !----------------------------------------------------------------- ! zsalinity initialization !----------------------------------------------------------------- - - if (solve_zsal) then ! default values + + if (solve_zsal) then ! default values !$OMP PARALLEL DO PRIVATE(iblk,i,j,k,n,ilo,ihi,jlo,jhi,this_block,trcrn_bgc) do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi do j = jlo, jhi - do i = ilo, ihi + do i = ilo, ihi call icepack_init_zsalinity(nblyr=nblyr, ntrcr_o=ntrcr_o, & Rayleigh_criteria = RayleighC, & Rayleigh_real = RayleighR, & @@ -863,7 +863,7 @@ subroutine init_bgc() enddo endif enddo ! i - enddo ! j + enddo ! j enddo ! iblk !$OMP END PARALLEL DO call icepack_warnings_flush(nu_diag) @@ -877,15 +877,15 @@ subroutine init_bgc() ! biogeochemistry initialization !----------------------------------------------------------------- - if (.not. restart_bgc) then - + if (.not. restart_bgc) then + !----------------------------------------------------------------- ! Initial Ocean Values if not coupled to the ocean bgc !----------------------------------------------------------------- !$OMP PARALLEL DO PRIVATE(iblk,i,j,n,ilo,ihi,jlo,jhi,this_block) do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo @@ -919,14 +919,14 @@ subroutine init_bgc() !$OMP PARALLEL DO PRIVATE(iblk,i,j,n,ilo,ihi,jlo,jhi,this_block) do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi do j = jlo, jhi - do i = ilo, ihi + do i = ilo, ihi call icepack_load_ocean_bio_array(max_nbtrcr=icepack_max_nbtrcr, & max_algae=icepack_max_algae, max_don=icepack_max_don, & @@ -934,7 +934,7 @@ subroutine init_bgc() max_dic=icepack_max_dic, max_aero=icepack_max_aero, & nit =nit (i,j, iblk), amm=amm(i,j, iblk), sil =sil (i,j, iblk), & dmsp=dmsp(i,j, iblk), dms=dms(i,j, iblk), algalN=algalN(i,j,:,iblk), & - doc =doc (i,j,:,iblk), don=don(i,j,:,iblk), dic =dic (i,j,:,iblk), & + doc =doc (i,j,:,iblk), don=don(i,j,:,iblk), dic =dic (i,j,:,iblk), & fed =fed (i,j,:,iblk), fep=fep(i,j,:,iblk), zaeros=zaeros(i,j,:,iblk), & hum=hum (i,j, iblk), ocean_bio_all=ocean_bio_all(i,j,:,iblk)) @@ -948,18 +948,18 @@ subroutine init_bgc() if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) - if (.not. restart_bgc) then + if (.not. restart_bgc) then !$OMP PARALLEL DO PRIVATE(iblk,i,j,k,n,ilo,ihi,jlo,jhi,this_block,sicen,trcrn_bgc) do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi do j = jlo, jhi - do i = ilo, ihi + do i = ilo, ihi do n = 1, ncat do k = 1, nilyr sicen(k,n) = trcrn(i,j,nt_sice+k-1,n,iblk) @@ -987,7 +987,7 @@ subroutine init_bgc() ! read restart to complete BGC initialization !----------------------------------------------------------------- - if (restart_zsal .or. restart_bgc) call read_restart_bgc + if (restart_zsal .or. 
restart_bgc) call read_restart_bgc deallocate(trcrn_bgc) @@ -1030,7 +1030,7 @@ subroutine init_hbrine() if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__,line= __LINE__) - first_ice(:,:,:,:) = .true. + first_ice(:,:,:,:) = .true. if (tr_brine) trcrn(:,:,nt_fbri,:,:) = c1 end subroutine init_hbrine @@ -1038,7 +1038,7 @@ end subroutine init_hbrine !======================================================================= ! Namelist variables, set to default values; may be altered at run time -! +! ! author Elizabeth C. Hunke, LANL ! Nicole Jeffery, LANL @@ -1060,7 +1060,7 @@ subroutine input_zbgc tr_bgc_N, tr_bgc_C, tr_bgc_chl, & tr_bgc_DON, tr_bgc_Fe, tr_zaero, & tr_bgc_hum, tr_aero - + integer (kind=int_kind) :: & ktherm @@ -1087,7 +1087,7 @@ subroutine input_zbgc restore_bgc, restart_bgc, scale_bgc, solve_zsal, restart_zsal, & tr_bgc_Nit, tr_bgc_C, tr_bgc_chl, tr_bgc_Am, tr_bgc_Sil, & tr_bgc_DMS, tr_bgc_PON, tr_bgc_hum, tr_bgc_DON, tr_bgc_Fe, & - grid_o, grid_o_t, l_sk, grid_oS, optics_file, optics_file_fieldname, & + grid_o, grid_o_t, l_sk, grid_oS, optics_file, optics_file_fieldname, & l_skS, phi_snow, initbio_frac, frazil_scav, & ratio_Si2N_diatoms , ratio_Si2N_sp , ratio_Si2N_phaeo , & ratio_S2N_diatoms , ratio_S2N_sp , ratio_S2N_phaeo , & @@ -1124,9 +1124,9 @@ subroutine input_zbgc fedtype_1 , feptype_1 , zaerotype_bc1 , & zaerotype_bc2 , zaerotype_dust1 , zaerotype_dust2 , & zaerotype_dust3 , zaerotype_dust4 , ratio_C2N_diatoms , & - ratio_C2N_sp , ratio_C2N_phaeo , ratio_chl2N_diatoms, & + ratio_C2N_sp , ratio_C2N_phaeo , ratio_chl2N_diatoms, & ratio_chl2N_sp , ratio_chl2N_phaeo , F_abs_chl_diatoms , & - F_abs_chl_sp , F_abs_chl_phaeo , ratio_C2N_proteins + F_abs_chl_sp , F_abs_chl_phaeo , ratio_C2N_proteins !----------------------------------------------------------------- @@ -1151,22 +1151,22 @@ subroutine input_zbgc restart_bgc = .false. ! biogeochemistry restart restart_zsal = .false. ! salinity restart restart_hbrine = .false. ! hbrine restart - scale_bgc = .false. ! initial bgc tracers proportional to S - skl_bgc = .false. ! solve skeletal biochemistry + scale_bgc = .false. ! initial bgc tracers proportional to S + skl_bgc = .false. ! solve skeletal biochemistry z_tracers = .false. ! solve vertically resolved tracers dEdd_algae = .false. ! dynamic algae contributes to shortwave absorption ! in delta-Eddington calculation - solve_zbgc = .false. ! turn on z layer biochemistry - tr_bgc_PON = .false. !--------------------------------------------- + solve_zbgc = .false. ! turn on z layer biochemistry + tr_bgc_PON = .false. !--------------------------------------------- tr_bgc_Nit = .false. ! biogeochemistry (skl or zbgc) tr_bgc_C = .false. ! if skl_bgc = .true. then skl tr_bgc_chl = .false. ! if z_tracers = .true. then vertically resolved tr_bgc_Sil = .false. ! if z_tracers + solve_zbgc = .true. then - tr_bgc_Am = .false. ! vertically resolved with reactions + tr_bgc_Am = .false. ! vertically resolved with reactions tr_bgc_DMS = .false. !------------------------------------------------ - tr_bgc_DON = .false. ! + tr_bgc_DON = .false. ! tr_bgc_hum = .false. ! - tr_bgc_Fe = .false. ! + tr_bgc_Fe = .false. ! tr_bgc_N = .true. ! ! brine height parameter @@ -1175,17 +1175,17 @@ subroutine input_zbgc ! skl biology parameters bgc_flux_type = 'Jin2006'! type of ocean-ice piston velocity ('constant') - ! z biology parameters - grid_o = c5 ! for bottom flux - grid_o_t = c5 ! for top flux - l_sk = 7.0_dbl_kind ! characteristic diffusive scale (m) + ! 
z biology parameters + grid_o = c5 ! for bottom flux + grid_o_t = c5 ! for top flux + l_sk = 7.0_dbl_kind ! characteristic diffusive scale (m) initbio_frac = c1 ! fraction of ocean trcr concentration in bio trcrs - frazil_scav = c1 ! increase in initial bio tracer from ocean scavenging - ratio_Si2N_diatoms = 1.8_dbl_kind ! algal Si to N (mol/mol) + frazil_scav = c1 ! increase in initial bio tracer from ocean scavenging + ratio_Si2N_diatoms = 1.8_dbl_kind ! algal Si to N (mol/mol) ratio_Si2N_sp = c0 ! diatoms, small plankton, phaeocystis ratio_Si2N_phaeo = c0 ratio_S2N_diatoms = 0.03_dbl_kind ! algal S to N (mol/mol) - ratio_S2N_sp = 0.03_dbl_kind + ratio_S2N_sp = 0.03_dbl_kind ratio_S2N_phaeo = 0.03_dbl_kind ratio_Fe2C_diatoms = 0.0033_dbl_kind ! algal Fe to C (umol/mol) ratio_Fe2C_sp = 0.0033_dbl_kind @@ -1196,7 +1196,7 @@ subroutine input_zbgc ratio_Fe2DON = 0.023_dbl_kind ! Fe to N of DON (nmol/umol) ratio_Fe2DOC_s = p1 ! Fe to C of DOC (nmol/umol) saccharids ratio_Fe2DOC_l = 0.033_dbl_kind ! Fe to C of DOC (nmol/umol) lipids - fr_resp = 0.05_dbl_kind ! frac of algal growth lost due to respiration + fr_resp = 0.05_dbl_kind ! frac of algal growth lost due to respiration tau_min = 5200.0_dbl_kind ! rapid mobile to stationary exchanges (s) tau_max = 1.73e5_dbl_kind ! long time mobile to stationary exchanges (s) algal_vel = 1.11e-8_dbl_kind! 0.5 cm/d(m/s) Lavoie 2005 1.5 cm/day @@ -1205,13 +1205,13 @@ subroutine input_zbgc chlabs_diatoms = 0.03_dbl_kind ! chl absorption (1/m/(mg/m^3)) chlabs_sp = 0.01_dbl_kind chlabs_phaeo = 0.05_dbl_kind - alpha2max_low_diatoms = 0.8_dbl_kind ! light limitation (1/(W/m^2)) + alpha2max_low_diatoms = 0.8_dbl_kind ! light limitation (1/(W/m^2)) alpha2max_low_sp = 0.67_dbl_kind alpha2max_low_phaeo = 0.67_dbl_kind - beta2max_diatoms = 0.018_dbl_kind ! light inhibition (1/(W/m^2)) + beta2max_diatoms = 0.018_dbl_kind ! light inhibition (1/(W/m^2)) beta2max_sp = 0.0025_dbl_kind beta2max_phaeo = 0.01_dbl_kind - mu_max_diatoms = 1.2_dbl_kind ! maximum growth rate (1/day) + mu_max_diatoms = 1.2_dbl_kind ! maximum growth rate (1/day) mu_max_sp = 0.851_dbl_kind mu_max_phaeo = 0.851_dbl_kind grow_Tdep_diatoms = 0.06_dbl_kind ! Temperature dependence of growth (1/C) @@ -1241,10 +1241,10 @@ subroutine input_zbgc K_Fe_diatoms = c1 ! iron half saturation (nM) K_Fe_sp = 0.2_dbl_kind K_Fe_phaeo = p1 - f_don_protein = 0.6_dbl_kind ! fraction of spilled grazing to proteins - kn_bac_protein = 0.03_dbl_kind ! Bacterial degredation of DON (1/d) - f_don_Am_protein = 0.25_dbl_kind ! fraction of remineralized DON to ammonium - f_doc_s = 0.4_dbl_kind ! fraction of mortality to DOC + f_don_protein = 0.6_dbl_kind ! fraction of spilled grazing to proteins + kn_bac_protein = 0.03_dbl_kind ! Bacterial degradation of DON (1/d) + f_don_Am_protein = 0.25_dbl_kind ! fraction of remineralized DON to ammonium + f_doc_s = 0.4_dbl_kind ! fraction of mortality to DOC f_doc_l = 0.4_dbl_kind f_exude_s = c1 ! fraction of exudation to DOC f_exude_l = c1 @@ -1254,15 +1254,15 @@ subroutine input_zbgc fsal = c1 ! Salinity limitation (ppt) op_dep_min = p1 ! Light attenuates for optical depths exceeding min fr_graze_s = p5 ! fraction of grazing spilled or slopped - fr_graze_e = p5 ! fraction of assimilation excreted + fr_graze_e = p5 ! fraction of assimilation excreted fr_mort2min = p5 ! fractionation of mortality to Am fr_dFe = 0.3_dbl_kind ! fraction of remineralized nitrogen ! (in units of algal iron) - k_nitrif = c0 ! nitrification rate (1/day) + k_nitrif = c0 ! 
nitrification rate (1/day) t_iron_conv = 3065.0_dbl_kind ! desorption loss pFe to dFe (day) - max_loss = 0.9_dbl_kind ! restrict uptake to % of remaining value - max_dfe_doc1 = 0.2_dbl_kind ! max ratio of dFe to saccharides in the ice - !(nM Fe/muM C) + max_loss = 0.9_dbl_kind ! restrict uptake to % of remaining value + max_dfe_doc1 = 0.2_dbl_kind ! max ratio of dFe to saccharides in the ice + !(nM Fe/muM C) fr_resp_s = 0.75_dbl_kind ! DMSPd fraction of respiration loss as DMSPd y_sk_DMS = p5 ! fraction conversion given high yield t_sk_conv = 3.0_dbl_kind ! Stefels conversion time (d) @@ -1296,11 +1296,11 @@ subroutine input_zbgc F_abs_chl_diatoms = 2.0_dbl_kind ! scales absorbed radiation for dEdd F_abs_chl_sp = 4.0_dbl_kind F_abs_chl_phaeo = 5.0 - ratio_C2N_proteins = 7.0_dbl_kind ! ratio of C to N in proteins (mol/mol) + ratio_C2N_proteins = 7.0_dbl_kind ! ratio of C to N in proteins (mol/mol) ! z salinity parameters - grid_oS = c5 ! for bottom flux - l_skS = 7.0_dbl_kind ! characteristic diffusive scale (m) + grid_oS = c5 ! for bottom flux + l_skS = 7.0_dbl_kind ! characteristic diffusive scale (m) !----------------------------------------------------------------- ! read from input file @@ -1333,10 +1333,10 @@ subroutine input_zbgc ! broadcast !----------------------------------------------------------------- - call broadcast_scalar(solve_zsal, master_task) - call broadcast_scalar(restart_zsal, master_task) + call broadcast_scalar(solve_zsal, master_task) + call broadcast_scalar(restart_zsal, master_task) call broadcast_scalar(tr_brine, master_task) - call broadcast_scalar(restart_hbrine, master_task) + call broadcast_scalar(restart_hbrine, master_task) call broadcast_scalar(phi_snow, master_task) call broadcast_scalar(grid_oS, master_task) @@ -1354,14 +1354,14 @@ subroutine input_zbgc call broadcast_scalar(tr_bgc_Am, master_task) call broadcast_scalar(tr_bgc_Sil, master_task) call broadcast_scalar(tr_bgc_hum, master_task) - call broadcast_scalar(tr_bgc_DMS, master_task) - call broadcast_scalar(tr_bgc_PON, master_task) - call broadcast_scalar(tr_bgc_DON, master_task) - call broadcast_scalar(tr_bgc_Fe, master_task) + call broadcast_scalar(tr_bgc_DMS, master_task) + call broadcast_scalar(tr_bgc_PON, master_task) + call broadcast_scalar(tr_bgc_DON, master_task) + call broadcast_scalar(tr_bgc_Fe, master_task) call broadcast_scalar(z_tracers, master_task) call broadcast_scalar(tr_zaero, master_task) - call broadcast_scalar(dEdd_algae, master_task) + call broadcast_scalar(dEdd_algae, master_task) call broadcast_scalar(modal_aero, master_task) call broadcast_scalar(optics_file, master_task) call broadcast_scalar(optics_file_fieldname, master_task) @@ -1395,31 +1395,31 @@ subroutine input_zbgc call broadcast_scalar(chlabs_diatoms , master_task) call broadcast_scalar(chlabs_sp , master_task) call broadcast_scalar(chlabs_phaeo , master_task) - call broadcast_scalar(alpha2max_low_diatoms , master_task) + call broadcast_scalar(alpha2max_low_diatoms , master_task) call broadcast_scalar(alpha2max_low_sp , master_task) call broadcast_scalar(alpha2max_low_phaeo , master_task) - call broadcast_scalar(beta2max_diatoms , master_task) - call broadcast_scalar(beta2max_sp , master_task) - call broadcast_scalar(beta2max_phaeo , master_task) - call broadcast_scalar(mu_max_diatoms , master_task) - call broadcast_scalar(mu_max_sp , master_task) - call broadcast_scalar(mu_max_phaeo , master_task) - call broadcast_scalar(grow_Tdep_diatoms, master_task) - call broadcast_scalar(grow_Tdep_sp , master_task) - call 
broadcast_scalar(grow_Tdep_phaeo , master_task) - call broadcast_scalar(fr_graze_diatoms , master_task) - call broadcast_scalar(fr_graze_sp , master_task) - call broadcast_scalar(fr_graze_phaeo , master_task) - call broadcast_scalar(mort_pre_diatoms , master_task) - call broadcast_scalar(mort_pre_sp , master_task) - call broadcast_scalar(mort_pre_phaeo , master_task) - call broadcast_scalar(mort_Tdep_diatoms, master_task) - call broadcast_scalar(mort_Tdep_sp , master_task) - call broadcast_scalar(mort_Tdep_phaeo , master_task) - call broadcast_scalar(k_exude_diatoms , master_task) - call broadcast_scalar(k_exude_sp , master_task) - call broadcast_scalar(k_exude_phaeo , master_task) - call broadcast_scalar(K_Nit_diatoms , master_task) + call broadcast_scalar(beta2max_diatoms , master_task) + call broadcast_scalar(beta2max_sp , master_task) + call broadcast_scalar(beta2max_phaeo , master_task) + call broadcast_scalar(mu_max_diatoms , master_task) + call broadcast_scalar(mu_max_sp , master_task) + call broadcast_scalar(mu_max_phaeo , master_task) + call broadcast_scalar(grow_Tdep_diatoms, master_task) + call broadcast_scalar(grow_Tdep_sp , master_task) + call broadcast_scalar(grow_Tdep_phaeo , master_task) + call broadcast_scalar(fr_graze_diatoms , master_task) + call broadcast_scalar(fr_graze_sp , master_task) + call broadcast_scalar(fr_graze_phaeo , master_task) + call broadcast_scalar(mort_pre_diatoms , master_task) + call broadcast_scalar(mort_pre_sp , master_task) + call broadcast_scalar(mort_pre_phaeo , master_task) + call broadcast_scalar(mort_Tdep_diatoms, master_task) + call broadcast_scalar(mort_Tdep_sp , master_task) + call broadcast_scalar(mort_Tdep_phaeo , master_task) + call broadcast_scalar(k_exude_diatoms , master_task) + call broadcast_scalar(k_exude_sp , master_task) + call broadcast_scalar(k_exude_phaeo , master_task) + call broadcast_scalar(K_Nit_diatoms , master_task) call broadcast_scalar(K_Nit_sp , master_task) call broadcast_scalar(K_Nit_phaeo , master_task) call broadcast_scalar(K_Am_diatoms , master_task) @@ -1435,17 +1435,17 @@ subroutine input_zbgc call broadcast_scalar(kn_bac_protein , master_task) call broadcast_scalar(f_don_Am_protein , master_task) call broadcast_scalar(f_doc_s , master_task) - call broadcast_scalar(f_doc_l , master_task) + call broadcast_scalar(f_doc_l , master_task) call broadcast_scalar(f_exude_s , master_task) call broadcast_scalar(f_exude_l , master_task) - call broadcast_scalar(k_bac_s , master_task) + call broadcast_scalar(k_bac_s , master_task) call broadcast_scalar(k_bac_l , master_task) call broadcast_scalar(T_max , master_task) call broadcast_scalar(fsal , master_task) call broadcast_scalar(op_dep_min , master_task) - call broadcast_scalar(fr_graze_s , master_task) - call broadcast_scalar(fr_graze_e , master_task) - call broadcast_scalar(fr_mort2min , master_task) + call broadcast_scalar(fr_graze_s , master_task) + call broadcast_scalar(fr_graze_e , master_task) + call broadcast_scalar(fr_mort2min , master_task) call broadcast_scalar(fr_dFe , master_task) call broadcast_scalar(k_nitrif , master_task) call broadcast_scalar(t_iron_conv , master_task) @@ -1453,18 +1453,18 @@ subroutine input_zbgc call broadcast_scalar(max_dfe_doc1 , master_task) call broadcast_scalar(fr_resp_s , master_task) call broadcast_scalar(y_sk_DMS , master_task) - call broadcast_scalar(t_sk_conv , master_task) + call broadcast_scalar(t_sk_conv , master_task) call broadcast_scalar(t_sk_ox , master_task) call broadcast_scalar(algaltype_diatoms, master_task) - call 
broadcast_scalar(algaltype_sp , master_task) - call broadcast_scalar(algaltype_phaeo , master_task) + call broadcast_scalar(algaltype_sp , master_task) + call broadcast_scalar(algaltype_phaeo , master_task) call broadcast_scalar(nitratetype , master_task) call broadcast_scalar(ammoniumtype , master_task) call broadcast_scalar(silicatetype , master_task) - call broadcast_scalar(dmspptype , master_task) - call broadcast_scalar(dmspdtype , master_task) + call broadcast_scalar(dmspptype , master_task) + call broadcast_scalar(dmspdtype , master_task) call broadcast_scalar(humtype , master_task) - call broadcast_scalar(doctype_s , master_task) + call broadcast_scalar(doctype_s , master_task) call broadcast_scalar(doctype_l , master_task) call broadcast_scalar(dontype_protein , master_task) call broadcast_scalar(fedtype_1 , master_task) @@ -1484,7 +1484,7 @@ subroutine input_zbgc call broadcast_scalar(F_abs_chl_diatoms , master_task) call broadcast_scalar(F_abs_chl_sp , master_task) call broadcast_scalar(F_abs_chl_phaeo , master_task) - call broadcast_scalar(ratio_C2N_proteins , master_task) + call broadcast_scalar(ratio_C2N_proteins , master_task) !----------------------------------------------------------------- ! zsalinity and brine @@ -1503,7 +1503,7 @@ subroutine input_zbgc write(nu_diag,*) subname,' ERROR: solve_zsal=T but 0 zsalinity tracers' endif abort_flag = 101 - endif + endif if (solve_zsal .and. ((.not. tr_brine) .or. (ktherm /= 1))) then if (my_task == master_task) then @@ -1517,7 +1517,7 @@ subroutine input_zbgc write(nu_diag,*) subname,' ERROR: tr_brine=T but no biology layers compiled' endif abort_flag = 103 - endif + endif !----------------------------------------------------------------- ! biogeochemistry @@ -1552,14 +1552,14 @@ subroutine input_zbgc abort_flag = 107 endif - if (dEdd_algae .AND. trim(shortwave) /= 'dEdd') then + if (dEdd_algae .AND. trim(shortwave) /= 'dEdd') then if (my_task == master_task) then write(nu_diag,*) subname,' ERROR: dEdd_algae = T but shortwave /= dEdd' endif abort_flag = 108 endif - if (dEdd_algae .AND. (.NOT. tr_bgc_N) .AND. (.NOT. tr_zaero)) then + if (dEdd_algae .AND. (.NOT. tr_bgc_N) .AND. (.NOT. tr_zaero)) then if (my_task == master_task) then write(nu_diag,*) subname,' ERROR: need tr_bgc_N or tr_zaero for dEdd_algae' endif @@ -1572,8 +1572,8 @@ subroutine input_zbgc endif abort_flag = 110 endif - - if (modal_aero .AND. trim(shortwave) /= 'dEdd') then + + if (modal_aero .AND. trim(shortwave) /= 'dEdd') then if (my_task == master_task) then write(nu_diag,*) subname,' ERROR: modal_aero = T but shortwave /= dEdd' endif @@ -1889,7 +1889,7 @@ subroutine count_tracers tr_bgc_N, tr_bgc_C, tr_bgc_chl, & tr_bgc_DON, tr_bgc_Fe, tr_zaero, & tr_bgc_hum - + logical (kind=log_kind) :: & solve_zsal, skl_bgc, z_tracers @@ -1971,7 +1971,7 @@ subroutine count_tracers nt_ipnd = ntrcr ! on level-ice ponds (if frzpnd='hlid') endif if (tr_pond_topo) then - ntrcr = ntrcr + 1 ! + ntrcr = ntrcr + 1 ! nt_ipnd = ntrcr ! refrozen pond ice lid thickness endif endif @@ -2014,7 +2014,7 @@ subroutine count_tracers !tcx, modify code so we don't have to reset n_aero here n_aero = 0 !echmod - this is not getting set correctly (overwritten later?) endif - + !----------------------------------------------------------------- ! initialize zbgc tracer indices !----------------------------------------------------------------- @@ -2755,7 +2755,7 @@ subroutine init_zbgc if (skl_bgc .or. 
z_tracers) then if (tr_bgc_N) then - do mm = 1, n_algae + do mm = 1, n_algae call init_bgc_trcr(nk, nt_fbri, & nt_bgc_N(mm), nlt_bgc_N(mm), & algaltype(mm), nt_depend, & @@ -2775,14 +2775,14 @@ subroutine init_zbgc nt_strata, bio_index) bio_index_o(nlt_bgc_Nit) = icepack_max_algae + 1 endif ! tr_bgc_Nit - + if (tr_bgc_C) then ! ! Algal C is not yet distinct from algal N ! * Reqires exudation and/or changing C:N ratios ! for implementation ! - ! do mm = 1,n_algae + ! do mm = 1,n_algae ! call init_bgc_trcr(nk, nt_fbri, & ! nt_bgc_C(mm), nlt_bgc_C(mm), & ! algaltype(mm), nt_depend, & @@ -2832,7 +2832,7 @@ subroutine init_zbgc trcr_base, n_trcr_strata, & nt_strata, bio_index) bio_index_o(nlt_bgc_Am) = 2*icepack_max_algae + icepack_max_doc + icepack_max_dic + 2 - endif + endif if (tr_bgc_Sil) then call init_bgc_trcr(nk, nt_fbri, & nt_bgc_Sil, nlt_bgc_Sil, & @@ -2841,7 +2841,7 @@ subroutine init_zbgc trcr_base, n_trcr_strata, & nt_strata, bio_index) bio_index_o(nlt_bgc_Sil) = 2*icepack_max_algae + icepack_max_doc + icepack_max_dic + 3 - endif + endif if (tr_bgc_DMS) then ! all together call init_bgc_trcr(nk, nt_fbri, & nt_bgc_DMSPp, nlt_bgc_DMSPp, & @@ -2866,7 +2866,7 @@ subroutine init_zbgc trcr_base, n_trcr_strata, & nt_strata, bio_index) bio_index_o(nlt_bgc_DMS) = 2*icepack_max_algae + icepack_max_doc + icepack_max_dic + 6 - endif + endif if (tr_bgc_PON) then call init_bgc_trcr(nk, nt_fbri, & nt_bgc_PON, nlt_bgc_PON, & @@ -2908,8 +2908,8 @@ subroutine init_zbgc bio_index_o(nlt_bgc_Fep(mm)) = 2*icepack_max_algae + icepack_max_doc + icepack_max_dic & + icepack_max_don + icepack_max_fe + 7 + mm enddo ! mm - endif ! tr_bgc_Fe - + endif ! tr_bgc_Fe + if (tr_bgc_hum) then call init_bgc_trcr(nk, nt_fbri, & nt_bgc_hum, nlt_bgc_hum, & @@ -2918,7 +2918,7 @@ subroutine init_zbgc trcr_base, n_trcr_strata, & nt_strata, bio_index) bio_index_o(nlt_bgc_hum) = 2*icepack_max_algae + icepack_max_doc + 8 + icepack_max_dic & - + icepack_max_don + 2*icepack_max_fe + icepack_max_aero + + icepack_max_don + 2*icepack_max_fe + icepack_max_aero endif endif ! skl_bgc or z_tracers @@ -2942,7 +2942,7 @@ subroutine init_zbgc ! and 2 snow layers (snow surface + interior) nk = nblyr + 1 - nt_depend = 2 + nt_fbri + ntd + nt_depend = 2 + nt_fbri + ntd ! z layer aerosols if (tr_zaero) then @@ -2963,15 +2963,15 @@ subroutine init_zbgc endif ! tr_zaero if (nbtrcr > 0) then - do k = 1,nbtrcr - zbgc_frac_init(k) = c1 - trcr_depend(nt_zbgc_frac+k-1) = 2+nt_fbri + do k = 1,nbtrcr + zbgc_frac_init(k) = c1 + trcr_depend(nt_zbgc_frac+k-1) = 2+nt_fbri trcr_base(nt_zbgc_frac+ k - 1,1) = c0 trcr_base(nt_zbgc_frac+ k - 1,2) = c1 trcr_base(nt_zbgc_frac+ k - 1,3) = c0 - n_trcr_strata(nt_zbgc_frac+ k - 1)= 1 + n_trcr_strata(nt_zbgc_frac+ k - 1)= 1 nt_strata(nt_zbgc_frac+ k - 1,1) = nt_fbri - nt_strata(nt_zbgc_frac+ k - 1,2) = 0 + nt_strata(nt_zbgc_frac+ k - 1,2) = 0 tau_ret(k) = c1 tau_rel(k) = c1 if (bgc_tracer_type(k) >= c0 .and. bgc_tracer_type(k) < p5) then @@ -2999,7 +2999,7 @@ subroutine init_zbgc do k = 1, nbtrcr zbgc_init_frac(k) = frazil_scav if (bgc_tracer_type(k) < c0) zbgc_init_frac(k) = initbio_frac - enddo + enddo !----------------------------------------------------------------- ! set values in icepack @@ -3016,7 +3016,7 @@ subroutine init_zbgc !----------------------------------------------------------------- ! 
final consistency checks - !----------------------------------------------------------------- + !----------------------------------------------------------------- if (nbtrcr > icepack_max_nbtrcr) then write (nu_diag,*) subname,' ' write (nu_diag,*) subname,'nbtrcr > icepack_max_nbtrcr' @@ -3037,13 +3037,13 @@ subroutine init_zbgc write(nu_diag,1020) ' number of bio tracers = ', nbtrcr write(nu_diag,1020) ' number of Isw tracers = ', nbtrcr_sw - + elseif (z_tracers) then - + write(nu_diag,1020) ' number of ztracers = ', nbtrcr write(nu_diag,1020) ' number of Isw tracers = ', nbtrcr_sw write(nu_diag,1000) ' initbio_frac = ', initbio_frac - write(nu_diag,1000) ' frazil_scav = ', frazil_scav + write(nu_diag,1000) ' frazil_scav = ', frazil_scav endif ! skl_bgc or solve_bgc endif ! master_task @@ -3092,7 +3092,7 @@ subroutine init_bgc_trcr(nk, nt_fbri, & integer (kind=int_kind) :: & k , & ! loop index n_strata , & ! temporary values - nt_strata1, & ! + nt_strata1, & ! nt_strata2 real (kind=dbl_kind) :: & @@ -3105,7 +3105,7 @@ subroutine init_bgc_trcr(nk, nt_fbri, & !-------- bgc_tracer_type(nlt_bgc) = bgctype - + if (nk > 1) then ! include vertical bgc in snow do k = nk, nk+1 trcr_depend (nt_bgc + k ) = 2 ! snow volume @@ -3117,10 +3117,10 @@ subroutine init_bgc_trcr(nk, nt_fbri, & nt_strata (nt_bgc + k,2) = 0 enddo - trcr_base1 = c0 - trcr_base2 = c1 + trcr_base1 = c0 + trcr_base2 = c1 trcr_base3 = c0 - n_strata = 1 + n_strata = 1 nt_strata1 = nt_fbri nt_strata2 = 0 else ! nk = 1 diff --git a/cicecore/shared/ice_restart_column.F90 b/cicecore/shared/ice_restart_column.F90 index b28ae2f60..a15f9d2c1 100644 --- a/cicecore/shared/ice_restart_column.F90 +++ b/cicecore/shared/ice_restart_column.F90 @@ -41,7 +41,7 @@ module ice_restart_column write_restart_bgc, read_restart_bgc, & write_restart_hbrine, read_restart_hbrine - logical (kind=log_kind), public :: & + logical (kind=log_kind), public :: & restart_age , & ! if .true., read age tracer restart file restart_FY , & ! if .true., read FY tracer restart file restart_lvl , & ! if .true., read lvl tracer restart file @@ -54,7 +54,7 @@ module ice_restart_column restart_fsd , & ! if .true., read floe size restart file restart_iso , & ! if .true., read isotope tracer restart file restart_aero , & ! if .true., read aerosol tracer restart file - restart_zsal , & ! if .true., read Salinity from restart file + restart_zsal , & ! if .true., read Salinity from restart file restart_hbrine , & ! if .true., read hbrine from restart file restart_bgc ! if .true., read bgc restart file @@ -302,7 +302,7 @@ end subroutine write_restart_pond_cesm subroutine read_restart_pond_cesm() - use ice_fileunits, only: nu_restart_pond + use ice_fileunits, only: nu_restart_pond use ice_state, only: trcrn ! local variables @@ -379,7 +379,7 @@ end subroutine write_restart_pond_lvl subroutine read_restart_pond_lvl() use ice_arrays_column, only: dhsn, ffracn - use ice_fileunits, only: nu_restart_pond + use ice_fileunits, only: nu_restart_pond use ice_flux, only: fsnow use ice_state, only: trcrn @@ -459,7 +459,7 @@ end subroutine write_restart_pond_topo subroutine read_restart_pond_topo() - use ice_fileunits, only: nu_restart_pond + use ice_fileunits, only: nu_restart_pond use ice_state, only: trcrn ! 
local variables @@ -502,7 +502,7 @@ subroutine write_restart_snow() logical (kind=log_kind) :: diag integer (kind=int_kind) :: nt_smice, nt_smliq, nt_rhos, nt_rsnw, k - character*3 ck + character(len=3) :: ck character(len=*),parameter :: subname='(write_restart_snow)' call icepack_query_tracer_indices(nt_smice_out=nt_smice, & @@ -544,7 +544,7 @@ subroutine read_restart_snow() logical (kind=log_kind) :: & diag integer (kind=int_kind) :: nt_smice, nt_smliq, nt_rhos, nt_rsnw, k - character*3 ck + character(len=3) :: ck character(len=*),parameter :: subname='(read_restart_snow)' call icepack_query_tracer_indices(nt_smice_out=nt_smice, & @@ -589,7 +589,7 @@ subroutine write_restart_fsd() logical (kind=log_kind) :: diag integer (kind=int_kind) :: nt_fsd, k - character*3 ck + character(len=3) :: ck character(len=*),parameter :: subname='(write_restart_fsd)' call icepack_query_tracer_indices(nt_fsd_out=nt_fsd) @@ -624,7 +624,7 @@ subroutine read_restart_fsd() logical (kind=log_kind) :: & diag integer (kind=int_kind) :: nt_fsd, k - character*3 ck + character(len=3) :: ck character(len=*),parameter :: subname='(read_restart_fsd)' call icepack_query_tracer_indices(nt_fsd_out=nt_fsd) @@ -660,7 +660,7 @@ subroutine write_restart_iso() logical (kind=log_kind) :: diag integer (kind=int_kind) :: nt_isosno, nt_isoice, k - character*3 ck + character(len=3) :: ck character(len=*),parameter :: subname='(write_restart_iso)' call icepack_query_tracer_indices(nt_isosno_out=nt_isosno, nt_isoice_out=nt_isoice) @@ -702,7 +702,7 @@ subroutine read_restart_iso() logical (kind=log_kind) :: & diag integer (kind=int_kind) :: nt_isosno, nt_isoice, k - character*3 ck + character(len=3) :: ck character(len=*),parameter :: subname='(read_restart_iso)' call icepack_query_tracer_indices(nt_isosno_out=nt_isosno, nt_isoice_out=nt_isoice) @@ -885,14 +885,14 @@ subroutine read_restart_hbrine() !$OMP PARALLEL DO PRIVATE(iblk,i,j,n,ilo,ihi,jlo,jhi,this_block) do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi do j = jlo, jhi - do i = ilo, ihi + do i = ilo, ihi do n = 1, ncat if (first_ice_real(i,j,n,iblk) >= p5) then first_ice (i,j,n,iblk) = .true. @@ -900,7 +900,7 @@ subroutine read_restart_hbrine() first_ice (i,j,n,iblk) = .false. endif enddo ! ncat - enddo ! i + enddo ! i enddo ! j enddo ! iblk !$OMP END PARALLEL DO @@ -946,14 +946,14 @@ subroutine write_restart_hbrine() !$OMP PARALLEL DO PRIVATE(iblk,i,j,n,ilo,ihi,jlo,jhi,this_block) do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi do j = jlo, jhi - do i = ilo, ihi + do i = ilo, ihi do n = 1, ncat ! zero out first_ice over land if (tmask(i,j,iblk) .and. first_ice (i,j,n,iblk)) then @@ -992,7 +992,7 @@ subroutine write_restart_bgc() doc, don, dic, fed, fep, zaeros, hum use ice_grid, only: tmask use ice_state, only: trcrn - use ice_flux, only: sss + use ice_flux, only: sss use ice_restart, only: write_restart_field ! local variables @@ -1011,27 +1011,27 @@ subroutine write_restart_bgc() nt_bgc_DMSPp, nt_bgc_Nit, nt_bgc_Sil, & nt_bgc_PON, nt_zbgc_frac, nt_bgc_hum, nbtrcr - integer (kind=int_kind), dimension(icepack_max_algae) :: & - nt_bgc_N , & ! diatoms, phaeocystis, pico/small - nt_bgc_C , & ! diatoms, phaeocystis, pico/small - nt_bgc_chl ! 
diatoms, phaeocystis, pico/small + integer (kind=int_kind), dimension(icepack_max_algae) :: & + nt_bgc_N , & ! diatoms, phaeocystis, pico/small + nt_bgc_C , & ! diatoms, phaeocystis, pico/small + nt_bgc_chl ! diatoms, phaeocystis, pico/small - integer (kind=int_kind), dimension(icepack_max_doc) :: & + integer (kind=int_kind), dimension(icepack_max_doc) :: & nt_bgc_DOC ! dissolved organic carbon - integer (kind=int_kind), dimension(icepack_max_don) :: & + integer (kind=int_kind), dimension(icepack_max_don) :: & nt_bgc_DON ! dissolved organic nitrogen - integer (kind=int_kind), dimension(icepack_max_dic) :: & + integer (kind=int_kind), dimension(icepack_max_dic) :: & nt_bgc_DIC ! dissolved inorganic carbon - integer (kind=int_kind), dimension(icepack_max_fe) :: & + integer (kind=int_kind), dimension(icepack_max_fe) :: & nt_bgc_Fed, & ! dissolved iron nt_bgc_Fep ! particulate iron - integer (kind=int_kind), dimension(icepack_max_aero) :: & + integer (kind=int_kind), dimension(icepack_max_aero) :: & nt_zaero ! black carbon and other aerosols - + logical (kind=log_kind) :: tr_bgc_Nit, tr_bgc_Am, tr_bgc_Sil,& tr_bgc_DMS, tr_bgc_PON, tr_bgc_N, tr_bgc_C, & tr_bgc_DON, tr_bgc_Fe, tr_zaero , tr_bgc_chl, & @@ -1102,19 +1102,19 @@ subroutine write_restart_bgc() !----------------------------------------------------------------- ! Salinity and extras !----------------------------------------------------------------- - if (solve_zsal) then + if (solve_zsal) then do k = 1,nblyr write(nchar,'(i3.3)') k call write_restart_field(nu_dump_bgc,0,trcrn(:,:,nt_bgc_S+k-1,:,:),'ruf8', & 'zSalinity'//trim(nchar),ncat,diag) enddo - + call write_restart_field(nu_dump_bgc,0,sss,'ruf8','sss',1,diag) !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo @@ -1188,7 +1188,7 @@ subroutine write_restart_bgc() if (tr_bgc_PON) & call write_restart_field(nu_dump_bgc,0,trcrn(:,:,nt_bgc_PON,:,:), & 'ruf8','bgc_PON',ncat,diag) - + if (tr_bgc_DON) then do k = 1, n_don write(nchar,'(i3.3)') k @@ -1197,19 +1197,19 @@ subroutine write_restart_bgc() enddo endif if (tr_bgc_Fe ) then - do k = 1, n_fed + do k = 1, n_fed write(nchar,'(i3.3)') k call write_restart_field(nu_dump_bgc,0,trcrn(:,:,nt_bgc_Fed (k),:,:), & 'ruf8','bgc_Fed'//trim(nchar),ncat,diag) enddo - do k = 1, n_fep + do k = 1, n_fep write(nchar,'(i3.3)') k call write_restart_field(nu_dump_bgc,0,trcrn(:,:,nt_bgc_Fep (k),:,:), & 'ruf8','bgc_Fep'//trim(nchar),ncat,diag) enddo endif - else + else !----------------------------------------------------------------- ! 
Z layer BGC @@ -1380,7 +1380,7 @@ subroutine write_restart_bgc() write(nchar,'(i3.3)') k call write_restart_field(nu_dump_bgc,0,dic(:,:,k,:),'ruf8','dic'//trim(nchar),1,diag) enddo !k - endif + endif if (tr_bgc_Nit) & call write_restart_field(nu_dump_bgc,0,nit, 'ruf8','nit', 1,diag) if (tr_bgc_Am) & @@ -1433,7 +1433,7 @@ subroutine read_restart_bgc() use ice_domain_size, only: ncat, n_algae, n_doc, n_dic,& n_don, n_zaero, n_fed, n_fep use ice_fileunits, only: nu_restart_bgc - use ice_flux, only: sss + use ice_flux, only: sss use ice_flux_bgc, only: nit, amm, sil, dmsp, dms, algalN, & doc, don, dic, fed, fep, zaeros, hum use ice_state, only: trcrn @@ -1456,27 +1456,27 @@ subroutine read_restart_bgc() nt_bgc_DMSPp, nt_bgc_Nit, nt_bgc_Sil, & nt_bgc_PON, nt_zbgc_frac, nt_bgc_hum, nbtrcr - integer (kind=int_kind), dimension(icepack_max_algae) :: & - nt_bgc_N , & ! diatoms, phaeocystis, pico/small - nt_bgc_C , & ! diatoms, phaeocystis, pico/small - nt_bgc_chl ! diatoms, phaeocystis, pico/small + integer (kind=int_kind), dimension(icepack_max_algae) :: & + nt_bgc_N , & ! diatoms, phaeocystis, pico/small + nt_bgc_C , & ! diatoms, phaeocystis, pico/small + nt_bgc_chl ! diatoms, phaeocystis, pico/small - integer (kind=int_kind), dimension(icepack_max_doc) :: & + integer (kind=int_kind), dimension(icepack_max_doc) :: & nt_bgc_DOC ! dissolved organic carbon - integer (kind=int_kind), dimension(icepack_max_don) :: & + integer (kind=int_kind), dimension(icepack_max_don) :: & nt_bgc_DON ! dissolved organic nitrogen - integer (kind=int_kind), dimension(icepack_max_dic) :: & + integer (kind=int_kind), dimension(icepack_max_dic) :: & nt_bgc_DIC ! dissolved inorganic carbon - integer (kind=int_kind), dimension(icepack_max_fe) :: & + integer (kind=int_kind), dimension(icepack_max_fe) :: & nt_bgc_Fed, & ! dissolved iron nt_bgc_Fep ! particulate iron - integer (kind=int_kind), dimension(icepack_max_aero) :: & + integer (kind=int_kind), dimension(icepack_max_aero) :: & nt_zaero ! black carbon and other aerosols - + logical (kind=log_kind) :: tr_bgc_Nit, tr_bgc_Am, tr_bgc_Sil,& tr_bgc_DMS, tr_bgc_PON, tr_bgc_N, tr_bgc_C, & tr_bgc_DON, tr_bgc_Fe, tr_zaero , tr_bgc_chl, & @@ -1514,7 +1514,7 @@ subroutine read_restart_bgc() ! Salinity and extras !----------------------------------------------------------------- - if (restart_zsal) then + if (restart_zsal) then if (my_task == master_task) write(nu_diag,*) subname,'zSalinity restart' do k = 1,nblyr @@ -1522,21 +1522,21 @@ subroutine read_restart_bgc() call read_restart_field(nu_restart_bgc,0,trcrn(:,:,nt_bgc_S+k-1,:,:),'ruf8', & 'zSalinity'//trim(nchar),ncat,diag,field_loc_center,field_type_scalar) enddo - + if (my_task == master_task) write(nu_diag,*) subname,'sea surface salinity' call read_restart_field(nu_restart_bgc,0,sss,'ruf8','sss',1,diag) call read_restart_field(nu_restart_bgc,0,Rayleigh_real,'ruf8','Rayleigh',1,diag) !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) + this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo ihi = this_block%ihi jlo = this_block%jlo jhi = this_block%jhi do j = jlo, jhi - do i = ilo, ihi + do i = ilo, ihi if (Rayleigh_real (i,j,iblk) .GE. c1) then Rayleigh_criteria (i,j,iblk) = .true. 
elseif (Rayleigh_real (i,j,iblk) < c1) then @@ -1618,13 +1618,13 @@ subroutine read_restart_bgc() enddo endif if (tr_bgc_Fe) then - do k = 1, n_fed + do k = 1, n_fed write(nchar,'(i3.3)') k call read_restart_field(nu_restart_bgc,0, & trcrn(:,:,nt_bgc_Fed (k),:,:), & 'ruf8','bgc_Fed'//trim(nchar),ncat,diag) enddo - do k = 1, n_fep + do k = 1, n_fep write(nchar,'(i3.3)') k call read_restart_field(nu_restart_bgc,0, & trcrn(:,:,nt_bgc_Fep (k),:,:), & @@ -1862,7 +1862,7 @@ subroutine read_restart_bgc() enddo !k endif endif ! restart_bgc - + end subroutine read_restart_bgc !======================================================================= diff --git a/cicecore/shared/ice_restart_shared.F90 b/cicecore/shared/ice_restart_shared.F90 index 6578ef3ad..7c178fec0 100644 --- a/cicecore/shared/ice_restart_shared.F90 +++ b/cicecore/shared/ice_restart_shared.F90 @@ -41,7 +41,7 @@ module ice_restart_shared integer function lenstr(label) - character*(*) label + character(len=*) :: label character(len=*),parameter :: subname='(lenstr)' diff --git a/cicecore/shared/ice_spacecurve.F90 b/cicecore/shared/ice_spacecurve.F90 index 931b2312b..205c50e77 100644 --- a/cicecore/shared/ice_spacecurve.F90 +++ b/cicecore/shared/ice_spacecurve.F90 @@ -4,8 +4,8 @@ module ice_spacecurve ! !DESCRIPTION: -! This module contains routines necessary to -! create space-filling curves. +! This module contains routines necessary to +! create space-filling curves. ! ! !REVISION HISTORY: ! @@ -22,7 +22,7 @@ module ice_spacecurve implicit none private -! !PUBLIC TYPES: +! !PUBLIC TYPES: type, public :: factor_t integer(int_kind) :: numfact ! The # of factors for a value @@ -30,7 +30,7 @@ module ice_spacecurve integer(int_kind), dimension(:), pointer :: used end type -! !PUBLIC MEMBER FUNCTIONS: +! !PUBLIC MEMBER FUNCTIONS: public :: GenSpaceCurve @@ -53,11 +53,10 @@ module ice_spacecurve FindandMark integer(int_kind), dimension(:,:), allocatable :: & - dir, &! direction to move along each level - ordered ! the ordering + ordered ! the ordering integer(int_kind), dimension(:), allocatable :: & pos ! position along each of the axes - + integer(int_kind) :: & maxdim, &! dimensionality of entire space vcnt ! visitation count @@ -68,7 +67,7 @@ module ice_spacecurve !EOC !*********************************************************************** -contains +contains !*********************************************************************** !BOP @@ -79,19 +78,19 @@ recursive function Cinco(l,type,ma,md,ja,jd) result(ierr) ! !DESCRIPTION: ! This subroutine implements a Cinco space-filling curve. -! Cinco curves connect a Nb x Nb block of points where +! Cinco curves connect a Nb x Nb block of points where ! -! Nb = 5^p +! Nb = 5^p ! ! !REVISION HISTORY: ! same as module ! -! !INPUT PARAMETERS +! !INPUT PARAMETERS integer(int_kind), intent(in) :: & - l, & ! level of the space-filling curve + l, & ! level of the space-filling curve type, & ! type of SFC curve ma, & ! Major axis [0,1] md, & ! direction of major axis [-1,1] @@ -115,8 +114,8 @@ recursive function Cinco(l,type,ma,md,ja,jd) result(ierr) lmd, &! local major direction (next level) lja, &! local joiner axis (next level) ljd, &! local joiner direction (next level) - ltype, &! type of SFC on next level - ll ! next level down + ltype, &! type of SFC on next level + ll ! next level down character(len=*),parameter :: subname='(Cinco)' @@ -589,8 +588,8 @@ end function Cinco recursive function PeanoM(l,type,ma,md,ja,jd) result(ierr) ! !DESCRIPTION: -! 
This function implements a meandering Peano -! space-filling curve. A meandering Peano curve +! This function implements a meandering Peano +! space-filling curve. A meandering Peano curve ! connects a Nb x Nb block of points where ! ! Nb = 3^p @@ -947,8 +946,8 @@ end function hilbert function IncrementCurve(ja,jd) result(ierr) ! !DESCRIPTION: -! This function creates the curve which is stored in the -! the ordered array. The curve is implemented by +! This function creates the curve which is stored in the +! the ordered array. The curve is implemented by ! incrementing the curve in the direction [jd] of axis [ja]. ! ! !REVISION HISTORY: @@ -990,7 +989,7 @@ end function IncrementCurve function log2( n) ! !DESCRIPTION: -! This function calculates the log2 of its integer +! This function calculates the log2 of its integer ! input. ! ! !REVISION HISTORY: @@ -999,8 +998,8 @@ function log2( n) ! !INPUT PARAMETERS: integer(int_kind), intent(in) :: n ! integer value to find the log2 - -! !OUTPUT PARAMETERS: + +! !OUTPUT PARAMETERS: integer(int_kind) :: log2 @@ -1030,10 +1029,10 @@ function log2( n) else ! n > 1 log2 = 1 tmp =n - do while (tmp > 1 .and. tmp/2 .ne. 1) + do while (tmp > 1 .and. tmp/2 .ne. 1) tmp=tmp/2 log2=log2+1 - enddo + enddo endif !EOP @@ -1048,9 +1047,9 @@ end function log2 ! !INTERFACE: function IsLoadBalanced(nelem,npart) - + ! !DESCRIPTION: -! This function determines if we can create +! This function determines if we can create ! a perfectly load-balanced partitioning. ! ! !REVISION HISTORY: @@ -1063,7 +1062,7 @@ function IsLoadBalanced(nelem,npart) npart ! size of partition ! !OUTPUT PARAMETERS: - logical :: IsLoadBalanced ! .TRUE. if a perfectly load balanced + logical :: IsLoadBalanced ! .TRUE. if a perfectly load balanced ! partition is possible !EOP !BOC @@ -1080,7 +1079,7 @@ function IsLoadBalanced(nelem,npart) !----------------------------------------------------------------------- tmp1 = nelem/npart - if (npart*tmp1 == nelem ) then + if (npart*tmp1 == nelem ) then IsLoadBalanced=.TRUE. else IsLoadBalanced=.FALSE. @@ -1129,7 +1128,7 @@ function GenCurve(l,type,ma,md,ja,jd) result(ierr) !----------------------------------------------------------------------- !------------------------------------------------- - ! create the space-filling curve on the next level + ! create the space-filling curve on the next level !------------------------------------------------- if(type == 2) then @@ -1140,7 +1139,7 @@ function GenCurve(l,type,ma,md,ja,jd) result(ierr) if (f3 .and. my_task == master_task) write(nu_diag,*) subname,' calling PeanoM (3)' ierr = PeanoM(l,type,ma,md,ja,jd) f3 = .false. - elseif ( type == 5) then + elseif ( type == 5) then if (f5 .and. my_task == master_task) write(nu_diag,*) subname,' calling Cinco (5)' ierr = Cinco(l,type,ma,md,ja,jd) f5 = .false. @@ -1263,7 +1262,7 @@ end subroutine PrintFactor function Factor(num) result(res) ! !DESCRIPTION: -! This function factors the input value num into a +! This function factors the input value num into a ! product of 2,3, and 5. ! ! !REVISION HISTORY: @@ -1350,8 +1349,8 @@ function Factor(num) result(res) enddo !------------------------------------ - ! make sure that the input value - ! only contains factors of 2,3,and 5 + ! make sure that the input value + ! only contains factors of 2,3,and 5 !------------------------------------ tmp=1 do i=1,n @@ -1373,10 +1372,10 @@ end function Factor ! !INTERFACE: function IsFactorable(n) - + ! !DESCRIPTION: ! This function determines if we can factor -! n into 2,3,and 5. +! 
n into 2,3,and 5. ! ! !REVISION HISTORY: ! same as module @@ -1420,7 +1419,7 @@ end function IsFactorable subroutine map(l) ! !DESCRIPTION: -! Interface routine between internal subroutines and public +! Interface routine between internal subroutines and public ! subroutines. ! ! !REVISION HISTORY: @@ -1471,7 +1470,7 @@ subroutine PrintCurve(Mesh) ! !DESCRIPTION: -! This subroutine prints the several low order +! This subroutine prints the several low order ! space-filling curves in an easy to read format ! ! !REVISION HISTORY: @@ -1693,7 +1692,7 @@ end subroutine PrintCurve subroutine GenSpaceCurve(Mesh) ! !DESCRIPTION: -! This subroutine is the public interface into the +! This subroutine is the public interface into the ! space-filling curve functionality ! ! !REVISION HISTORY: @@ -1717,7 +1716,7 @@ subroutine GenSpaceCurve(Mesh) dim ! dimension of SFC... currently limited to 2D integer(int_kind) :: gridsize ! number of points on a side - + character(len=*),parameter :: subname='(GenSpaceCurve)' !----------------------------------------------------------------------- @@ -1743,19 +1742,19 @@ subroutine GenSpaceCurve(Mesh) ! Setup the working arrays for the traversal !-------------------------------------------- allocate(pos(0:dim-1)) - + !----------------------------------------------------- ! The array ordered will contain the visitation order !----------------------------------------------------- ordered(:,:) = 0 - call map(level) + call map(level) Mesh(:,:) = ordered(:,:) deallocate(pos,ordered) - end subroutine GenSpaceCurve + end subroutine GenSpaceCurve !EOC !----------------------------------------------------------------------- diff --git a/configuration/scripts/cice.batch.csh b/configuration/scripts/cice.batch.csh index 43ce00010..fbe172f51 100755 --- a/configuration/scripts/cice.batch.csh +++ b/configuration/scripts/cice.batch.csh @@ -11,43 +11,7 @@ endif set jobfile = $1 -set ntasks = ${ICE_NTASKS} -set nthrds = ${ICE_NTHRDS} -set maxtpn = ${ICE_MACHINE_TPNODE} -set acct = ${ICE_ACCOUNT} - -@ ncores = ${ntasks} * ${nthrds} -@ taskpernode = ${maxtpn} / $nthrds -if (${taskpernode} == 0) set taskpernode = 1 -@ nnodes = ${ntasks} / ${taskpernode} -if (${nnodes} * ${taskpernode} < ${ntasks}) @ nnodes = $nnodes + 1 -set taskpernodelimit = ${taskpernode} -if (${taskpernodelimit} > ${ntasks}) set taskpernodelimit = ${ntasks} -@ corespernode = ${taskpernodelimit} * ${nthrds} - -set ptile = $taskpernode -if ($ptile > ${maxtpn} / 2) @ ptile = ${maxtpn} / 2 - -set runlength = ${ICE_RUNLENGTH} -if ($?ICE_MACHINE_MAXRUNLENGTH) then - if (${runlength} > ${ICE_MACHINE_MAXRUNLENGTH}) then - set runlength = ${ICE_MACHINE_MAXRUNLENGTH} - endif -endif - -set queue = "${ICE_QUEUE}" -set batchtime = "00:15:00" -if (${runlength} == 0) set batchtime = "00:29:00" -if (${runlength} == 1) set batchtime = "00:59:00" -if (${runlength} == 2) set batchtime = "2:00:00" -if (${runlength} == 3) set batchtime = "3:00:00" -if (${runlength} == 4) set batchtime = "4:00:00" -if (${runlength} == 5) set batchtime = "5:00:00" -if (${runlength} == 6) set batchtime = "6:00:00" -if (${runlength} == 7) set batchtime = "7:00:00" -if (${runlength} >= 8) set batchtime = "8:00:00" - -set shortcase = `echo ${ICE_CASENAME} | cut -c1-15` +source ${ICE_SCRIPTS}/setup_machparams.csh #========================================== @@ -284,7 +248,7 @@ cat >> ${jobfile} << EOFB #SBATCH --nodes ${nnodes} #SBATCH --ntasks ${ntasks} #SBATCH --cpus-per-task ${nthrds} -#SBATCH --mem-per-cpu=5G +#SBATCH --mem-per-cpu=${batchmem}G #SBATCH 
--comment="image=eccc/eccc_all_default_ubuntu-18.04-amd64_latest" EOFB diff --git a/configuration/scripts/cice.launch.csh b/configuration/scripts/cice.launch.csh index a4b6ca37d..bc9ff2b99 100755 --- a/configuration/scripts/cice.launch.csh +++ b/configuration/scripts/cice.launch.csh @@ -8,18 +8,7 @@ echo "running cice.launch.csh" set jobfile = $1 -set ntasks = ${ICE_NTASKS} -set nthrds = ${ICE_NTHRDS} -set maxtpn = ${ICE_MACHINE_TPNODE} - -@ ncores = ${ntasks} * ${nthrds} -@ taskpernode = ${maxtpn} / $nthrds -if (${taskpernode} == 0) set taskpernode = 1 -@ nnodes = ${ntasks} / ${taskpernode} -if (${nnodes} * ${taskpernode} < ${ntasks}) @ nnodes = $nnodes + 1 -set taskpernodelimit = ${taskpernode} -if (${taskpernodelimit} > ${ntasks}) set taskpernodelimit = ${ntasks} -@ corespernode = ${taskpernodelimit} * ${nthrds} +source ${ICE_SCRIPTS}/setup_machparams.csh #========================================== if (${ICE_MACHINE} =~ cheyenne*) then diff --git a/configuration/scripts/cice.settings b/configuration/scripts/cice.settings index 9b57aab3f..76ae6ad9e 100644 --- a/configuration/scripts/cice.settings +++ b/configuration/scripts/cice.settings @@ -32,6 +32,7 @@ setenv ICE_BFBCOMP undefined setenv ICE_BFBTYPE restart setenv ICE_SPVAL undefined setenv ICE_RUNLENGTH -1 +setenv ICE_MEMUSE -1 setenv ICE_ACCOUNT undefined setenv ICE_QUEUE undefined diff --git a/configuration/scripts/machines/Macros.cheyenne_gnu b/configuration/scripts/machines/Macros.cheyenne_gnu index 082130f77..5d3859ec8 100644 --- a/configuration/scripts/machines/Macros.cheyenne_gnu +++ b/configuration/scripts/machines/Macros.cheyenne_gnu @@ -12,7 +12,7 @@ FFLAGS := -fconvert=big-endian -fbacktrace -ffree-line-length-none FFLAGS_NOOPT:= -O0 ifeq ($(ICE_BLDDEBUG), true) - FFLAGS += -O0 -g -fcheck=bounds -finit-real=nan -fimplicit-none -ffpe-trap=invalid,zero,overflow + FFLAGS += -O0 -g -fcheck=bounds -finit-real=nan -fimplicit-none -ffpe-trap=invalid,zero,overflow --std f2008 # FFLAGS += -O0 -g -fcheck=all -finit-real=snan -fimplicit-none -ffpe-trap=invalid,zero,overflow CFLAGS += -O0 endif diff --git a/configuration/scripts/machines/Macros.cheyenne_intel b/configuration/scripts/machines/Macros.cheyenne_intel index 52fc07ebb..6fb3a002a 100644 --- a/configuration/scripts/machines/Macros.cheyenne_intel +++ b/configuration/scripts/machines/Macros.cheyenne_intel @@ -13,7 +13,7 @@ FFLAGS_NOOPT:= -O0 ifeq ($(ICE_BLDDEBUG), true) # FFLAGS += -O0 -g -check uninit -check bounds -check pointers -fpe0 -check noarg_temp_created -link_mpi=dbg - FFLAGS += -O0 -g -check all -fpe0 -ftrapuv -fp-model except -check noarg_temp_created -link_mpi=dbg + FFLAGS += -O0 -g -check all -fpe0 -ftrapuv -fp-model except -check noarg_temp_created -link_mpi=dbg -stand f08 # FFLAGS += -O0 -g -check all -fpe0 -ftrapuv -fp-model except -check noarg_temp_created -init=snan,arrays -link_mpi=dbg else FFLAGS += -O2 diff --git a/configuration/scripts/machines/env.cheyenne_gnu b/configuration/scripts/machines/env.cheyenne_gnu index c962c35f3..fb29543f8 100755 --- a/configuration/scripts/machines/env.cheyenne_gnu +++ b/configuration/scripts/machines/env.cheyenne_gnu @@ -31,8 +31,9 @@ endif if ($?ICE_BFBTYPE) then if ($ICE_BFBTYPE =~ qcchk*) then - module load python - source /glade/u/apps/opt/ncar_pylib/ncar_pylib.csh default + module load conda +# conda env create -f ../../configuration/scripts/tests/qctest.yml + conda activate qctest endif endif diff --git a/configuration/scripts/machines/env.cheyenne_intel b/configuration/scripts/machines/env.cheyenne_intel index 
89a8920b6..2c6eedec6 100755 --- a/configuration/scripts/machines/env.cheyenne_intel +++ b/configuration/scripts/machines/env.cheyenne_intel @@ -31,8 +31,9 @@ endif if ($?ICE_BFBTYPE) then if ($ICE_BFBTYPE =~ qcchk*) then - module load python - source /glade/u/apps/opt/ncar_pylib/ncar_pylib.csh default + module load conda +# conda env create -f ../../configuration/scripts/tests/qctest.yml + conda activate qctest endif endif diff --git a/configuration/scripts/machines/env.cheyenne_pgi b/configuration/scripts/machines/env.cheyenne_pgi index 5caa9d992..e6e339f08 100755 --- a/configuration/scripts/machines/env.cheyenne_pgi +++ b/configuration/scripts/machines/env.cheyenne_pgi @@ -31,8 +31,9 @@ endif if ($?ICE_BFBTYPE) then if ($ICE_BFBTYPE =~ qcchk*) then - module load python - source /glade/u/apps/opt/ncar_pylib/ncar_pylib.csh default + module load conda +# conda env create -f ../../configuration/scripts/tests/qctest.yml + conda activate qctest endif endif diff --git a/configuration/scripts/options/set_env.memlarge b/configuration/scripts/options/set_env.memlarge new file mode 100644 index 000000000..2572e3ae7 --- /dev/null +++ b/configuration/scripts/options/set_env.memlarge @@ -0,0 +1,2 @@ +setenv ICE_MEMUSE 8 + diff --git a/configuration/scripts/options/set_env.memmed b/configuration/scripts/options/set_env.memmed new file mode 100644 index 000000000..5d7169268 --- /dev/null +++ b/configuration/scripts/options/set_env.memmed @@ -0,0 +1,2 @@ +setenv ICE_MEMUSE 4 + diff --git a/configuration/scripts/options/set_env.memsmall b/configuration/scripts/options/set_env.memsmall new file mode 100644 index 000000000..dc9e3c1ee --- /dev/null +++ b/configuration/scripts/options/set_env.memsmall @@ -0,0 +1,2 @@ +setenv ICE_MEMUSE 1 + diff --git a/configuration/scripts/setup_machparams.csh b/configuration/scripts/setup_machparams.csh new file mode 100755 index 000000000..db9f00244 --- /dev/null +++ b/configuration/scripts/setup_machparams.csh @@ -0,0 +1,64 @@ +#!/bin/csh -f + +# inputs +# mpi tasks +set ntasks = ${ICE_NTASKS} +# threads +set nthrds = ${ICE_NTHRDS} +# max tasks per node +set maxtpn = ${ICE_MACHINE_TPNODE} +# batch charge account +set acct = ${ICE_ACCOUNT} + +# compute total cores needed and distribution of cores on nodes +# ncores = total cores needed (tasks * threads) +# taskpernode = number of MPI tasks per node based on size of node and threads +# nodes = number of total nodes needed based on tasks/threads +# taskpernodelimit = max(taskpernode, ntasks), when using less than 1 node +# corespernode = number of cores per node used +@ ncores = ${ntasks} * ${nthrds} +@ taskpernode = ${maxtpn} / $nthrds +if (${taskpernode} == 0) set taskpernode = 1 +@ nnodes = ${ntasks} / ${taskpernode} +if (${nnodes} * ${taskpernode} < ${ntasks}) @ nnodes = $nnodes + 1 +set taskpernodelimit = ${taskpernode} +if (${taskpernodelimit} > ${ntasks}) set taskpernodelimit = ${ntasks} +@ corespernode = ${taskpernodelimit} * ${nthrds} + +set runlength = ${ICE_RUNLENGTH} +if ($?ICE_MACHINE_MAXRUNLENGTH) then + if (${runlength} > ${ICE_MACHINE_MAXRUNLENGTH}) then + set runlength = ${ICE_MACHINE_MAXRUNLENGTH} + endif +endif + +set memuse = ${ICE_MEMUSE} +if ($?ICE_MACHINE_MAXMEMUSE) then + if (${memuse} > ${ICE_MACHINE_MAXMEMUSE}) then + set memuse = ${ICE_MACHINE_MAXMEMUSE} + endif +endif + +set queue = "${ICE_QUEUE}" +set batchtime = "00:15:00" +if (${runlength} == 0) set batchtime = "00:29:00" +if (${runlength} == 1) set batchtime = "00:59:00" +if (${runlength} == 2) set batchtime = "2:00:00" +if (${runlength} == 3) 
set batchtime = "3:00:00" +if (${runlength} == 4) set batchtime = "4:00:00" +if (${runlength} == 5) set batchtime = "5:00:00" +if (${runlength} == 6) set batchtime = "6:00:00" +if (${runlength} == 7) set batchtime = "7:00:00" +if (${runlength} >= 8) set batchtime = "8:00:00" +set batchmem = "5" +if (${memuse} == 1) set batchmem = "5" +if (${memuse} == 2) set batchmem = "10" +if (${memuse} == 3) set batchmem = "15" +if (${memuse} == 4) set batchmem = "20" +if (${memuse} == 5) set batchmem = "50" +if (${memuse} == 6) set batchmem = "100" +if (${memuse} == 7) set batchmem = "150" +if (${memuse} >= 8) set batchmem = "200" + +set shortcase = `echo ${ICE_CASENAME} | cut -c1-15` + diff --git a/configuration/scripts/tests/qctest.yml b/configuration/scripts/tests/qctest.yml new file mode 100644 index 000000000..72479a563 --- /dev/null +++ b/configuration/scripts/tests/qctest.yml @@ -0,0 +1,11 @@ +name: qctest +channels: + - conda-forge + - nodefaults +dependencies: +# Python dependencies for plotting scripts + - numpy + - matplotlib-base + - cartopy + - netcdf4 + diff --git a/doc/source/user_guide/ug_implementation.rst b/doc/source/user_guide/ug_implementation.rst index 500209326..a3f7d11bc 100644 --- a/doc/source/user_guide/ug_implementation.rst +++ b/doc/source/user_guide/ug_implementation.rst @@ -5,7 +5,9 @@ Implementation ======================== CICE is written in FORTRAN90 and runs on platforms using UNIX, LINUX, -and other operating systems. The code is based on a two-dimensional +and other operating systems. The current coding standard is Fortran 2003, +with use of the Fortran 2008 feature CONTIGUOUS in the 1d EVP solver. +The code is based on a two-dimensional horizontal orthogonal grid that is broken into two-dimensional horizontal blocks and parallelized over blocks with MPI and OpenMP threads. The code also includes some optimizations diff --git a/doc/source/user_guide/ug_running.rst b/doc/source/user_guide/ug_running.rst index aca7d4933..3f3cd3495 100644 --- a/doc/source/user_guide/ug_running.rst +++ b/doc/source/user_guide/ug_running.rst @@ -535,10 +535,10 @@ On macOS: .. code-block:: bash - # Download the Miniconda installer to ~/Downloads/miniconda.sh - curl -L https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh -o ~/Downloads/miniconda.sh + # Download the Miniconda installer to ~/miniconda.sh + curl -L https://repo.anaconda.com/miniconda/Miniconda3-latest-MacOSX-x86_64.sh -o ~/miniconda.sh # Install Miniconda - bash ~/Downloads/miniconda.sh + bash ~/miniconda.sh # Follow the prompts diff --git a/doc/source/user_guide/ug_testing.rst b/doc/source/user_guide/ug_testing.rst index 05a16a6fb..284de72f1 100644 --- a/doc/source/user_guide/ug_testing.rst +++ b/doc/source/user_guide/ug_testing.rst @@ -1052,6 +1052,13 @@ To install the necessary Python packages, the ``pip`` Python utility can be used pip install --user matplotlib pip install --user cartopy +You can also set up a conda env with the same utilities + +.. code-block:: bash + + conda env create -f configuration/scripts/tests/qctest.yml + conda activate qctest + To run the validation test, set up a baseline run with the original baseline model and then a perturbation run based on recent model changes. Use ``--set qc`` in both runs in addition to other settings needed. Then use the QC script to compare history output,
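The ug_implementation change above names the Fortran 2008 CONTIGUOUS feature without illustrating it. A minimal sketch of the attribute on assumed-shape dummy arguments (illustrative only; this is not the CICE 1d EVP solver code):

      ! Sketch: CONTIGUOUS promises unit-stride dummy arguments, which lets
      ! the compiler vectorize without runtime stride checks.
      subroutine axpy_demo(n, a, x, y)
         implicit none
         integer,  intent(in)                :: n
         real(8),  intent(in)                :: a
         real(8),  intent(in),    contiguous :: x(:)
         real(8),  intent(inout), contiguous :: y(:)
         integer :: i
         do i = 1, n
            y(i) = y(i) + a*x(i)
         enddo
      end subroutine axpy_demo

If a caller passes a non-contiguous section such as x(1:n:2), the compiler must generate copy-in/copy-out, so the attribute trades call-site flexibility for loop speed.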
Then use the QC script to compare history output, diff --git a/icepack b/icepack index 4fea17c15..3a039e598 160000 --- a/icepack +++ b/icepack @@ -1 +1 @@ -Subproject commit 4fea17c15fb63e1424cd71c0ef4365e2135d32db +Subproject commit 3a039e598e6395333a278bb1822f03e9bc954ac6 From 75ef5d260372186845d9beb41795ad930156c2b8 Mon Sep 17 00:00:00 2001 From: Philippe Blain Date: Wed, 17 Aug 2022 11:40:35 -0400 Subject: [PATCH 13/57] Update ECCC machine files (#751) * machines: eccc: unify baseline directory * machines: eccc: fix modules initialization Make sure to source the Csh initialization script for environment modules ourselves, as it is not done in all environments. While at it, for convenience add I_MPI_LIBRARY_KIND=debug to the commented lines. --- configuration/scripts/machines/env.gpsc3_intel | 2 +- configuration/scripts/machines/env.ppp5_intel | 2 ++ configuration/scripts/machines/env.ppp6_gnu | 2 +- configuration/scripts/machines/env.ppp6_gnu-impi | 2 +- configuration/scripts/machines/env.ppp6_intel | 2 ++ configuration/scripts/machines/env.ppp6_intel19 | 2 +- configuration/scripts/machines/env.robert_intel | 2 ++ configuration/scripts/machines/env.underhill_intel | 2 ++ 8 files changed, 12 insertions(+), 4 deletions(-) diff --git a/configuration/scripts/machines/env.gpsc3_intel b/configuration/scripts/machines/env.gpsc3_intel index 2c8d49275..87c7834a4 100644 --- a/configuration/scripts/machines/env.gpsc3_intel +++ b/configuration/scripts/machines/env.gpsc3_intel @@ -26,7 +26,7 @@ setenv ICE_MACHINE_ENVNAME intel setenv ICE_MACHINE_MAKE make setenv ICE_MACHINE_WKDIR ~/data/site3/cice/runs/ setenv ICE_MACHINE_INPUTDATA /home/ords/cmdd/cmde/sice500/ -setenv ICE_MACHINE_BASELINE ~/data/site3/cice/baseline/ +setenv ICE_MACHINE_BASELINE ~/data/site3/cice/baselines/ setenv ICE_MACHINE_SUBMIT "sbatch" setenv ICE_MACHINE_TPNODE 44 setenv ICE_MACHINE_ACCT "eccc_cmdd" diff --git a/configuration/scripts/machines/env.ppp5_intel b/configuration/scripts/machines/env.ppp5_intel index 79dbf2a1b..c4987124a 100644 --- a/configuration/scripts/machines/env.ppp5_intel +++ b/configuration/scripts/machines/env.ppp5_intel @@ -12,7 +12,9 @@ source /fs/ssm/main/opt/ssmuse/ssmuse-1.11/ssmuse_1.11_all/bin/ssmuse-boot.csh > set ssmuse=`which ssmuse-csh` # Intel compiler + Intel MPI source $ssmuse -d /fs/ssm/main/opt/intelcomp/inteloneapi-2022.1.2/intelcomp+mpi+mkl +# source /etc/profile.d/modules.csh # module use /home/sice500/modulefiles +# setenv I_MPI_LIBRARY_KIND debug # module load -s icc mpi setenv FOR_DUMP_CORE_FILE 1 setenv I_MPI_DEBUG_COREDUMP 1 diff --git a/configuration/scripts/machines/env.ppp6_gnu b/configuration/scripts/machines/env.ppp6_gnu index 39cc27740..69ed6ff8b 100644 --- a/configuration/scripts/machines/env.ppp6_gnu +++ b/configuration/scripts/machines/env.ppp6_gnu @@ -20,7 +20,7 @@ setenv ICE_MACHINE_ENVNAME gnu setenv ICE_MACHINE_MAKE make setenv ICE_MACHINE_WKDIR ~/data/site6/cice/runs/ setenv ICE_MACHINE_INPUTDATA /space/hall6/sitestore/eccc/cmd/e/sice500/ -setenv ICE_MACHINE_BASELINE ~/data/site6/cice/baseline/ +setenv ICE_MACHINE_BASELINE ~/data/site6/cice/baselines/ setenv ICE_MACHINE_SUBMIT qsub setenv ICE_MACHINE_TPNODE 80 setenv ICE_MACHINE_ACCT unused diff --git a/configuration/scripts/machines/env.ppp6_gnu-impi b/configuration/scripts/machines/env.ppp6_gnu-impi index f2a523bf1..461e09a43 100644 --- a/configuration/scripts/machines/env.ppp6_gnu-impi +++ b/configuration/scripts/machines/env.ppp6_gnu-impi @@ -29,7 +29,7 @@ setenv ICE_MACHINE_ENVNAME gnu-impi setenv ICE_MACHINE_MAKE 
make setenv ICE_MACHINE_WKDIR ~/data/site6/cice/runs/ setenv ICE_MACHINE_INPUTDATA /space/hall6/sitestore/eccc/cmd/e/sice500/ -setenv ICE_MACHINE_BASELINE ~/data/site6/cice/baseline/ +setenv ICE_MACHINE_BASELINE ~/data/site6/cice/baselines/ setenv ICE_MACHINE_SUBMIT qsub setenv ICE_MACHINE_TPNODE 80 setenv ICE_MACHINE_ACCT unused diff --git a/configuration/scripts/machines/env.ppp6_intel b/configuration/scripts/machines/env.ppp6_intel index dfaeb855f..ef9396575 100644 --- a/configuration/scripts/machines/env.ppp6_intel +++ b/configuration/scripts/machines/env.ppp6_intel @@ -12,7 +12,9 @@ source /fs/ssm/main/opt/ssmuse/ssmuse-1.11/ssmuse_1.11_all/bin/ssmuse-boot.csh > set ssmuse=`which ssmuse-csh` # Intel compiler + Intel MPI source $ssmuse -d /fs/ssm/main/opt/intelcomp/inteloneapi-2022.1.2/intelcomp+mpi+mkl +# source /etc/profile.d/modules.csh # module use /home/sice500/modulefiles +# setenv I_MPI_LIBRARY_KIND debug # module load -s icc mpi setenv FOR_DUMP_CORE_FILE 1 setenv I_MPI_DEBUG_COREDUMP 1 diff --git a/configuration/scripts/machines/env.ppp6_intel19 b/configuration/scripts/machines/env.ppp6_intel19 index d41242630..6cdd9a036 100644 --- a/configuration/scripts/machines/env.ppp6_intel19 +++ b/configuration/scripts/machines/env.ppp6_intel19 @@ -30,7 +30,7 @@ setenv ICE_MACHINE_ENVNAME intel19 setenv ICE_MACHINE_MAKE make setenv ICE_MACHINE_WKDIR ~/data/site6/cice/runs/ setenv ICE_MACHINE_INPUTDATA /space/hall6/sitestore/eccc/cmd/e/sice500/ -setenv ICE_MACHINE_BASELINE ~/data/site6/cice/baseline/ +setenv ICE_MACHINE_BASELINE ~/data/site6/cice/baselines/ setenv ICE_MACHINE_SUBMIT "qsub" setenv ICE_MACHINE_TPNODE 80 setenv ICE_MACHINE_ACCT P0000000 diff --git a/configuration/scripts/machines/env.robert_intel b/configuration/scripts/machines/env.robert_intel index 43c11a529..d3d9c1eae 100644 --- a/configuration/scripts/machines/env.robert_intel +++ b/configuration/scripts/machines/env.robert_intel @@ -12,7 +12,9 @@ source /fs/ssm/main/opt/ssmuse/ssmuse-1.11/ssmuse_1.11_all/bin/ssmuse-boot.csh > set ssmuse=`which ssmuse-csh` # Intel compiler + Intel MPI source $ssmuse -d /fs/ssm/main/opt/intelcomp/inteloneapi-2022.1.2/intelcomp+mpi+mkl +# source /etc/profile.d/modules.csh # module use /home/sice500/modulefiles +# setenv I_MPI_LIBRARY_KIND debug # module load -s icc mpi setenv FOR_DUMP_CORE_FILE 1 setenv I_MPI_DEBUG_COREDUMP 1 diff --git a/configuration/scripts/machines/env.underhill_intel b/configuration/scripts/machines/env.underhill_intel index 90192853e..bc3eec857 100644 --- a/configuration/scripts/machines/env.underhill_intel +++ b/configuration/scripts/machines/env.underhill_intel @@ -12,7 +12,9 @@ source /fs/ssm/main/opt/ssmuse/ssmuse-1.11/ssmuse_1.11_all/bin/ssmuse-boot.csh > set ssmuse=`which ssmuse-csh` # Intel compiler + Intel MPI source $ssmuse -d /fs/ssm/main/opt/intelcomp/inteloneapi-2022.1.2/intelcomp+mpi+mkl +# source /etc/profile.d/modules.csh # module use /home/sice500/modulefiles +# setenv I_MPI_LIBRARY_KIND debug # module load -s icc mpi setenv FOR_DUMP_CORE_FILE 1 setenv I_MPI_DEBUG_COREDUMP 1 From fea412a55faf1f740934dae40230f2609d57e938 Mon Sep 17 00:00:00 2001 From: Philippe Blain Date: Wed, 17 Aug 2022 11:40:48 -0400 Subject: [PATCH 14/57] ice_calendar: zero-initialize 'nstreams' (#752) The variable ice_calendar::nstreams, which corresponds to the number of output history streams to use for the run, is initialized in ice_history::init_hist depending on the number of non-'x' elements in 'histfreq' in the namelist. 
However, the code does use 'nstreams' before ice_history::init_hist is called, in ice_calendar::calendar when called from ice_calendar::init_calendar. Both 'init_calendar' and 'init_hist' are called from CICE_InitMod::cice_init, in that order, such that the loop that initializes 'write_history' in 'calendar' uses 'nstreams' uninitialized. 'calendar' ends up being called at least once more during 'cice_init', from ice_calendar::advance_timestep, at which point 'nstreams' is correctly defined and 'write_history' is thus correctly initialized, before its first use in 'accum_hist'. To avoid using 'nstreams' uninitialized in the first call to 'calendar' from 'init_calendar', initialize it to zero in 'init_calendar' before calling 'calendar'. This issue was discovered by compiling using the '-init=huge' flag of the Intel compiler. --- cicecore/shared/ice_calendar.F90 | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cicecore/shared/ice_calendar.F90 b/cicecore/shared/ice_calendar.F90 index ad1a87b4c..7bd0c73b2 100644 --- a/cicecore/shared/ice_calendar.F90 +++ b/cicecore/shared/ice_calendar.F90 @@ -204,6 +204,10 @@ subroutine init_calendar dt_dyn = dt/real(ndtd,kind=dbl_kind) ! dynamics et al timestep force_restart_now = .false. + ! initialize nstreams to zero (will be initialized from namelist in 'init_hist') + ! this avoids using it uninitialized in 'calendar' below + nstreams = 0 + #ifdef CESMCOUPLED ! calendar_type set by coupling #else
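The calling-order hazard described in this message is easy to reproduce in isolation. A minimal sketch, with simplified illustrative names (not the actual CICE sources):

      ! Sketch: 'nstreams' only gets its real value later (from namelist),
      ! but the first call into 'calendar' already loops over it.  Zeroing
      ! it first makes that first loop a harmless no-op.
      module calendar_sketch
         implicit none
         integer :: nstreams                 ! set from namelist later
         logical :: write_history(5)
      contains
         subroutine init_calendar_sketch
            nstreams = 0                     ! the fix: defined before use
            call calendar_sketch             ! first call, nstreams = 0
         end subroutine init_calendar_sketch
         subroutine calendar_sketch
            integer :: ns
            do ns = 1, nstreams              ! safe no-op when nstreams = 0
               write_history(ns) = .false.
            enddo
         end subroutine calendar_sketch
      end module calendar_sketch

Without the zero-initialization, the loop bound is undefined on the first pass; compiling with Intel's -init=huge (or gfortran's -finit-integer=) poisons such variables so the bug surfaces immediately.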
From c87dcd3d75830a36bc71fe4578d175b246eb02e6 Mon Sep 17 00:00:00 2001 From: daveh150 Date: Mon, 22 Aug 2022 16:34:57 -0500 Subject: [PATCH 15/57] Added dxgrow, dygrow to facilitate variable spaced grid. Modified rec… (#746) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * Added dxgrow, dygrow to facilitate variable spaced grid. Modified rectgrid to generate grid from center outward using growth factor * Adding vargrid namelist options. * Refactored rectgrid to compute dx/dy first, then ULON/ULAT. Added scale_dxdy flag to check if we want grid spacing scaled. Renamed dxgrow/dygrow to dxscale/dyscale. * Added method to check for odd nx_global/ny_global when applying grid spacing scale factors * Update comments before computing dx/dy * Update comments when checking for even/odd * Made grid_lonref, grid_latref namelist variables. Removed vargrid_suite.ts. Updated the box nml to specify the default grid_lonref and grid_latref for future reference. * Change grid_lonref/grid_latref to lonrefrect,latrefrect. Reduce default vargrid tests to 3 per B,C,CD grid. * Make new subroutine rectgrid_scale_dxdy to implement grid scaling. Remove explicit latrefrec/lonrefrect from set_nml. Make dxscale,dyscale,latrefrec,lonrefrec double precision in ice_in * Add set_nml.scale1 to test vargrid with dxscale/dyscale = 1.d0 * Remove lonrefrec/latrefrec from boxnodyn * Add lonrefrect, latrefrect to documentation * Inserted new scaled grid variables in alphabetical order in documentation --- cicecore/cicedynB/general/ice_init.F90 | 16 +- cicecore/cicedynB/infrastructure/ice_grid.F90 | 298 ++++++++++++++---- configuration/scripts/cice.batch.csh | 4 +- configuration/scripts/ice_in | 5 + configuration/scripts/machines/env.nrlssc_gnu | 4 +- configuration/scripts/options/set_nml.scale1 | 2 + configuration/scripts/options/set_nml.vargrid | 13 + configuration/scripts/tests/gridsys_suite.ts | 32 +- doc/source/user_guide/ug_case_settings.rst | 5 + 9 files changed, 310 insertions(+), 69 deletions(-) create mode 100644 configuration/scripts/options/set_nml.scale1 create mode 100644 configuration/scripts/options/set_nml.vargrid diff --git a/cicecore/cicedynB/general/ice_init.F90 b/cicecore/cicedynB/general/ice_init.F90 index c2cc986f8..e0ebdfbed 100644 --- a/cicecore/cicedynB/general/ice_init.F90 +++ b/cicecore/cicedynB/general/ice_init.F90 @@ -110,8 +110,8 @@ subroutine input_data grid_ice, grid_ice_thrm, grid_ice_dynu, grid_ice_dynv, & grid_ocn, grid_ocn_thrm, grid_ocn_dynu, grid_ocn_dynv, & grid_atm, grid_atm_thrm, grid_atm_dynu, grid_atm_dynv, & - dxrect, dyrect, & - pgl_global_ext + dxrect, dyrect, dxscale, dyscale, scale_dxdy, & + lonrefrect, latrefrect, pgl_global_ext use ice_dyn_shared, only: ndte, kdyn, revised_evp, yield_curve, & evp_algorithm, visc_method, & seabed_stress, seabed_stress_method, & @@ -212,6 +212,8 @@ subroutine input_data bathymetry_file, use_bathymetry, nfsd, bathymetry_format, & ncat, nilyr, nslyr, nblyr, & kcatbound, gridcpl_file, dxrect, dyrect, & + dxscale, dyscale, lonrefrect, latrefrect, & + scale_dxdy, & close_boundaries, orca_halogrid, grid_ice, kmt_type, & grid_atm, grid_ocn @@ -398,6 +400,11 @@ subroutine input_data ksno = 0.3_dbl_kind ! snow thermal conductivity dxrect = 0.0_dbl_kind ! user defined grid spacing in cm in x direction dyrect = 0.0_dbl_kind ! user defined grid spacing in cm in y direction + lonrefrect = -156.50_dbl_kind ! lower left corner lon for rectgrid + latrefrect = 71.35_dbl_kind ! lower left corner lat for rectgrid + scale_dxdy = .false. ! apply dxscale, dyscale to rectgrid + dxscale = 1.0_dbl_kind ! user defined rectgrid x-grid scale factor (e.g., 1.02) + dyscale = 1.0_dbl_kind ! user defined rectgrid y-grid scale factor (e.g., 1.02) close_boundaries = .false. ! true = set land on edges of grid seabed_stress= .false. ! if true, seabed stress for landfast is on seabed_stress_method = 'LKD'! LKD = Lemieux et al 2015, probabilistic = Dupont et al. in prep
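For orientation, a hypothetical ice_in fragment exercising the new options; the grid_nml group name matches the namelist being extended above, but the spacing values here are illustrative assumptions, not taken from the shipped ice_in:

   &grid_nml
      grid_type    = 'rectangular'
      scale_dxdy   = .true.
      dxrect       = 16.e5        ! base spacing (cm)
      dyrect       = 16.e5
      dxscale      = 1.02         ! 2% growth per cell away from center
      dyscale      = 1.02
      lonrefrect   = -156.50      ! lower-left reference lon (deg)
      latrefrect   =   71.35      ! lower-left reference lat (deg)
   /

With dxscale = dyscale = 1.0 (the defaults, and the set_nml.scale1 test), the scaled path reproduces a uniform grid.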
@@ -853,6 +860,11 @@ subroutine input_data call broadcast_scalar(grid_format, master_task) call broadcast_scalar(dxrect, master_task) call broadcast_scalar(dyrect, master_task) + call broadcast_scalar(scale_dxdy, master_task) + call broadcast_scalar(dxscale, master_task) + call broadcast_scalar(dyscale, master_task) + call broadcast_scalar(lonrefrect, master_task) + call broadcast_scalar(latrefrect, master_task) call broadcast_scalar(close_boundaries, master_task) call broadcast_scalar(grid_type, master_task) call broadcast_scalar(grid_ice, master_task) diff --git a/cicecore/cicedynB/infrastructure/ice_grid.F90 b/cicecore/cicedynB/infrastructure/ice_grid.F90 index 523c7ea2c..723c6be76 100644 --- a/cicecore/cicedynB/infrastructure/ice_grid.F90 +++ b/cicecore/cicedynB/infrastructure/ice_grid.F90 @@ -129,6 +129,15 @@ module ice_grid dxrect, & ! user_specified spacing (cm) in x-direction (uniform HTN) dyrect ! user_specified spacing (cm) in y-direction (uniform HTE) + ! scale factors for variable spaced grid + real (kind=dbl_kind), public :: & + dxscale, & ! scale factor for grid spacing in x direction (e.g., 1.02) + dyscale ! scale factor for grid spacing in y direction (e.g., 1.02) + + real (kind=dbl_kind), public :: & + lonrefrect, & ! lower left lon for rectgrid + latrefrect ! lower left lat for rectgrid + ! Corners of grid boxes for history output real (kind=dbl_kind), dimension (:,:,:,:), allocatable, public :: & lont_bounds, & ! longitude of gridbox corners for T point @@ -171,7 +180,8 @@ module ice_grid logical (kind=log_kind), public :: & use_bathymetry, & ! flag for reading in bathymetry_file - pgl_global_ext ! flag for init primary grid lengths (global ext.) + pgl_global_ext, & ! flag for init primary grid lengths (global ext.) + scale_dxdy ! flag to apply scale factor to vary dx/dy in rectgrid logical (kind=log_kind), dimension (:,:,:), allocatable, public :: & tmask , & ! land/boundary mask, thickness (T-cell) @@ -1360,7 +1370,7 @@ subroutine rectgrid imid, jmid real (kind=dbl_kind) :: & - length, & + length, & rad_to_deg real (kind=dbl_kind), dimension(:,:), allocatable :: & work_g1 @@ -1383,69 +1393,71 @@ subroutine rectgrid allocate(work_g1(nx_global,ny_global)) - ! Weddell Sea - ! lower left corner of grid is 55W, 75S - - ! Barrow AK - ! lower left corner of grid is 156.5W, 71.35N - - if (my_task == master_task) then - work_g1 = c0 - length = dxrect*cm_to_m/radius*rad_to_deg - -! work_g1(1,:) = -55._dbl_kind ! Weddell Sea - work_g1(1,:) = -156.5_dbl_kind ! Barrow AK - - do j = 1, ny_global - do i = 2, nx_global - work_g1(i,j) = work_g1(i-1,j) + length ! ULON - enddo - enddo - work_g1(:,:) = work_g1(:,:) / rad_to_deg - endif - call gridbox_verts(work_g1,lont_bounds) - call scatter_global(ULON, work_g1, master_task, distrb_info, & - field_loc_NEcorner, field_type_scalar) - call ice_HaloExtrapolate(ULON, distrb_info, & - ew_boundary_type, ns_boundary_type) - - if (my_task == master_task) then - work_g1 = c0 - length = dyrect*cm_to_m/radius*rad_to_deg - -! work_g1(:,1) = -75._dbl_kind ! Weddell Sea - work_g1(:,1) = 71.35_dbl_kind ! Barrow AK + if (scale_dxdy) then + ! scale grid spacing from center outward. + ! this differs from the original method in that it + ! needs to define grid spacing before lat/lon. + ! original rectgrid defines lat/lon first + call rectgrid_scale_dxdy + else + ! rectgrid with no grid spacing scaling. + ! original method, with the addition of namelist lat/lon reference values + + if (my_task == master_task) then + work_g1 = c0 + length = dxrect*cm_to_m/radius*rad_to_deg + + work_g1(1,:) = lonrefrect ! reference lon from namelist + + do j = 1, ny_global + do i = 2, nx_global + work_g1(i,j) = work_g1(i-1,j) + length ! ULON + enddo + enddo + work_g1(:,:) = work_g1(:,:) / rad_to_deg + endif + call scatter_global(ULON, work_g1, master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + call ice_HaloExtrapolate(ULON, distrb_info, & + ew_boundary_type, ns_boundary_type) + + if (my_task == master_task) then + work_g1 = c0 + length = dyrect*cm_to_m/radius*rad_to_deg + + work_g1(:,1) = latrefrect ! reference latitude from namelist + + do i = 1, nx_global + do j = 2, ny_global + work_g1(i,j) = work_g1(i,j-1) + length ! ULAT + enddo + enddo + work_g1(:,:) = work_g1(:,:) / rad_to_deg + endif + call scatter_global(ULAT, work_g1, master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + call ice_HaloExtrapolate(ULAT, distrb_info, & + ew_boundary_type, ns_boundary_type) - do i = 1, nx_global - do j = 2, ny_global - work_g1(i,j) = work_g1(i,j-1) + length ! ULAT - enddo - enddo - work_g1(:,:) = work_g1(:,:) / rad_to_deg - endif - call gridbox_verts(work_g1,latt_bounds) - call scatter_global(ULAT, work_g1, master_task, distrb_info, & - field_loc_NEcorner, field_type_scalar) - call ice_HaloExtrapolate(ULAT, distrb_info, & - ew_boundary_type, ns_boundary_type) + if (my_task == master_task) then + do j = 1, ny_global + do i = 1, nx_global + work_g1(i,j) = dxrect ! HTN + enddo + enddo + endif + call primary_grid_lengths_HTN(work_g1) ! dxU, dxT, dxN, dxE - if (my_task == master_task) then - do j = 1, ny_global - do i = 1, nx_global - work_g1(i,j) = dxrect ! HTN - enddo - enddo - endif - call primary_grid_lengths_HTN(work_g1) ! dxU, dxT, dxN, dxE + if (my_task == master_task) then + do j = 1, ny_global + do i = 1, nx_global + work_g1(i,j) = dyrect ! HTE + enddo + enddo + endif + call primary_grid_lengths_HTE(work_g1) ! dyU, dyT, dyN, dyE - if (my_task == master_task) then - do j = 1, ny_global - do i = 1, nx_global - work_g1(i,j) = dyrect ! HTE - enddo - enddo - endif - call primary_grid_lengths_HTE(work_g1) ! dyU, dyT, dyN, dyE + endif ! scale_dxdy !----------------------------------------------------------------- ! Construct T-cell land mask @@ -1526,6 +1538,168 @@ subroutine rectgrid end subroutine rectgrid
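The spacing recurrence implemented by rectgrid_scale_dxdy below is simple: dx equals dxrect at the center cell and grows geometrically by a factor dxscale per cell outward. A standalone 1-d sketch of that recurrence (simplified; the actual subroutine fills the 2-d work_g1 array and uses center1/center2 to handle even nx_global):

      ! Sketch: geometric growth of cell width away from the grid center.
      program dx_scale_demo
         implicit none
         integer, parameter :: nx = 9
         integer            :: i, center
         real(8)            :: dx(nx)
         real(8), parameter :: dxrect = 16.0d5, dxscale = 1.02d0  ! illustrative values
         center = (nx+1)/2            ! odd nx: single center cell
         dx(center) = dxrect
         do i = center+1, nx
            dx(i) = dx(i-1)*dxscale   ! grow outward to the right
         enddo
         do i = center-1, 1, -1
            dx(i) = dx(i+1)*dxscale   ! mirror outward to the left
         enddo
         print '(9f12.1)', dx
      end program dx_scale_demo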
original method with addition to use namelist lat/lon reference + + if (my_task == master_task) then + work_g1 = c0 + length = dxrect*cm_to_m/radius*rad_to_deg + + work_g1(1,:) = lonrefrect ! reference lon from namelist + + do j = 1, ny_global + do i = 2, nx_global + work_g1(i,j) = work_g1(i-1,j) + length ! ULON + enddo + enddo + work_g1(:,:) = work_g1(:,:) / rad_to_deg + endif + call scatter_global(ULON, work_g1, master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + call ice_HaloExtrapolate(ULON, distrb_info, & + ew_boundary_type, ns_boundary_type) + + if (my_task == master_task) then + work_g1 = c0 + length = dyrect*cm_to_m/radius*rad_to_deg + + work_g1(:,1) = latrefrect ! reference latitude from namelist + + do i = 1, nx_global + do j = 2, ny_global + work_g1(i,j) = work_g1(i,j-1) + length ! ULAT + enddo + enddo + work_g1(:,:) = work_g1(:,:) / rad_to_deg + endif + call scatter_global(ULAT, work_g1, master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + call ice_HaloExtrapolate(ULAT, distrb_info, & + ew_boundary_type, ns_boundary_type) - do i = 1, nx_global - do j = 2, ny_global - work_g1(i,j) = work_g1(i,j-1) + length ! ULAT - enddo - enddo - work_g1(:,:) = work_g1(:,:) / rad_to_deg - endif - call gridbox_verts(work_g1,latt_bounds) - call scatter_global(ULAT, work_g1, master_task, distrb_info, & - field_loc_NEcorner, field_type_scalar) - call ice_HaloExtrapolate(ULAT, distrb_info, & - ew_boundary_type, ns_boundary_type) + if (my_task == master_task) then + do j = 1, ny_global + do i = 1, nx_global + work_g1(i,j) = dxrect ! HTN + enddo + enddo + endif + call primary_grid_lengths_HTN(work_g1) ! dxU, dxT, dxN, dxE - if (my_task == master_task) then - do j = 1, ny_global - do i = 1, nx_global - work_g1(i,j) = dxrect ! HTN - enddo - enddo - endif - call primary_grid_lengths_HTN(work_g1) ! dxU, dxT, dxN, dxE + if (my_task == master_task) then + do j = 1, ny_global + do i = 1, nx_global + work_g1(i,j) = dyrect ! HTE + enddo + enddo + endif + call primary_grid_lengths_HTE(work_g1) ! dyU, dyT, dyN, dyE - if (my_task == master_task) then - do j = 1, ny_global - do i = 1, nx_global - work_g1(i,j) = dyrect ! HTE - enddo - enddo - endif - call primary_grid_lengths_HTE(work_g1) ! dyU, dyT, dyN, dyE + endif ! scale_dxdy !----------------------------------------------------------------- ! Construct T-cell land mask @@ -1526,6 +1538,168 @@ subroutine rectgrid end subroutine rectgrid +!======================================================================= + + subroutine rectgrid_scale_dxdy + + ! generate a variable spaced rectangluar grid. + ! extend spacing from center of grid outward. + use ice_constants, only: c0, c1, c2, radius, cm_to_m, & + field_loc_center, field_loc_NEcorner, field_type_scalar + + integer (kind=int_kind) :: & + i, j, iblk, & + imid, jmid, & + center1, center2 ! array centers for expanding dx, dy + + real (kind=dbl_kind) :: & + length, & + rad_to_deg + + real (kind=dbl_kind), dimension(:,:), allocatable :: & + work_g1 + + character(len=*), parameter :: subname = '(rectgrid_scale_dxdy)' + + call icepack_query_parameters(rad_to_deg_out=rad_to_deg) + + allocate(work_g1(nx_global,ny_global)) + + ! determine dx spacing + ! strategy: initialize with dxrect. + ! if want to scale the grid, work from center outwards, + ! multplying neighbor cell by scale factor. + ! this assumes dx varies in x direction only. + ! (i.e, dx is the same across same y location) + if (my_task == master_task) then + + ! initialize with initial dxrect + work_g1(:,:) = dxrect + + ! 
check if nx is even or odd + ! if even, middle 2 columns are center + ! of odd, middle 1 column is center + if (mod(nx_global,2) == 0) then ! nx_global is even + + ! with even number of x locatons, + ! the center two y columns are center + center1 = nx_global/2 ! integer math + center2 = center1 + 1 ! integer math + + else ! nx_global = odd + ! only one center index. set center2=center1 + center1 = ceiling(real(nx_global/2),int_kind) + center2 = center1 + endif + + ! note loop over only half the x grid points (center1)-1 + ! working from the center outward. + do j = 1, ny_global + do i = 1, center1-1 + ! work from center1 to left + work_g1(center1-i,j) = dxscale*work_g1(center1-i+1,j) + + ! work from center2 to right + work_g1(center2+i,j) = dxscale*work_g1(center2+i-1,j) + enddo ! i + enddo ! j + + endif ! my_task == master_task + + + ! note work_g1 is converted to meters in primary_grid_lengths_HTN + call primary_grid_lengths_HTN(work_g1) ! dxU, dxT, dxN, dxE + + ! make ULON array + if (my_task == master_task) then + + ! make first column reference lon in radians. + ! the remaining work_g1 is still dx in meters + work_g1(1,:) = lonrefrect/rad_to_deg ! radians + + ! loop over remaining points and add spacing to successive + ! x locations + do j = 1, ny_global + do i = 2, nx_global ! start from i=2. i=1 is lonrefrect + length = work_g1(i,j)/radius ! grid spacing in radians + work_g1(i,j) = work_g1(i-1,j) + length ! ULON + enddo ! i + enddo ! j + endif ! mytask == master_task + call scatter_global(ULON, work_g1, master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + call ice_HaloExtrapolate(ULON, distrb_info, & + ew_boundary_type, ns_boundary_type) + + ! determine dy spacing + ! strategy: initialize with dyrect. + ! if want to scale the grid, work from center outwards, + ! multplying neighbor cell by scale factor. + ! this assumes dy varies in y direction only. + ! (i.e, dy is the same across same x location) + if (my_task == master_task) then + + ! initialize with initial dxrect + work_g1(:,:) = dyrect + + ! check if ny is even or odd + ! if even, middle 2 rows are center + ! of odd, middle 1 row is center + if (mod(ny_global,2) == 0) then ! ny_global is even + + ! with even number of x locatons, + ! the center two y columns are center + center1 = ny_global/2 ! integer math + center2 = center1 + 1 ! integer math + + else ! ny_global = odd + ! only one center index. set center2=center1 + center1 = ceiling(real(ny_global/2),int_kind) + center2 = center1 + endif + + ! note loop over only half the y grid points (center1)-1 + ! working from the center outward. + do i = 1, nx_global + do j = 1, center1-1 + ! work from center1 to bottom + work_g1(i,center1-j) = dyscale*work_g1(i,center1-j+1) + + ! work from center2 to top + work_g1(i,center2+j) = dyscale*work_g1(i,center2+j-1) + enddo ! i + enddo ! j + endif ! mytask == master_task + ! note work_g1 is converted to meters primary_grid_lengths_HTE + call primary_grid_lengths_HTE(work_g1) ! dyU, dyT, dyN, dyE + + ! make ULAT array + if (my_task == master_task) then + + ! make first row reference lat in radians. + ! the remaining work_g1 is still dy in meters + work_g1(:,1) = latrefrect/rad_to_deg ! radians + + + ! loop over remaining points and add spacing to successive + ! x locations + do j = 2, ny_global ! start from j=2. j=1 is latrefrect + do i = 1, nx_global + length = work_g1(i,j)/radius ! grid spacing in radians + work_g1(i,j) = work_g1(i,j-1) + length ! ULAT + enddo ! i + enddo ! j + endif ! 
mytask == master_task + call scatter_global(ULAT, work_g1, master_task, distrb_info, & + field_loc_NEcorner, field_type_scalar) + call ice_HaloExtrapolate(ULAT, distrb_info, & + ew_boundary_type, ns_boundary_type) + + + deallocate(work_g1) + + end subroutine rectgrid_scale_dxdy + !======================================================================= ! Complex land mask for testing box cases diff --git a/configuration/scripts/cice.batch.csh b/configuration/scripts/cice.batch.csh index fbe172f51..d737f78ba 100755 --- a/configuration/scripts/cice.batch.csh +++ b/configuration/scripts/cice.batch.csh @@ -90,8 +90,8 @@ else if (${ICE_MACHINE} =~ nrlssc*) then # nrlssc queue system has nodes with different task per node if (${taskpernode} <= 12) set tpnstr = 'twelve' if (${taskpernode} == 20) set tpnstr = 'twenty' -if (${taskpernode} == 24) set tpnstr = 'twentyfour' -if (${taskpernode} == 28) set tpnstr = 'twentyeight' +if (${taskpernode} >= 24) set tpnstr = 'twentyfour' +#if (${taskpernode} == 28) set tpnstr = 'twentyeight' cat >> ${jobfile} < Date: Tue, 23 Aug 2022 09:03:15 -0700 Subject: [PATCH 16/57] Update Pull Request Template, add question about updating Icepack (#754) * Update Pull Request Template, add question about updating Icepack --- .github/PULL_REQUEST_TEMPLATE.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 6256f9bc6..77cf56ddc 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -18,6 +18,9 @@ please refer to: Date: Wed, 24 Aug 2022 23:27:25 +0200 Subject: [PATCH 17/57] Refactored evp sub cycling loop (#756) * Refactored evp sub cycling loop * corrected indent and case for dyn_haloUptdate --- cicecore/cicedynB/dynamics/ice_dyn_evp.F90 | 66 ++++++++++++++-------- 1 file changed, 43 insertions(+), 23 deletions(-) diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 index c2060285a..b3fe82ff0 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 @@ -745,9 +745,10 @@ subroutine evp (dt) else ! evp_algorithm == standard_2d (Standard CICE) call ice_timer_start(timer_evp_2d) - do ksub = 1,ndte ! subcycling - if (grid_ice == "B") then + if (grid_ice == "B") then + + do ksub = 1,ndte ! subcycling !$OMP PARALLEL DO PRIVATE(iblk,strtmp) SCHEDULE(runtime) do iblk = 1, nblocks @@ -810,7 +811,17 @@ subroutine evp (dt) enddo ! iblk !$OMP END PARALLEL DO - elseif (grid_ice == "C") then + ! U fields at NE corner + ! calls ice_haloUpdate, controls bundles and masks + call dyn_haloUpdate (halo_info, halo_info_mask, & + field_loc_NEcorner, field_type_vector, & + uvel, vvel) + + enddo ! sub cycling + + elseif (grid_ice == "C") then + + do ksub = 1,ndte ! subcycling !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks @@ -837,7 +848,7 @@ subroutine evp (dt) !$OMP END PARALLEL DO ! calls ice_haloUpdate, controls bundles and masks - call dyn_HaloUpdate (halo_info, halo_info_mask, & + call dyn_haloUpdate (halo_info, halo_info_mask, & field_loc_NEcorner, field_type_scalar, & shearU) @@ -877,7 +888,7 @@ subroutine evp (dt) !$OMP END PARALLEL DO ! calls ice_haloUpdate, controls bundles and masks - call dyn_HaloUpdate (halo_info, halo_info_mask, & + call dyn_haloUpdate (halo_info, halo_info_mask, & field_loc_center, field_type_scalar, & zetax2T, etax2T, stresspT, stressmT) @@ -900,7 +911,7 @@ subroutine evp (dt) !$OMP END PARALLEL DO ! 
calls ice_haloUpdate, controls bundles and masks - call dyn_HaloUpdate (halo_info , halo_info_mask, & + call dyn_haloUpdate (halo_info , halo_info_mask, & field_loc_NEcorner, field_type_scalar, & stress12U) @@ -958,10 +969,10 @@ subroutine evp (dt) !$OMP END PARALLEL DO ! calls ice_haloUpdate, controls bundles and masks - call dyn_HaloUpdate (halo_info, halo_info_mask, & + call dyn_haloUpdate (halo_info, halo_info_mask, & field_loc_Eface, field_type_vector, & uvelE) - call dyn_HaloUpdate (halo_info, halo_info_mask, & + call dyn_haloUpdate (halo_info, halo_info_mask, & field_loc_Nface, field_type_vector, & vvelN) @@ -971,10 +982,10 @@ subroutine evp (dt) vvelE(:,:,:) = vvelE(:,:,:)*epm(:,:,:) ! calls ice_haloUpdate, controls bundles and masks - call dyn_HaloUpdate (halo_info, halo_info_mask, & + call dyn_haloUpdate (halo_info, halo_info_mask, & field_loc_Nface, field_type_vector, & uvelN) - call dyn_HaloUpdate (halo_info, halo_info_mask, & + call dyn_haloUpdate (halo_info, halo_info_mask, & field_loc_Eface, field_type_vector, & vvelE) @@ -983,8 +994,17 @@ subroutine evp (dt) uvel(:,:,:) = uvel(:,:,:)*uvm(:,:,:) vvel(:,:,:) = vvel(:,:,:)*uvm(:,:,:) + ! U fields at NE corner + ! calls ice_haloUpdate, controls bundles and masks + call dyn_haloUpdate (halo_info, halo_info_mask, & + field_loc_NEcorner, field_type_vector, & + uvel, vvel) + + enddo ! subcycling - elseif (grid_ice == "CD") then + elseif (grid_ice == "CD") then + + do ksub = 1,ndte ! subcycling !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks @@ -1020,7 +1040,7 @@ subroutine evp (dt) !$OMP END PARALLEL DO ! calls ice_haloUpdate, controls bundles and masks - call dyn_HaloUpdate (halo_info, halo_info_mask, & + call dyn_haloUpdate (halo_info, halo_info_mask, & field_loc_center, field_type_scalar, & zetax2T, etax2T) @@ -1065,10 +1085,10 @@ subroutine evp (dt) !$OMP END PARALLEL DO ! calls ice_haloUpdate, controls bundles and masks - call dyn_HaloUpdate (halo_info, halo_info_mask, & + call dyn_haloUpdate (halo_info, halo_info_mask, & field_loc_center, field_type_scalar, & stresspT, stressmT, stress12T) - call dyn_HaloUpdate (halo_info, halo_info_mask, & + call dyn_haloUpdate (halo_info, halo_info_mask, & field_loc_NEcorner,field_type_scalar, & stresspU, stressmU, stress12U) @@ -1148,10 +1168,10 @@ subroutine evp (dt) !$OMP END PARALLEL DO ! calls ice_haloUpdate, controls bundles and masks - call dyn_HaloUpdate (halo_info, halo_info_mask, & + call dyn_haloUpdate (halo_info, halo_info_mask, & field_loc_Eface, field_type_vector, & uvelE, vvelE) - call dyn_HaloUpdate (halo_info, halo_info_mask, & + call dyn_haloUpdate (halo_info, halo_info_mask, & field_loc_Nface, field_type_vector, & uvelN, vvelN) @@ -1160,16 +1180,16 @@ subroutine evp (dt) uvel(:,:,:) = uvel(:,:,:)*uvm(:,:,:) vvel(:,:,:) = vvel(:,:,:)*uvm(:,:,:) + ! U fields at NE corner + ! calls ice_haloUpdate, controls bundles and masks + call dyn_haloUpdate (halo_info, halo_info_mask, & + field_loc_NEcorner, field_type_vector, & + uvel, vvel) - endif ! grid_ice + enddo ! subcycling - ! U fields at NE corner - ! calls ice_haloUpdate, controls bundles and masks - call dyn_HaloUpdate (halo_info, halo_info_mask, & - field_loc_NEcorner, field_type_vector, & - uvel, vvel) + endif ! grid_ice - enddo ! subcycling call ice_timer_stop(timer_evp_2d) endif ! 
evp_algorithm

From 007fbff9b1012b4fb1e447141a888d18b6c24d19 Mon Sep 17 00:00:00 2001
From: Philippe Blain
Date: Wed, 24 Aug 2022 17:29:19 -0400
Subject: [PATCH 18/57] ice_grid: initialize 'l_readCenter' for all grid types
 (#758)

In 3fedc78 (Allow for read of tlat, tlon, anglet with popgrid (#463),
2020-06-24), ice_grid::init_grid2 was changed so that ice_grid::Tlatlon,
which computes the TLAT and TLON arrays from ULAT and ULON, is only
called if the private module variable 'l_readCenter' is false. The idea
is that if the grid file contains a variable 'anglet', then it is
assumed that it also contains variables 'tlon' and 'tlat', and so these
fields are read directly instead of being computed.

This logic, however, was only implemented in ice_grid::popgrid_nc, which
sets 'l_readCenter' depending on the presence or absence of 'anglet' in
the grid file. This means that if 'popgrid_nc' is not called (for
example with "grid_format='bin'", in which case init_grid2 calls
'popgrid' and not 'popgrid_nc'), then 'l_readCenter' is used
uninitialized in init_grid2, and so it's possible that 'Tlatlon' is not
called, if 'l_readCenter' happens to be initialized to true. This in
turn leads to 'TLAT' and 'TLON' being uninitialized, and the code
failing when accessing these arrays if compiling with NaN
initialization.

Fix this by initializing 'l_readCenter' at the beginning of init_grid2,
such that it is initialized for all choices of 'grid_format' and
'grid_type'. Remove the initialization in 'popgrid_nc'.
---
 cicecore/cicedynB/infrastructure/ice_grid.F90 | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/cicecore/cicedynB/infrastructure/ice_grid.F90 b/cicecore/cicedynB/infrastructure/ice_grid.F90
index 723c6be76..a7d8c2357 100644
--- a/cicecore/cicedynB/infrastructure/ice_grid.F90
+++ b/cicecore/cicedynB/infrastructure/ice_grid.F90
@@ -485,6 +485,8 @@ subroutine init_grid2
 ! lat, lon, cell widths, angle, land mask
 !-----------------------------------------------------------------

+      l_readCenter = .false.
+
       call icepack_query_parameters(pi_out=pi, pi2_out=pi2, puny_out=puny)
       call icepack_warnings_flush(nu_diag)
       if (icepack_warnings_aborted()) call abort_ice(error_message=subname, &
@@ -955,7 +957,6 @@ subroutine popgrid_nc

       call ice_open_nc(kmt_file,fid_kmt)

       diag = .true.       ! write diagnostic info
-      l_readCenter = .false.
 !-----------------------------------------------------------------
 ! topography
 !-----------------------------------------------------------------
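Note: the hazard described above is easy to reproduce in a reduced form; a
minimal sketch (illustrative only, not CICE code):

    module grid_demo
       implicit none
       logical :: l_read            ! no initializer: undefined at startup
    contains
       subroutine read_grid(use_nc)
          logical, intent(in) :: use_nc
          if (use_nc) l_read = .true.   ! only one code path ever sets it
       end subroutine read_grid
       subroutine derive_fields
          ! if read_grid took the other path, l_read is still undefined here,
          ! so whether this branch runs depends on whatever value it holds
          if (.not. l_read) print *, 'computing TLAT/TLON'
       end subroutine derive_fields
    end module grid_demo

Setting l_read = .false. unconditionally before any branch, as this patch does
for l_readCenter, removes the dependence on the compiler's default
initialization.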
From 658799554aafd24f9c2fd575b68b47b281b03027 Mon Sep 17 00:00:00 2001
From: TRasmussen <33480590+TillRasmussen@users.noreply.github.com>
Date: Thu, 22 Sep 2022 04:47:40 +0200
Subject: [PATCH 19/57] move deformation out of loop for B grid only (#755)

* move deformation out of loop for B grid only

* Moved C and CD grid deformations

* correct location of bgrid deformation
---
 cicecore/cicedynB/dynamics/ice_dyn_evp.F90 | 107 +++++++++++----------
 1 file changed, 58 insertions(+), 49 deletions(-)

diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90
index b3fe82ff0..39f91e418 100644
--- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90
+++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90
@@ -774,22 +774,6 @@ subroutine evp (dt)
                            stress12_3(:,:,iblk), stress12_4(:,:,iblk), &
                            strtmp    (:,:,:) )

-      !-----------------------------------------------------------------
-      ! on last subcycle, save quantities for mechanical redistribution
-      !-----------------------------------------------------------------
-         if (ksub == ndte) then
-            call deformations (nx_block , ny_block , &
-                               icellt    (iblk), &
-                               indxti  (:,iblk), indxtj   (:,iblk), &
-                               uvel  (:,:,iblk), vvel   (:,:,iblk), &
-                               dxT   (:,:,iblk), dyT    (:,:,iblk), &
-                               cxp   (:,:,iblk), cyp    (:,:,iblk), &
-                               cxm   (:,:,iblk), cym    (:,:,iblk), &
-                               tarear(:,:,iblk), &
-                               shear (:,:,iblk), divu   (:,:,iblk), &
-                               rdg_conv(:,:,iblk), rdg_shear(:,:,iblk) )
-         endif
-
       !-----------------------------------------------------------------
       ! momentum equation
       !-----------------------------------------------------------------
@@ -819,6 +803,26 @@ subroutine evp (dt)

       enddo                     ! sub cycling

+      !-----------------------------------------------------------------
+      ! save quantities for mechanical redistribution
+      !-----------------------------------------------------------------
+
+      !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime)
+      do iblk = 1, nblocks
+         call deformations (nx_block , ny_block , &
+                            icellt    (iblk), &
+                            indxti  (:,iblk), indxtj   (:,iblk), &
+                            uvel  (:,:,iblk), vvel   (:,:,iblk), &
+                            dxT   (:,:,iblk), dyT    (:,:,iblk), &
+                            cxp   (:,:,iblk), cyp    (:,:,iblk), &
+                            cxm   (:,:,iblk), cym    (:,:,iblk), &
+                            tarear(:,:,iblk), &
+                            shear (:,:,iblk), divu   (:,:,iblk), &
+                            rdg_conv(:,:,iblk), rdg_shear(:,:,iblk) )
+      enddo
+      !$OMP END PARALLEL DO
+
+
    elseif (grid_ice == "C") then

       do ksub = 1,ndte    ! subcycling
@@ -866,24 +870,6 @@ subroutine evp (dt)
                               zetax2T  (:,:,iblk), etax2T   (:,:,iblk), &
                               stresspT (:,:,iblk), stressmT (:,:,iblk))

-      !-----------------------------------------------------------------
-      ! on last subcycle, save quantities for mechanical redistribution
-      !-----------------------------------------------------------------
-            if (ksub == ndte) then
-
-               call deformationsC_T (nx_block , ny_block , &
-                                     icellt    (iblk), &
-                                     indxti  (:,iblk), indxtj   (:,iblk), &
-                                     uvelE (:,:,iblk), vvelE  (:,:,iblk), &
-                                     uvelN (:,:,iblk), vvelN  (:,:,iblk), &
-                                     dxN   (:,:,iblk), dyE    (:,:,iblk), &
-                                     dxT   (:,:,iblk), dyT    (:,:,iblk), &
-                                     tarear(:,:,iblk), uarea  (:,:,iblk), &
-                                     shearU(:,:,iblk), &
-                                     shear (:,:,iblk), divu   (:,:,iblk), &
-                                     rdg_conv(:,:,iblk), rdg_shear(:,:,iblk))
-
-            endif
          enddo
          !$OMP END PARALLEL DO
@@ -1002,6 +988,26 @@ subroutine evp (dt)

       enddo                     ! subcycling

+      !-----------------------------------------------------------------
+      ! save quantities for mechanical redistribution
+      !-----------------------------------------------------------------
+
+      !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime)
+      do iblk = 1, nblocks
+         call deformationsC_T (nx_block , ny_block , &
+                               icellt    (iblk), &
+                               indxti  (:,iblk), indxtj   (:,iblk), &
+                               uvelE (:,:,iblk), vvelE  (:,:,iblk), &
+                               uvelN (:,:,iblk), vvelN  (:,:,iblk), &
+                               dxN   (:,:,iblk), dyE    (:,:,iblk), &
+                               dxT   (:,:,iblk), dyT    (:,:,iblk), &
+                               tarear(:,:,iblk), uarea  (:,:,iblk), &
+                               shearU(:,:,iblk), &
+                               shear (:,:,iblk), divu   (:,:,iblk), &
+                               rdg_conv(:,:,iblk), rdg_shear(:,:,iblk))
+      enddo
+      !$OMP END PARALLEL DO
+
    elseif (grid_ice == "CD") then

       do ksub = 1,ndte    ! subcycling
@@ -1021,21 +1027,6 @@ subroutine evp (dt)
                                stresspT  (:,:,iblk), stressmT  (:,:,iblk), &
                                stress12T (:,:,iblk) )

-      !-----------------------------------------------------------------
-      ! 
on last subcycle, save quantities for mechanical redistribution - !----------------------------------------------------------------- - if (ksub == ndte) then - call deformationsCD_T (nx_block , ny_block , & - icellt (iblk), & - indxti (:,iblk), indxtj (:,iblk), & - uvelE (:,:,iblk), vvelE (:,:,iblk), & - uvelN (:,:,iblk), vvelN (:,:,iblk), & - dxN (:,:,iblk), dyE (:,:,iblk), & - dxT (:,:,iblk), dyT (:,:,iblk), & - tarear (:,:,iblk), & - shear (:,:,iblk), divu (:,:,iblk), & - rdg_conv(:,:,iblk), rdg_shear(:,:,iblk)) - endif enddo !$OMP END PARALLEL DO @@ -1188,6 +1179,24 @@ subroutine evp (dt) enddo ! subcycling + !----------------------------------------------------------------- + ! save quantities for mechanical redistribution + !----------------------------------------------------------------- + + !$OMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks + call deformationsCD_T (nx_block , ny_block , & + icellt (iblk), & + indxti (:,iblk), indxtj (:,iblk), & + uvelE (:,:,iblk), vvelE (:,:,iblk), & + uvelN (:,:,iblk), vvelN (:,:,iblk), & + dxN (:,:,iblk), dyE (:,:,iblk), & + dxT (:,:,iblk), dyT (:,:,iblk), & + tarear (:,:,iblk), & + shear (:,:,iblk), divu (:,:,iblk), & + rdg_conv(:,:,iblk), rdg_shear(:,:,iblk)) + enddo + !$OMP END PARALLEL DO endif ! grid_ice call ice_timer_stop(timer_evp_2d) From 6399af75260479a66675ad2b00ffc24ec46d2e10 Mon Sep 17 00:00:00 2001 From: Tony Craig Date: Thu, 29 Sep 2022 12:19:25 -0700 Subject: [PATCH 20/57] Update dEdd implementation (#760) * Update dEdd implementation - Update Icepack with several fixes (changes answers for tr_aero) - Update bgc aerosol table to higher precision - Add modal aerosol tests - Update test suites * Update Icepack including dEdd fixes --- cicecore/cicedynB/general/ice_forcing_bgc.F90 | 144 ++++++++++++------ configuration/scripts/options/set_nml.bgczm | 30 ++++ configuration/scripts/options/set_nml.modal | 4 + configuration/scripts/tests/base_suite.ts | 3 +- configuration/scripts/tests/io_suite.ts | 10 +- configuration/scripts/tests/nothread_suite.ts | 2 +- configuration/scripts/tests/omp_suite.ts | 12 +- icepack | 2 +- 8 files changed, 145 insertions(+), 62 deletions(-) create mode 100644 configuration/scripts/options/set_nml.bgczm create mode 100644 configuration/scripts/options/set_nml.modal diff --git a/cicecore/cicedynB/general/ice_forcing_bgc.F90 b/cicecore/cicedynB/general/ice_forcing_bgc.F90 index fc440834c..2f07d05f1 100644 --- a/cicecore/cicedynB/general/ice_forcing_bgc.F90 +++ b/cicecore/cicedynB/general/ice_forcing_bgc.F90 @@ -889,68 +889,116 @@ subroutine faero_optics ! this data is used in bulk aerosol treatment in dEdd radiation kaer_tab = reshape((/ & ! aerosol mass extinction cross section (m2/kg) - 11580.61872, 5535.41835, 2793.79690, & - 25798.96479, 11536.03871, 4688.24207, & - 196.49772, 204.14078, 214.42287, & - 2665.85867, 2256.71027, 820.36024, & - 840.78295, 1028.24656, 1163.03298, & - 387.51211, 414.68808, 450.29814/), & +! 11580.61872, 5535.41835, 2793.79690, & +! 25798.96479, 11536.03871, 4688.24207, & +! 196.49772, 204.14078, 214.42287, & +! 2665.85867, 2256.71027, 820.36024, & +! 840.78295, 1028.24656, 1163.03298, & +! 
387.51211, 414.68808, 450.29814/), & + 11580.61872_dbl_kind, 5535.41835_dbl_kind, 2793.79690_dbl_kind, & + 25798.96479_dbl_kind, 11536.03871_dbl_kind, 4688.24207_dbl_kind, & + 196.49772_dbl_kind, 204.14078_dbl_kind, 214.42287_dbl_kind, & + 2665.85867_dbl_kind, 2256.71027_dbl_kind, 820.36024_dbl_kind, & + 840.78295_dbl_kind, 1028.24656_dbl_kind, 1163.03298_dbl_kind, & + 387.51211_dbl_kind, 414.68808_dbl_kind, 450.29814_dbl_kind/), & (/icepack_nspint,icepack_max_aero/)) waer_tab = reshape((/ & ! aerosol single scatter albedo (fraction) - 0.29003, 0.17349, 0.06613, & - 0.51731, 0.41609, 0.21324, & - 0.84467, 0.94216, 0.95666, & - 0.97764, 0.99402, 0.98552, & - 0.94146, 0.98527, 0.99093, & - 0.90034, 0.96543, 0.97678/), & +! 0.29003, 0.17349, 0.06613, & +! 0.51731, 0.41609, 0.21324, & +! 0.84467, 0.94216, 0.95666, & +! 0.97764, 0.99402, 0.98552, & +! 0.94146, 0.98527, 0.99093, & +! 0.90034, 0.96543, 0.97678/), & + 0.29003_dbl_kind, 0.17349_dbl_kind, 0.06613_dbl_kind, & + 0.51731_dbl_kind, 0.41609_dbl_kind, 0.21324_dbl_kind, & + 0.84467_dbl_kind, 0.94216_dbl_kind, 0.95666_dbl_kind, & + 0.97764_dbl_kind, 0.99402_dbl_kind, 0.98552_dbl_kind, & + 0.94146_dbl_kind, 0.98527_dbl_kind, 0.99093_dbl_kind, & + 0.90034_dbl_kind, 0.96543_dbl_kind, 0.97678_dbl_kind/), & (/icepack_nspint,icepack_max_aero/)) gaer_tab = reshape((/ & ! aerosol asymmetry parameter (cos(theta)) - 0.35445, 0.19838, 0.08857, & - 0.52581, 0.32384, 0.14970, & - 0.83162, 0.78306, 0.74375, & - 0.68861, 0.70836, 0.54171, & - 0.70239, 0.66115, 0.71983, & - 0.78734, 0.73580, 0.64411/), & +! 0.35445, 0.19838, 0.08857, & +! 0.52581, 0.32384, 0.14970, & +! 0.83162, 0.78306, 0.74375, & +! 0.68861, 0.70836, 0.54171, & +! 0.70239, 0.66115, 0.71983, & +! 0.78734, 0.73580, 0.64411/), & + 0.35445_dbl_kind, 0.19838_dbl_kind, 0.08857_dbl_kind, & + 0.52581_dbl_kind, 0.32384_dbl_kind, 0.14970_dbl_kind, & + 0.83162_dbl_kind, 0.78306_dbl_kind, 0.74375_dbl_kind, & + 0.68861_dbl_kind, 0.70836_dbl_kind, 0.54171_dbl_kind, & + 0.70239_dbl_kind, 0.66115_dbl_kind, 0.71983_dbl_kind, & + 0.78734_dbl_kind, 0.73580_dbl_kind, 0.64411_dbl_kind/), & (/icepack_nspint,icepack_max_aero/)) ! this data is used in MODAL AEROSOL treatment in dEdd radiation kaer_bc_tab = reshape((/ & ! aerosol mass extinction cross section (m2/kg) - 12955.44732, 5946.89461, 2772.33366, & - 12085.30664, 7438.83131, 3657.13084, & - 9753.99698, 7342.87139, 4187.79304, & - 7815.74879, 6659.65096, 4337.98863, & - 6381.28194, 5876.78408, 4254.65054, & - 5326.93163, 5156.74532, 4053.66581, & - 4538.09763, 4538.60875, 3804.10884, & - 3934.17604, 4020.20799, 3543.27199, & - 3461.20656, 3587.80962, 3289.98060, & - 3083.03396, 3226.27231, 3052.91441/), & +! 12955.44732, 5946.89461, 2772.33366, & +! 12085.30664, 7438.83131, 3657.13084, & +! 9753.99698, 7342.87139, 4187.79304, & +! 7815.74879, 6659.65096, 4337.98863, & +! 6381.28194, 5876.78408, 4254.65054, & +! 5326.93163, 5156.74532, 4053.66581, & +! 4538.09763, 4538.60875, 3804.10884, & +! 3934.17604, 4020.20799, 3543.27199, & +! 3461.20656, 3587.80962, 3289.98060, & +! 
3083.03396, 3226.27231, 3052.91441/), & + 12955.4473151973_dbl_kind, 5946.89461205564_dbl_kind, 2772.33366387720_dbl_kind, & + 12085.3066388712_dbl_kind, 7438.83131367992_dbl_kind, 3657.13084442081_dbl_kind, & + 9753.99697536893_dbl_kind, 7342.87139082553_dbl_kind, 4187.79303607928_dbl_kind, & + 7815.74879345131_dbl_kind, 6659.65096365965_dbl_kind, 4337.98863414228_dbl_kind, & + 6381.28194381772_dbl_kind, 5876.78408231865_dbl_kind, 4254.65053724305_dbl_kind, & + 5326.93163497508_dbl_kind, 5156.74531505734_dbl_kind, 4053.66581550147_dbl_kind, & + 4538.09762614960_dbl_kind, 4538.60874501597_dbl_kind, 3804.10884202567_dbl_kind, & + 3934.17604000777_dbl_kind, 4020.20798667897_dbl_kind, 3543.27199302277_dbl_kind, & + 3461.20655708248_dbl_kind, 3587.80961820605_dbl_kind, 3289.98060303894_dbl_kind, & + 3083.03396032095_dbl_kind, 3226.27231329114_dbl_kind, 3052.91440681137_dbl_kind/), & (/icepack_nspint,10/)) waer_bc_tab = reshape((/ & ! aerosol single scatter albedo (fraction) - 0.26107, 0.15861, 0.06535, & - 0.37559, 0.30318, 0.19483, & - 0.42224, 0.36913, 0.27875, & - 0.44777, 0.40503, 0.33026, & - 0.46444, 0.42744, 0.36426, & - 0.47667, 0.44285, 0.38827, & - 0.48635, 0.45428, 0.40617, & - 0.49440, 0.46328, 0.42008, & - 0.50131, 0.47070, 0.43128, & - 0.50736, 0.47704, 0.44056/), & +! 0.26107, 0.15861, 0.06535, & +! 0.37559, 0.30318, 0.19483, & +! 0.42224, 0.36913, 0.27875, & +! 0.44777, 0.40503, 0.33026, & +! 0.46444, 0.42744, 0.36426, & +! 0.47667, 0.44285, 0.38827, & +! 0.48635, 0.45428, 0.40617, & +! 0.49440, 0.46328, 0.42008, & +! 0.50131, 0.47070, 0.43128, & +! 0.50736, 0.47704, 0.44056/), & + 0.261071919959011_dbl_kind, 0.158608047940651_dbl_kind, 0.0653546447770291_dbl_kind, & + 0.375593873543050_dbl_kind, 0.303181671502553_dbl_kind, 0.194832290545495_dbl_kind, & + 0.422240383488477_dbl_kind, 0.369134186611324_dbl_kind, 0.278752556671685_dbl_kind, & + 0.447772153910671_dbl_kind, 0.405033725319593_dbl_kind, 0.330260831965086_dbl_kind, & + 0.464443094570456_dbl_kind, 0.427439117980081_dbl_kind, 0.364256689383418_dbl_kind, & + 0.476668995985241_dbl_kind, 0.442854173154887_dbl_kind, 0.388270470928338_dbl_kind, & + 0.486347881475941_dbl_kind, 0.454284736567521_dbl_kind, 0.406167596922937_dbl_kind, & + 0.494397834153785_dbl_kind, 0.463279526357470_dbl_kind, 0.420084410794128_dbl_kind, & + 0.501307856563459_dbl_kind, 0.470696914968199_dbl_kind, 0.431284889617716_dbl_kind, & + 0.507362336297419_dbl_kind, 0.477038272961243_dbl_kind, 0.440559363958571_dbl_kind/), & (/icepack_nspint,10/)) gaer_bc_tab = reshape((/ & ! aerosol asymmetry parameter (cos(theta)) - 0.28328, 0.19644, 0.10498, & - 0.44488, 0.32615, 0.19612, & - 0.54724, 0.41611, 0.26390, & - 0.61711, 0.48475, 0.31922, & - 0.66673, 0.53923, 0.36632, & - 0.70296, 0.58337, 0.40732, & - 0.73002, 0.61960, 0.44344, & - 0.75064, 0.64959, 0.47551, & - 0.76663, 0.67461, 0.50415, & - 0.77926, 0.69561, 0.52981/),& +! 0.28328, 0.19644, 0.10498, & +! 0.44488, 0.32615, 0.19612, & +! 0.54724, 0.41611, 0.26390, & +! 0.61711, 0.48475, 0.31922, & +! 0.66673, 0.53923, 0.36632, & +! 0.70296, 0.58337, 0.40732, & +! 0.73002, 0.61960, 0.44344, & +! 0.75064, 0.64959, 0.47551, & +! 0.76663, 0.67461, 0.50415, & +! 
0.77926, 0.69561, 0.52981/),& + 0.283282988564031_dbl_kind, 0.196444209821980_dbl_kind, 0.104976473902976_dbl_kind, & + 0.444877326083453_dbl_kind, 0.326147707342261_dbl_kind, 0.196121968923488_dbl_kind, & + 0.547243414035631_dbl_kind, 0.416106187964493_dbl_kind, 0.263903486903711_dbl_kind, & + 0.617111563012282_dbl_kind, 0.484745531707601_dbl_kind, 0.319218974395050_dbl_kind, & + 0.666728525631754_dbl_kind, 0.539228555802301_dbl_kind, 0.366323180358996_dbl_kind, & + 0.702956870835387_dbl_kind, 0.583372441336763_dbl_kind, 0.407316408184865_dbl_kind, & + 0.730016668453191_dbl_kind, 0.619595539349710_dbl_kind, 0.443436944107423_dbl_kind, & + 0.750635997128011_dbl_kind, 0.649589805870541_dbl_kind, 0.475512089138887_dbl_kind, & + 0.766634959089444_dbl_kind, 0.674609076223658_dbl_kind, 0.504145461809103_dbl_kind, & + 0.779256641759228_dbl_kind, 0.695614224933709_dbl_kind, 0.529805346632687_dbl_kind/), & (/icepack_nspint,10/)) bcenh(:,:,:) = c0 diff --git a/configuration/scripts/options/set_nml.bgczm b/configuration/scripts/options/set_nml.bgczm new file mode 100644 index 000000000..2644576cd --- /dev/null +++ b/configuration/scripts/options/set_nml.bgczm @@ -0,0 +1,30 @@ +nilyr = 7 +nslyr = 1 +ncat = 5 +nblyr = 7 +n_aero = 0 +n_zaero = 3 +n_algae = 3 +n_doc = 2 +n_dic = 0 +n_don = 1 +n_fed = 1 +n_fep = 1 +ice_ic = 'none' +tr_brine = .true. +tr_zaero = .true. +z_tracers = .true. +solve_zbgc = .true. +scale_bgc = .true. +bgc_data_type = 'default' +tr_bgc_Nit = .true. +tr_bgc_C = .true. +tr_bgc_Am = .true. +tr_bgc_Sil = .true. +tr_bgc_DMS = .true. +tr_bgc_PON = .true. +tr_bgc_hum = .true. +tr_bgc_DON = .true. +tr_bgc_Fe = .true. +modal_aero = .true. +# dEdd_algae = .true. diff --git a/configuration/scripts/options/set_nml.modal b/configuration/scripts/options/set_nml.modal new file mode 100644 index 000000000..767160a53 --- /dev/null +++ b/configuration/scripts/options/set_nml.modal @@ -0,0 +1,4 @@ +shortwave = 'dEdd' +tr_aero = .true. +modal_aero = .true. 
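
Note: the motivation for the _dbl_kind suffixes in the aerosol tables above is
that an unsuffixed Fortran real literal is default (single) precision, so the
extra digits are lost before the assignment ever happens. A short sketch
(illustrative only, not CICE code; the literal is the first waer_bc_tab entry):

    real (kind=8) :: a, b
    a = 0.261071919959011     ! literal truncated to ~7 significant digits
    b = 0.261071919959011_8   ! literal kept at full double precision
    print *, b - a            ! nonzero, of order 1e-8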
+ diff --git a/configuration/scripts/tests/base_suite.ts b/configuration/scripts/tests/base_suite.ts index d4bbe8031..3007380ab 100644 --- a/configuration/scripts/tests/base_suite.ts +++ b/configuration/scripts/tests/base_suite.ts @@ -43,8 +43,9 @@ smoke gbox128 4x4 boxrestore,short,debug restart gbox80 1x1 box2001 smoke gbox80 1x1 boxslotcyl smoke gbox12 1x1x12x12x1 boxchan,diag1,debug +restart gx3 8x2 modal smoke gx3 8x2 bgcz -smoke gx3 8x2 bgcz,debug +smoke gx3 8x2 bgczm,debug smoke gx3 8x1 bgcskl,debug #smoke gx3 4x1 bgcz,thread smoke_gx3_8x2_bgcz restart gx1 4x2 bgcsklclim,medium diff --git a/configuration/scripts/tests/io_suite.ts b/configuration/scripts/tests/io_suite.ts index 01ea21ec2..84d064f32 100644 --- a/configuration/scripts/tests/io_suite.ts +++ b/configuration/scripts/tests/io_suite.ts @@ -23,7 +23,7 @@ restart gx3 8x4 alt04,histall,ionetcdf,cdf64 restart gx3 8x4 alt05,histall,ionetcdf,precision8,cdf64 restart gx3 16x2 alt06,histall,ionetcdf restart gx3 16x2 alt07,histall,ionetcdf -restart gx3 30x1 bgcz,histall,ionetcdf +restart gx3 30x1 bgczm,histall,ionetcdf restart gx3 15x2 bgcskl,histall,ionetcdf,precision8 restart gx3 31x1 isotope,histall,ionetcdf,cdf64 restart gx3 14x2 fsd12,histall,ionetcdf,precision8 @@ -37,7 +37,7 @@ restart gx3 8x4 alt04,histall,iopio1,precision8,cdf64 restart gx3 8x4 alt05,histall,iopio1,cdf64 restart gx3 32x1 alt06,histall,iopio1,precision8 restart gx3 32x1 alt07,histall,iopio1,precision8 -restart gx3 16x2 bgcz,histall,iopio1,precision8 +restart gx3 16x2 bgczm,histall,iopio1,precision8 restart gx3 30x1 bgcskl,histall,iopio1 restart gx3 8x4 isotope,histall,iopio1,precision8,cdf64 restart gx3 12x2 fsd12,histall,iopio1,cdf64 @@ -51,7 +51,7 @@ restart gx3 8x4 alt04,histall,iopio2 restart gx3 8x4 alt05,histall,iopio2,precision8,cdf64 restart gx3 16x2 alt06,histall,iopio2,cdf64 restart gx3 16x2 alt07,histall,iopio2,cdf64 -restart gx3 16x2 bgcz,histall,iopio2,cdf64 +restart gx3 16x2 bgczm,histall,iopio2,cdf64 restart gx3 30x1 bgcskl,histall,iopio2,precision8 restart gx3 8x4 isotope,histall,iopio2 restart gx3 12x2 fsd12,histall,iopio2,precision8,cdf64 @@ -65,7 +65,7 @@ restart gx3 8x4 alt04,histall,iopio1p,precision8 restart gx3 8x4 alt05,histall,iopio1p restart gx3 6x4 alt06,histall,iopio1p,precision8,cdf64 restart gx3 6x4 alt07,histall,iopio1p,precision8,cdf64 -restart gx3 16x2 bgcz,histall,iopio1p,precision8,cdf64 +restart gx3 16x2 bgczm,histall,iopio1p,precision8,cdf64 restart gx3 30x1 bgcskl,histall,iopio1p,cdf64 restart gx3 8x4 isotope,histall,iopio1p,precision8 restart gx3 12x2 fsd12,histall,iopio1p @@ -79,7 +79,7 @@ restart gx3 8x4 alt04,histall,iopio2p,cdf64 restart gx3 8x4 alt05,histall,iopio2p,precision8 restart gx3 24x1 alt06,histall,iopio2p restart gx3 24x1 alt07,histall,iopio2p -restart gx3 16x2 bgcz,histall,iopio2p +restart gx3 16x2 bgczm,histall,iopio2p restart gx3 30x1 bgcskl,histall,iopio2p,precision8,cdf64 restart gx3 8x4 isotope,histall,iopio2p,cdf64 restart gx3 12x2 fsd12,histall,iopio2p,precision8 diff --git a/configuration/scripts/tests/nothread_suite.ts b/configuration/scripts/tests/nothread_suite.ts index 93839b000..a262ec135 100644 --- a/configuration/scripts/tests/nothread_suite.ts +++ b/configuration/scripts/tests/nothread_suite.ts @@ -54,7 +54,7 @@ restart gx3 12x1 short #restart gx1 24x1 short smoke gx3 16x1 bgcz -smoke gx3 16x1 bgcz,debug +smoke gx3 16x1 bgczm,debug smoke gx3 24x1 bgcskl,debug #tcraig, hangs nodes intermittently on izumi #restart gx1 128x1 bgcsklclim,medium diff --git 
a/configuration/scripts/tests/omp_suite.ts b/configuration/scripts/tests/omp_suite.ts index 5d5e18376..686fa72db 100644 --- a/configuration/scripts/tests/omp_suite.ts +++ b/configuration/scripts/tests/omp_suite.ts @@ -8,7 +8,7 @@ smoke gx3 4x4 alt04,reprosum,run10day smoke gx3 4x4 alt05,reprosum,run10day smoke gx3 8x2 alt06,reprosum,run10day smoke gx3 7x2 alt07,reprosum,run10day -smoke gx3 8x2 bgcz,reprosum,run10day +smoke gx3 8x2 bgczm,reprosum,run10day smoke gx1 15x2 reprosum,run10day smoke gx1 15x2 seabedprob,reprosum,run10day smoke gx3 14x2 fsd12,reprosum,run10day @@ -34,7 +34,7 @@ smoke gx3 8x1 alt04,reprosum,run10day,cmplogrest,thread smoke gx3 8x1 alt05,reprosum,run10day,cmplogrest,thread smoke_gx3_4x4_alt05_reprosum_run10day smoke gx3 8x1 alt06,reprosum,run10day,cmplogrest,thread smoke_gx3_8x2_alt06_reprosum_run10day smoke gx3 8x1 alt07,reprosum,run10day,cmplogrest,thread smoke_gx3_7x2_alt07_reprosum_run10day -smoke gx3 8x1 bgcz,reprosum,run10day,cmplogrest,thread smoke_gx3_8x2_bgcz_reprosum_run10day +smoke gx3 8x1 bgczm,reprosum,run10day,cmplogrest,thread smoke_gx3_8x2_bgczm_reprosum_run10day smoke gx1 18x1 reprosum,run10day,cmplogrest,thread smoke_gx1_15x2_reprosum_run10day smoke gx1 18x1 seabedprob,reprosum,run10day,cmplogrest,thread smoke_gx1_15x2_reprosum_run10day_seabedprob smoke gx3 8x1 fsd12,reprosum,run10day,cmplogrest,thread smoke_gx3_14x2_fsd12_reprosum_run10day @@ -62,7 +62,7 @@ smoke gx3 4x4 alt04,reprosum,run10day,gridc smoke gx3 4x4 alt05,reprosum,run10day,gridc smoke gx3 8x2 alt06,reprosum,run10day,gridc smoke gx3 7x2 alt07,reprosum,run10day,gridc -smoke gx3 8x2 bgcz,reprosum,run10day,gridc +smoke gx3 8x2 bgczm,reprosum,run10day,gridc smoke gx1 15x2 reprosum,run10day,gridc smoke gx1 15x2 seabedprob,reprosum,run10day,gridc smoke gx3 14x2 fsd12,reprosum,run10day,gridc @@ -88,7 +88,7 @@ smoke gx3 8x1 alt04,reprosum,run10day,cmplogrest,thread,grid smoke gx3 8x1 alt05,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_4x4_alt05_gridc_reprosum_run10day smoke gx3 8x1 alt06,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_8x2_alt06_gridc_reprosum_run10day smoke gx3 8x1 alt07,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_7x2_alt07_gridc_reprosum_run10day -smoke gx3 8x1 bgcz,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_8x2_bgcz_gridc_reprosum_run10day +smoke gx3 8x1 bgczm,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_8x2_bgczm_gridc_reprosum_run10day smoke gx1 18x1 reprosum,run10day,cmplogrest,thread,gridc smoke_gx1_15x2_gridc_reprosum_run10day smoke gx1 18x1 seabedprob,reprosum,run10day,cmplogrest,thread,gridc smoke_gx1_15x2_gridc_reprosum_run10day_seabedprob smoke gx3 8x1 fsd12,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_14x2_fsd12_gridc_reprosum_run10day @@ -116,7 +116,7 @@ smoke gx3 4x4 alt04,reprosum,run10day,gridcd smoke gx3 4x4 alt05,reprosum,run10day,gridcd smoke gx3 8x2 alt06,reprosum,run10day,gridcd smoke gx3 7x2 alt07,reprosum,run10day,gridcd -smoke gx3 8x2 bgcz,reprosum,run10day,gridcd +smoke gx3 8x2 bgczm,reprosum,run10day,gridcd smoke gx1 15x2 reprosum,run10day,gridcd smoke gx1 15x2 seabedprob,reprosum,run10day,gridcd smoke gx3 14x2 fsd12,reprosum,run10day,gridcd @@ -142,7 +142,7 @@ smoke gx3 8x1 alt04,reprosum,run10day,cmplogrest,thread,grid smoke gx3 8x1 alt05,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_4x4_alt05_gridcd_reprosum_run10day smoke gx3 8x1 alt06,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_8x2_alt06_gridcd_reprosum_run10day smoke gx3 8x1 alt07,reprosum,run10day,cmplogrest,thread,gridcd 
smoke_gx3_7x2_alt07_gridcd_reprosum_run10day -smoke gx3 8x1 bgcz,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_8x2_bgcz_gridcd_reprosum_run10day +smoke gx3 8x1 bgczm,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_8x2_bgczm_gridcd_reprosum_run10day smoke gx1 18x1 reprosum,run10day,cmplogrest,thread,gridcd smoke_gx1_15x2_gridcd_reprosum_run10day smoke gx1 18x1 seabedprob,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx1_15x2_gridcd_reprosum_run10day_seabedprob smoke gx3 8x1 fsd12,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_14x2_fsd12_gridcd_reprosum_run10day diff --git a/icepack b/icepack index 3a039e598..460ddf8de 160000 --- a/icepack +++ b/icepack @@ -1 +1 @@ -Subproject commit 3a039e598e6395333a278bb1822f03e9bc954ac6 +Subproject commit 460ddf8de6cf9f55572e8bf0e7672f24d6a7ec09 From 036f1f72d5ec5955fc0c8ba76843ea038a83d59a Mon Sep 17 00:00:00 2001 From: Tony Craig Date: Thu, 29 Sep 2022 12:21:07 -0700 Subject: [PATCH 21/57] Update grid averaging for tmass, aice, uvelT, vvelT (#762) * Update grid averaging for tmass, aice, uvelT, vvelT - Update tmass and aice T2U mapping, switch from "F" to "S", F was backwards compatible but not correct (changes answers) - Update ocean forcing T2U averaging in ocn_data_ncar_init, change "F" to "A". - Update uvelT, vvelT averaging in step_therm1, change from 4 point average to U2TA (changes answers for highfreq=.true.) - Remove history grids not needed (i.e. ustr3Dz) * Refactor uvelT, vvelT implementation --- .../cicedynB/analysis/ice_history_shared.F90 | 64 +++++-------------- cicecore/cicedynB/dynamics/ice_dyn_eap.F90 | 4 +- cicecore/cicedynB/dynamics/ice_dyn_evp.F90 | 12 ++-- cicecore/cicedynB/dynamics/ice_dyn_vp.F90 | 4 +- cicecore/cicedynB/general/ice_forcing.F90 | 6 +- cicecore/cicedynB/general/ice_state.F90 | 4 ++ cicecore/cicedynB/general/ice_step_mod.F90 | 30 +++++---- .../infrastructure/ice_restart_driver.F90 | 7 +- 8 files changed, 57 insertions(+), 74 deletions(-) diff --git a/cicecore/cicedynB/analysis/ice_history_shared.F90 b/cicecore/cicedynB/analysis/ice_history_shared.F90 index ee48a9996..d9c62edde 100644 --- a/cicecore/cicedynB/analysis/ice_history_shared.F90 +++ b/cicecore/cicedynB/analysis/ice_history_shared.F90 @@ -156,57 +156,25 @@ module ice_history_shared igrdz(nvar_grdz) ! true if category/vertical grid field is written character (len=25), public, parameter :: & - tcstr = 'area: tarea' , & ! vcellmeas for T cell quantities - ucstr = 'area: uarea' , & ! vcellmeas for U cell quantities - ncstr = 'area: narea' , & ! vcellmeas for N cell quantities - ecstr = 'area: earea' , & ! vcellmeas for E cell quantities - tstr2D = 'TLON TLAT time' , & ! vcoord for T cell quantities, 2D - ustr2D = 'ULON ULAT time' , & ! vcoord for U cell quantities, 2D - nstr2D = 'NLON NLAT time' , & ! vcoord for N cell quantities, 2D - estr2D = 'ELON ELAT time' , & ! vcoord for E cell quantities, 2D - tstr3Dz = 'TLON TLAT VGRDi time',& ! vcoord for T cell quantities, 3D - ustr3Dz = 'ULON ULAT VGRDi time',& ! vcoord for U cell quantities, 3D - nstr3Dz = 'NLON NLAT VGRDi time',& ! vcoord for N cell quantities, 3D - estr3Dz = 'ELON ELAT VGRDi time',& ! vcoord for E cell quantities, 3D - tstr3Dc = 'TLON TLAT NCAT time',& ! vcoord for T cell quantities, 3D - ustr3Dc = 'ULON ULAT NCAT time',& ! vcoord for U cell quantities, 3D - nstr3Dc = 'NLON NLAT NCAT time',& ! vcoord for N cell quantities, 3D - estr3Dc = 'ELON ELAT NCAT time',& ! vcoord for E cell quantities, 3D - tstr3Db = 'TLON TLAT VGRDb time',& ! 
vcoord for T cell quantities, 3D - ustr3Db = 'ULON ULAT VGRDb time',& ! vcoord for U cell quantities, 3D - nstr3Db = 'NLON NLAT VGRDb time',& ! vcoord for N cell quantities, 3D - estr3Db = 'ELON ELAT VGRDb time',& ! vcoord for E cell quantities, 3D - tstr3Da = 'TLON TLAT VGRDa time',& ! vcoord for T cell quantities, 3D - ustr3Da = 'ULON ULAT VGRDa time',& ! vcoord for U cell quantities, 3D - nstr3Da = 'NLON NLAT VGRDa time',& ! vcoord for N cell quantities, 3D - estr3Da = 'ELON ELAT VGRDa time',& ! vcoord for E cell quantities, 3D - tstr3Df = 'TLON TLAT NFSD time',& ! vcoord for T cell quantities, 3D - ustr3Df = 'ULON ULAT NFSD time',& ! vcoord for U cell quantities, 3D - nstr3Df = 'NLON NLAT NFSD time',& ! vcoord for N cell quantities, 3D - estr3Df = 'ELON ELAT NFSD time',& ! vcoord for E cell quantities, 3D - -!ferret + ! T grids + tcstr = 'area: tarea' , & ! vcellmeas for T cell quantities + tstr2D = 'TLON TLAT time' , & ! vcoord for T cell, 2D + tstr3Dc = 'TLON TLAT NCAT time', & ! vcoord for T cell, 3D, ncat + tstr3Da = 'TLON TLAT VGRDa time', & ! vcoord for T cell, 3D, ice-snow-bio + tstr3Db = 'TLON TLAT VGRDb time', & ! vcoord for T cell, 3D, ice-bio + tstr3Df = 'TLON TLAT NFSD time', & ! vcoord for T cell, 3D, fsd tstr4Di = 'TLON TLAT VGRDi NCAT', & ! vcoord for T cell, 4D, ice - ustr4Di = 'ULON ULAT VGRDi NCAT', & ! vcoord for U cell, 4D, ice - nstr4Di = 'NLON NLAT VGRDi NCAT', & ! vcoord for N cell, 4D, ice - estr4Di = 'ELON ELAT VGRDi NCAT', & ! vcoord for E cell, 4D, ice tstr4Ds = 'TLON TLAT VGRDs NCAT', & ! vcoord for T cell, 4D, snow - ustr4Ds = 'ULON ULAT VGRDs NCAT', & ! vcoord for U cell, 4D, snow - nstr4Ds = 'NLON NLAT VGRDs NCAT', & ! vcoord for N cell, 4D, snow - estr4Ds = 'ELON ELAT VGRDs NCAT', & ! vcoord for E cell, 4D, snow tstr4Df = 'TLON TLAT NFSD NCAT', & ! vcoord for T cell, 4D, fsd - ustr4Df = 'ULON ULAT NFSD NCAT', & ! vcoord for U cell, 4D, fsd - nstr4Df = 'NLON NLAT NFSD NCAT', & ! vcoord for N cell, 4D, fsd - estr4Df = 'ELON ELAT NFSD NCAT' ! vcoord for E cell, 4D, fsd -!ferret -! tstr4Di = 'TLON TLAT VGRDi NCAT time', & ! ferret can not handle time -! ustr4Di = 'ULON ULAT VGRDi NCAT time', & ! index on 4D variables. -! tstr4Ds = 'TLON TLAT VGRDs NCAT time', & ! Use 'ferret' lines instead -! ustr4Ds = 'ULON ULAT VGRDs NCAT time', & ! (below also) -! tstr4Db = 'TLON TLAT VGRDb NCAT time', & -! ustr4Db = 'ULON ULAT VGRDb NCAT time', & -! tstr4Df = 'TLON TLAT NFSD NCAT time', & -! ustr4Df = 'ULON ULAT NFSD NCAT time', & + ! U grids + ucstr = 'area: uarea' , & ! vcellmeas for U cell quantities + ustr2D = 'ULON ULAT time' , & ! vcoord for U cell, 2D + ! N grids + ncstr = 'area: narea' , & ! vcellmeas for N cell quantities + nstr2D = 'NLON NLAT time' , & ! vcoord for N cell, 2D + ! E grids + ecstr = 'area: earea' , & ! vcellmeas for E cell quantities + estr2D = 'ELON ELAT time' ! vcoord for E cell, 2D !--------------------------------------------------------------- ! flags: write to output file if true or histfreq value diff --git a/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 b/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 index f71d959da..3fe582224 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 @@ -270,8 +270,8 @@ subroutine eap (dt) ! 
convert fields from T to U grid !----------------------------------------------------------------- - call grid_average_X2Y('F', tmass , 'T' , umass, 'U') - call grid_average_X2Y('F', aice_init, 'T' , aiU , 'U') + call grid_average_X2Y('S', tmass , 'T' , umass, 'U') + call grid_average_X2Y('S', aice_init, 'T' , aiU , 'U') call grid_average_X2Y('S', uocn , grid_ocn_dynu, uocnU , 'U') call grid_average_X2Y('S', vocn , grid_ocn_dynv, vocnU , 'U') call grid_average_X2Y('S', ss_tltx , grid_ocn_dynu, ss_tltxU, 'U') diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 index 39f91e418..0b8b8ee2d 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 @@ -317,22 +317,22 @@ subroutine evp (dt) ! convert fields from T to U grid !----------------------------------------------------------------- - call grid_average_X2Y('F', tmass , 'T' , umass , 'U') - call grid_average_X2Y('F', aice_init, 'T' , aiU , 'U') + call grid_average_X2Y('S', tmass , 'T' , umass , 'U') + call grid_average_X2Y('S', aice_init, 'T' , aiU , 'U') call grid_average_X2Y('S', uocn , grid_ocn_dynu, uocnU , 'U') call grid_average_X2Y('S', vocn , grid_ocn_dynv, vocnU , 'U') call grid_average_X2Y('S', ss_tltx , grid_ocn_dynu, ss_tltxU, 'U') call grid_average_X2Y('S', ss_tlty , grid_ocn_dynv, ss_tltyU, 'U') if (grid_ice == 'CD' .or. grid_ice == 'C') then - call grid_average_X2Y('F', tmass , 'T' , emass , 'E') - call grid_average_X2Y('F', aice_init, 'T' , aie , 'E') + call grid_average_X2Y('S', tmass , 'T' , emass , 'E') + call grid_average_X2Y('S', aice_init, 'T' , aie , 'E') call grid_average_X2Y('S', uocn , grid_ocn_dynu, uocnE , 'E') call grid_average_X2Y('S', vocn , grid_ocn_dynv, vocnE , 'E') call grid_average_X2Y('S', ss_tltx , grid_ocn_dynu, ss_tltxE, 'E') call grid_average_X2Y('S', ss_tlty , grid_ocn_dynv, ss_tltyE, 'E') - call grid_average_X2Y('F', tmass , 'T' , nmass , 'N') - call grid_average_X2Y('F', aice_init, 'T' , ain , 'N') + call grid_average_X2Y('S', tmass , 'T' , nmass , 'N') + call grid_average_X2Y('S', aice_init, 'T' , ain , 'N') call grid_average_X2Y('S', uocn , grid_ocn_dynu, uocnN , 'N') call grid_average_X2Y('S', vocn , grid_ocn_dynv, vocnN , 'N') call grid_average_X2Y('S', ss_tltx , grid_ocn_dynu, ss_tltxN, 'N') diff --git a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 index 17fd0b73f..8df5aa313 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 @@ -304,8 +304,8 @@ subroutine implicit_solver (dt) ! 
convert fields from T to U grid !----------------------------------------------------------------- - call grid_average_X2Y('F',tmass , 'T', umass, 'U') - call grid_average_X2Y('F',aice_init, 'T', aiU , 'U') + call grid_average_X2Y('S',tmass , 'T', umass, 'U') + call grid_average_X2Y('S',aice_init, 'T', aiU , 'U') call grid_average_X2Y('S',uocn , grid_ocn_dynu, uocnU , 'U') call grid_average_X2Y('S',vocn , grid_ocn_dynv, vocnU , 'U') call grid_average_X2Y('S',ss_tltx, grid_ocn_dynu, ss_tltxU, 'U') diff --git a/cicecore/cicedynB/general/ice_forcing.F90 b/cicecore/cicedynB/general/ice_forcing.F90 index edff03b9f..381686c9b 100755 --- a/cicecore/cicedynB/general/ice_forcing.F90 +++ b/cicecore/cicedynB/general/ice_forcing.F90 @@ -4129,8 +4129,8 @@ subroutine ocn_data_ncar_init_3D work1(:,:,:) = ocn_frc_m(:,:,:,n ,m) work2(:,:,:) = ocn_frc_m(:,:,:,n+1,m) - call grid_average_X2Y('F',work1,'T',ocn_frc_m(:,:,:,n ,m),'U') - call grid_average_X2Y('F',work2,'T',ocn_frc_m(:,:,:,n+1,m),'U') + call grid_average_X2Y('A',work1,'T',ocn_frc_m(:,:,:,n ,m),'U') + call grid_average_X2Y('A',work2,'T',ocn_frc_m(:,:,:,n+1,m),'U') enddo ! month loop enddo ! field loop @@ -4373,7 +4373,7 @@ subroutine ocn_data_hadgem(dt) use ice_domain, only: nblocks use ice_domain_size, only: max_blocks use ice_flux, only: sst, uocn, vocn - use ice_grid, only: grid_average_X2Y, ANGLET + use ice_grid, only: ANGLET real (kind=dbl_kind), intent(in) :: & dt ! time step diff --git a/cicecore/cicedynB/general/ice_state.F90 b/cicecore/cicedynB/general/ice_state.F90 index d5c115a0c..7b718b824 100644 --- a/cicecore/cicedynB/general/ice_state.F90 +++ b/cicecore/cicedynB/general/ice_state.F90 @@ -109,6 +109,8 @@ module ice_state public :: & uvel , & ! x-component of velocity on U grid (m/s) vvel , & ! y-component of velocity on U grid (m/s) + uvelT , & ! x-component of velocity on T grid (m/s) + vvelT , & ! y-component of velocity on T grid (m/s) uvelE , & ! x-component of velocity on E grid (m/s) vvelE , & ! y-component of velocity on E grid (m/s) uvelN , & ! x-component of velocity on N grid (m/s) @@ -155,6 +157,8 @@ subroutine alloc_state aice0 (nx_block,ny_block,max_blocks) , & ! concentration of open water uvel (nx_block,ny_block,max_blocks) , & ! x-component of velocity on U grid (m/s) vvel (nx_block,ny_block,max_blocks) , & ! y-component of velocity on U grid (m/s) + uvelT (nx_block,ny_block,max_blocks) , & ! x-component of velocity on T grid (m/s) + vvelT (nx_block,ny_block,max_blocks) , & ! y-component of velocity on T grid (m/s) uvelE (nx_block,ny_block,max_blocks) , & ! x-component of velocity on E grid (m/s) vvelE (nx_block,ny_block,max_blocks) , & ! y-component of velocity on E grid (m/s) uvelN (nx_block,ny_block,max_blocks) , & ! 
x-component of velocity on N grid (m/s) diff --git a/cicecore/cicedynB/general/ice_step_mod.F90 b/cicecore/cicedynB/general/ice_step_mod.F90 index b6f8741c0..794858a83 100644 --- a/cicecore/cicedynB/general/ice_step_mod.F90 +++ b/cicecore/cicedynB/general/ice_step_mod.F90 @@ -209,7 +209,7 @@ subroutine step_therm1 (dt, iblk) Qa_iso, Qref_iso, fiso_evap, HDO_ocn, H2_16O_ocn, H2_18O_ocn use ice_grid, only: lmask_n, lmask_s, tmask use ice_state, only: aice, aicen, aicen_init, vicen_init, & - vice, vicen, vsno, vsnon, trcrn, uvel, vvel, vsnon_init + vice, vicen, vsno, vsnon, trcrn, uvelT, vvelT, vsnon_init #ifdef CICE_IN_NEMO use ice_state, only: aice_init #endif @@ -251,8 +251,8 @@ subroutine step_therm1 (dt, iblk) tr_pond_lvl, tr_pond_topo, calc_Tsfc, highfreq, tr_snow real (kind=dbl_kind) :: & - uvel_center, & ! cell-centered velocity, x component (m/s) - vvel_center, & ! cell-centered velocity, y component (m/s) + uvelTij, & ! cell-centered velocity, x component (m/s) + vvelTij, & ! cell-centered velocity, y component (m/s) puny ! a very small number real (kind=dbl_kind), dimension(n_aero,2,ncat) :: & @@ -337,13 +337,11 @@ subroutine step_therm1 (dt, iblk) do i = ilo, ihi if (highfreq) then ! include ice velocity in calculation of wind stress - uvel_center = p25*(uvel(i,j ,iblk) + uvel(i-1,j ,iblk) & ! cell-centered velocity - + uvel(i,j-1,iblk) + uvel(i-1,j-1,iblk)) ! assumes wind components - vvel_center = p25*(vvel(i,j ,iblk) + vvel(i-1,j ,iblk) & ! are also cell-centered - + vvel(i,j-1,iblk) + vvel(i-1,j-1,iblk)) + uvelTij = uvelT(i,j,iblk) + vvelTij = vvelT(i,j,iblk) else - uvel_center = c0 ! not used - vvel_center = c0 + uvelTij = c0 + vvelTij = c0 endif ! highfreq if (tr_snow) then @@ -391,8 +389,8 @@ subroutine step_therm1 (dt, iblk) vicen = vicen (i,j,:,iblk), & vsno = vsno (i,j, iblk), & vsnon = vsnon (i,j,:,iblk), & - uvel = uvel_center , & - vvel = vvel_center , & + uvel = uvelTij , & + vvel = vvelTij , & Tsfc = trcrn (i,j,nt_Tsfc,:,iblk), & zqsn = trcrn (i,j,nt_qsno:nt_qsno+nslyr-1,:,iblk), & zqin = trcrn (i,j,nt_qice:nt_qice+nilyr-1,:,iblk), & @@ -944,6 +942,8 @@ subroutine step_dyn_horiz (dt) use ice_dyn_vp, only: implicit_solver use ice_dyn_shared, only: kdyn use ice_flux, only: init_history_dyn + use ice_grid, only: grid_average_X2Y + use ice_state, only: uvel, vvel, uvelT, vvelT use ice_transport_driver, only: advection, transport_upwind, transport_remap real (kind=dbl_kind), intent(in) :: & @@ -961,6 +961,14 @@ subroutine step_dyn_horiz (dt) if (kdyn == 2) call eap (dt) if (kdyn == 3) call implicit_solver (dt) + !----------------------------------------------------------------- + ! Compute uvelT, vvelT + ! only needed for highfreq, but compute anyway + !----------------------------------------------------------------- + + call grid_average_X2Y('A', uvel, 'U', uvelT, 'T') + call grid_average_X2Y('A', vvel, 'U', vvelT, 'T') + !----------------------------------------------------------------- ! 
Horizontal ice transport
 !-----------------------------------------------------------------
diff --git a/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 b/cicecore/cicedynB/infrastructure/ice_restart_driver.F90
index 64b8d2101..5581bd1cf 100644
--- a/cicecore/cicedynB/infrastructure/ice_restart_driver.F90
+++ b/cicecore/cicedynB/infrastructure/ice_restart_driver.F90
@@ -285,10 +285,10 @@ subroutine restartfile (ice_ic)
          stresspU, stressmU, stress12U
    use ice_flux, only: coszen
    use ice_grid, only: tmask, grid_type, grid_ice, &
-        iceumask, iceemask, icenmask
+        iceumask, iceemask, icenmask, grid_average_X2Y
    use ice_state, only: trcr_depend, aice, vice, vsno, trcr, &
        aice0, aicen, vicen, vsnon, trcrn, aice_init, uvel, vvel, &
-       uvelE, vvelE, uvelN, vvelN, &
+       uvelE, vvelE, uvelN, vvelN, uvelT, vvelT, &
        trcr_base, nt_strata, n_trcr_strata

    character (*), optional :: ice_ic
@@ -402,6 +402,9 @@ subroutine restartfile (ice_ic)
                 'vvelN',1,diag,field_loc_Nface, field_type_vector)
       endif

+      call grid_average_X2Y('A', uvel, 'U', uvelT, 'T')
+      call grid_average_X2Y('A', vvel, 'U', vvelT, 'T')
+
 !-----------------------------------------------------------------
 ! radiation fields
 !-----------------------------------------------------------------

From 422117fc5dcdd97dd0b139de8ed6464b587d6418 Mon Sep 17 00:00:00 2001
From: Philippe Blain
Date: Thu, 6 Oct 2022 18:39:14 -0400
Subject: [PATCH 22/57] doc: improve "Quick Start" section (#765)

Mention that CICE must be cloned with '--recurse-submodules' for
Icepack to also be cloned, format the 'git clone' command as a code
block, and fix the link to the Git Workflow Guide.
---
 doc/source/intro/quickstart.rst | 11 ++++++-----
 1 file changed, 6 insertions(+), 5 deletions(-)

diff --git a/doc/source/intro/quickstart.rst b/doc/source/intro/quickstart.rst
index 56d19ee70..b68fc32d7 100644
--- a/doc/source/intro/quickstart.rst
+++ b/doc/source/intro/quickstart.rst
@@ -6,13 +6,14 @@ Quick Start
 ===========

-Download the model from the CICE-Consortium repository,
-    https://github.com/CICE-Consortium/CICE
+Clone the model from the CICE-Consortium repository::

-Instructions for working in github with CICE (and Icepack) can be
-found in the `CICE Git and Workflow Guide `_.
+  git clone --recurse-submodules https://github.com/CICE-Consortium/CICE

-You will probably have to download some inputdata, see the `CICE wiki `_ or :ref:`force`.
+Instructions for working with Git and GitHub with CICE (and Icepack) can be
+found in the `CICE Git Workflow Guide `_.
+
+You will probably have to download some input data, see the `CICE wiki `_ or :ref:`force`.

 Software requirements are noted in this :ref:`software` section.

From 8c6ba0437b00a53fcd935fd717f931e9a5c5a6cf Mon Sep 17 00:00:00 2001
From: Tony Craig
Date: Mon, 10 Oct 2022 15:48:36 -0700
Subject: [PATCH 23/57] Refactor strocnxT, strocnyT implementation (#764)

* Refactor strocnxT, strocnyT implementation

- add aiU to ice_state
- migrate computation of strocnxT and strocnyT to ice_step, needed for
  thermodynamics, better code reuse.
- add strocnxT_sf, strocnyT_sf as coupling field, could be computed
  differently than the thermodynamics version. The _sf field computation
  should be in scale fluxes, but because scale_fluxes is called on a block
  and the _sf fields require a halo update among other things, the
  computation can't be done in scale_fluxes.
- Update the coupling layers to use the _sf version of the fields
  (a condensed sketch of the relocated computation is shown below).
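  The relocated computation can be summarized in a few lines; a condensed
  sketch (it mirrors the block deleted from the dynamics drivers in the diff
  below; the exact placement and naming inside ice_step_mod are paraphrased,
  not quoted):

      ! ice-area-averaged ocean stress on the T grid, assembled once per step
      work1(:,:,:) = c0
      work2(:,:,:) = c0
      do iblk = 1, nblocks
         do ij = 1, icellu(iblk)
            i = indxui(ij,iblk)
            j = indxuj(ij,iblk)
            work1(i,j,iblk) = strocnxU(i,j,iblk)/aiU(i,j,iblk) ! divide by aiU
            work2(i,j,iblk) = strocnyU(i,j,iblk)/aiU(i,j,iblk) ! before averaging
         enddo
      enddo
      call ice_HaloUpdate (work1, halo_info, field_loc_NEcorner, field_type_vector)
      call ice_HaloUpdate (work2, halo_info, field_loc_NEcorner, field_type_vector)
      call grid_average_X2Y('F', work1, 'U', strocnxT, 'T') ! conservative U->T
      call grid_average_X2Y('F', work2, 'U', strocnyT, 'T')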
- https://github.com/CICE-Consortium/CICE/issues/761 suggests the values of strocnxT, strocnyT should not be scaled for use in thermodynamics. This commit does not make that change yet, but allows for that change to be made easily. - These changes are bit-for-bit for a full test suite on cheyenne with 3 compilers. * Update computation of strocnxT, strocnyT passed into icepack_step_therm1 - No longer divided by aice - strocnxT_sf, strocnyT_sf are still computed in the same way as before * Rename strocn[x,y]T_sf to strocn[x,y]T_iavg Revert strocn[x,y]T passed into thermodynamics to be the version divided by aice, specifically strocn[x,y]T_iavg. This is identical to earlier implementations. --- cicecore/cicedynB/dynamics/ice_dyn_eap.F90 | 30 +------ cicecore/cicedynB/dynamics/ice_dyn_evp.F90 | 32 +------ cicecore/cicedynB/dynamics/ice_dyn_vp.F90 | 30 +------ cicecore/cicedynB/general/ice_flux.F90 | 22 +++-- cicecore/cicedynB/general/ice_state.F90 | 6 +- cicecore/cicedynB/general/ice_step_mod.F90 | 85 +++++++++++++------ .../infrastructure/ice_restart_driver.F90 | 18 ++-- .../drivers/mct/cesm1/ice_import_export.F90 | 6 +- .../drivers/mct/cesm1/ice_prescribed_mod.F90 | 10 +-- .../drivers/nuopc/cmeps/ice_import_export.F90 | 6 +- .../nuopc/cmeps/ice_prescribed_mod.F90 | 10 +-- cicecore/drivers/nuopc/dmi/cice_cap.info | 4 +- 12 files changed, 112 insertions(+), 147 deletions(-) diff --git a/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 b/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 index 3fe582224..854fd061b 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 @@ -138,7 +138,7 @@ subroutine eap (dt) use ice_flux, only: rdg_conv, strairxT, strairyT, & strairxU, strairyU, uocn, vocn, ss_tltx, ss_tlty, fmU, & strtltxU, strtltyU, strocnxU, strocnyU, strintxU, strintyU, taubxU, taubyU, & - strocnxT, strocnyT, strax, stray, & + strax, stray, & TbU, hwater, & stressp_1, stressp_2, stressp_3, stressp_4, & stressm_1, stressm_2, stressm_3, stressm_4, & @@ -146,7 +146,7 @@ subroutine eap (dt) use ice_grid, only: tmask, umask, dxT, dyT, dxhy, dyhx, cxp, cyp, cxm, cym, & tarear, uarear, grid_average_X2Y, iceumask, & grid_atm_dynu, grid_atm_dynv, grid_ocn_dynu, grid_ocn_dynv - use ice_state, only: aice, vice, vsno, uvel, vvel, divu, shear, & + use ice_state, only: aice, aiU, vice, vsno, uvel, vvel, divu, shear, & aice_init, aice0, aicen, vicen, strength use ice_timers, only: timer_dynamics, timer_bound, & ice_timer_start, ice_timer_stop @@ -182,7 +182,6 @@ subroutine eap (dt) wateryU , & ! for ocean stress calculation, y (m/s) forcexU , & ! work array: combined atm stress and ocn tilt, x forceyU , & ! work array: combined atm stress and ocn tilt, y - aiU , & ! ice fraction on u-grid umass , & ! total mass of ice and snow (u grid) umassdti ! mass of U-cell/dte (kg/m^2 s) @@ -205,10 +204,6 @@ subroutine eap (dt) type (block) :: & this_block ! block information for current block - real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & - work1 , & ! temporary - work2 ! temporary - character(len=*), parameter :: subname = '(eap)' call ice_timer_start(timer_dynamics) ! dynamics @@ -567,27 +562,6 @@ subroutine eap (dt) enddo !$OMP END PARALLEL DO - ! strocn computed on U, N, E as needed. Map strocn U divided by aiU to T - ! TODO: This should be done elsewhere as part of generalization? - ! 
conservation requires aiU be divided before averaging - work1 = c0 - work2 = c0 - !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) - do iblk = 1, nblocks - do ij = 1, icellu(iblk) - i = indxui(ij,iblk) - j = indxuj(ij,iblk) - work1(i,j,iblk) = strocnxU(i,j,iblk)/aiU(i,j,iblk) - work2(i,j,iblk) = strocnyU(i,j,iblk)/aiU(i,j,iblk) - enddo - enddo - call ice_HaloUpdate (work1, halo_info, & - field_loc_NEcorner, field_type_vector) - call ice_HaloUpdate (work2, halo_info, & - field_loc_NEcorner, field_type_vector) - call grid_average_X2Y('F', work1, 'U', strocnxT, 'T') ! shift - call grid_average_X2Y('F', work2, 'U', strocnyT, 'T') - call ice_timer_stop(timer_dynamics) ! dynamics end subroutine eap diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 index 0b8b8ee2d..3bbfc01bc 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 @@ -85,7 +85,7 @@ subroutine evp (dt) use ice_flux, only: rdg_conv, rdg_shear, strairxT, strairyT, & strairxU, strairyU, uocn, vocn, ss_tltx, ss_tlty, fmU, & strtltxU, strtltyU, strocnxU, strocnyU, strintxU, strintyU, taubxU, taubyU, & - strocnxT, strocnyT, strax, stray, & + strax, stray, & TbU, hwater, & strairxN, strairyN, fmN, & strtltxN, strtltyN, strocnxN, strocnyN, strintxN, strintyN, taubxN, taubyN, & @@ -106,7 +106,7 @@ subroutine evp (dt) tarear, uarear, earear, narear, grid_average_X2Y, uarea, & grid_type, grid_ice, & grid_atm_dynu, grid_atm_dynv, grid_ocn_dynu, grid_ocn_dynv - use ice_state, only: aice, vice, vsno, uvel, vvel, uvelN, vvelN, & + use ice_state, only: aice, aiU, vice, vsno, uvel, vvel, uvelN, vvelN, & uvelE, vvelE, divu, shear, & aice_init, aice0, aicen, vicen, strength use ice_timers, only: timer_dynamics, timer_bound, & @@ -155,7 +155,6 @@ subroutine evp (dt) wateryU , & ! for ocean stress calculation, y (m/s) forcexU , & ! work array: combined atm stress and ocn tilt, x forceyU , & ! work array: combined atm stress and ocn tilt, y - aiU , & ! ice fraction on u-grid umass , & ! total mass of ice and snow (u grid) umassdti ! mass of U-cell/dte (kg/m^2 s) @@ -217,10 +216,6 @@ subroutine evp (dt) type (block) :: & this_block ! block information for current block - real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & - work1, & ! temporary - work2 ! temporary - logical (kind=log_kind), save :: & first_time = .true. ! first time logical @@ -1326,29 +1321,6 @@ subroutine evp (dt) endif - ! strocn computed on U, N, E as needed. Map strocn U divided by aiU to T - ! TODO: This should be done elsewhere as part of generalization? - ! TODO: Rename strocn[x,y]T since it's different than strocn[x,y][U,N,E] - ! conservation requires aiU be divided before averaging - work1 = c0 - work2 = c0 - !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) SCHEDULE(runtime) - do iblk = 1, nblocks - do ij = 1, icellu(iblk) - i = indxui(ij,iblk) - j = indxuj(ij,iblk) - work1(i,j,iblk) = strocnxU(i,j,iblk)/aiU(i,j,iblk) - work2(i,j,iblk) = strocnyU(i,j,iblk)/aiU(i,j,iblk) - enddo - enddo - !$OMP END PARALLEL DO - call ice_HaloUpdate (work1, halo_info, & - field_loc_NEcorner, field_type_vector) - call ice_HaloUpdate (work2, halo_info, & - field_loc_NEcorner, field_type_vector) - call grid_average_X2Y('F', work1, 'U', strocnxT, 'T') ! shift - call grid_average_X2Y('F', work2, 'U', strocnyT, 'T') - if (grid_ice == 'CD' .or. grid_ice == 'C') then call grid_average_X2Y('S', strintxE, 'E', strintxU, 'U') ! diagnostic call grid_average_X2Y('S', strintyN, 'N', strintyU, 'U') ! 
diagnostic diff --git a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 index 8df5aa313..631192587 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 @@ -171,7 +171,7 @@ subroutine implicit_solver (dt) use ice_flux, only: rdg_conv, rdg_shear, strairxT, strairyT, & strairxU, strairyU, uocn, vocn, ss_tltx, ss_tlty, fmU, & strtltxU, strtltyU, strocnxU, strocnyU, strintxU, strintyU, taubxU, taubyU, & - strocnxT, strocnyT, strax, stray, & + strax, stray, & TbU, hwater, & stressp_1, stressp_2, stressp_3, stressp_4, & stressm_1, stressm_2, stressm_3, stressm_4, & @@ -179,7 +179,7 @@ subroutine implicit_solver (dt) use ice_grid, only: tmask, umask, dxT, dyT, cxp, cyp, cxm, cym, & tarear, grid_type, grid_average_X2Y, iceumask, & grid_atm_dynu, grid_atm_dynv, grid_ocn_dynu, grid_ocn_dynv - use ice_state, only: aice, vice, vsno, uvel, vvel, divu, shear, & + use ice_state, only: aice, aiU, vice, vsno, uvel, vvel, divu, shear, & aice_init, aice0, aicen, vicen, strength use ice_timers, only: timer_dynamics, timer_bound, & ice_timer_start, ice_timer_stop @@ -210,7 +210,6 @@ subroutine implicit_solver (dt) Cb , & ! seabed stress coefficient fpresx , & ! fixed point residual vector, x components: fx = uvel - uprev_k fpresy , & ! fixed point residual vector, y components: fy = vvel - vprev_k - aiU , & ! ice fraction on u-grid umass , & ! total mass of ice and snow (u grid) umassdti ! mass of U-cell/dte (kg/m^2 s) @@ -234,10 +233,6 @@ subroutine implicit_solver (dt) real (kind=dbl_kind), allocatable :: & sol(:) ! solution vector - real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & - work1, & ! temporary - work2 ! temporary - character(len=*), parameter :: subname = '(implicit_solver)' call ice_timer_start(timer_dynamics) ! dynamics @@ -640,27 +635,6 @@ subroutine implicit_solver (dt) enddo !$OMP END PARALLEL DO - ! strocn computed on U, N, E as needed. Map strocn U divided by aiU to T - ! TODO: This should be done elsewhere as part of generalization? - ! conservation requires aiU be divided before averaging - work1 = c0 - work2 = c0 - !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) - do iblk = 1, nblocks - do ij = 1, icellu(iblk) - i = indxui(ij,iblk) - j = indxuj(ij,iblk) - work1(i,j,iblk) = strocnxU(i,j,iblk)/aiU(i,j,iblk) - work2(i,j,iblk) = strocnyU(i,j,iblk)/aiU(i,j,iblk) - enddo - enddo - call ice_HaloUpdate (work1, halo_info, & - field_loc_NEcorner, field_type_vector) - call ice_HaloUpdate (work2, halo_info, & - field_loc_NEcorner, field_type_vector) - call grid_average_X2Y('F',work1,'U',strocnxT,'T') ! shift - call grid_average_X2Y('F',work2,'U',strocnyT,'T') - ! shift velocity components from CD grid locations (N, E) to B grid location (U) for transport ! commented out in order to focus on EVP for now within the cdgrid ! should be used when routine is ready diff --git a/cicecore/cicedynB/general/ice_flux.F90 b/cicecore/cicedynB/general/ice_flux.F90 index a7e5aa584..8d190753e 100644 --- a/cicecore/cicedynB/general/ice_flux.F90 +++ b/cicecore/cicedynB/general/ice_flux.F90 @@ -35,6 +35,16 @@ module ice_flux ! Dynamics component ! All variables are assumed to be on the atm or ocn thermodynamic ! grid except as noted + ! + ! scale_fluxes divides several of these by aice "in place", so + ! the state of some of these variables is not well defined. In the + ! future, we need to refactor and add "_iavg" versions of the + ! fields to clearly differentiate fields that have been divided + ! 
by aice and others that are not. The challenge is that we need + ! to go thru each field carefully to see which version is used. + ! For instance, in diagnostics, there are places where these + ! fields are multiplied by aice to compute things properly. + ! strocn[x,y]T_iavg is the first field defined using _iavg. !----------------------------------------------------------------- real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & @@ -56,8 +66,8 @@ module ice_flux ! out to ocean T-cell (kg/m s^2) ! Note, CICE_IN_NEMO uses strocnx and strocny for coupling - strocnxT, & ! ice-ocean stress, x-direction at T points, per ice fraction - strocnyT ! ice-ocean stress, y-direction at T points, per ice fraction + strocnxT_iavg, & ! ice-ocean stress, x-direction at T points, per ice fraction (scaled flux) + strocnyT_iavg ! ice-ocean stress, y-direction at T points, per ice fraction (scaled flux) ! diagnostic @@ -389,8 +399,8 @@ subroutine alloc_flux hwater (nx_block,ny_block,max_blocks), & ! water depth for seabed stress calc (landfast ice) strairxT (nx_block,ny_block,max_blocks), & ! stress on ice by air, x-direction strairyT (nx_block,ny_block,max_blocks), & ! stress on ice by air, y-direction - strocnxT (nx_block,ny_block,max_blocks), & ! ice-ocean stress, x-direction - strocnyT (nx_block,ny_block,max_blocks), & ! ice-ocean stress, y-direction + strocnxT_iavg(nx_block,ny_block,max_blocks), & ! ice-ocean stress, x-direction, per ice area + strocnyT_iavg(nx_block,ny_block,max_blocks), & ! ice-ocean stress, y-direction, per ice area sig1 (nx_block,ny_block,max_blocks), & ! normalized principal stress component sig2 (nx_block,ny_block,max_blocks), & ! normalized principal stress component sigP (nx_block,ny_block,max_blocks), & ! internal ice pressure (N/m) @@ -765,8 +775,8 @@ subroutine init_coupler_flux ! fluxes sent to ocean !----------------------------------------------------------------- - strocnxT(:,:,:) = c0 ! ice-ocean stress, x-direction (T-cell) - strocnyT(:,:,:) = c0 ! ice-ocean stress, y-direction (T-cell) + strocnxT_iavg (:,:,:) = c0 ! ice-ocean stress, x-direction (T-cell) + strocnyT_iavg (:,:,:) = c0 ! ice-ocean stress, y-direction (T-cell) fresh (:,:,:) = c0 fsalt (:,:,:) = c0 fpond (:,:,:) = c0 diff --git a/cicecore/cicedynB/general/ice_state.F90 b/cicecore/cicedynB/general/ice_state.F90 index 7b718b824..10e0aabf8 100644 --- a/cicecore/cicedynB/general/ice_state.F90 +++ b/cicecore/cicedynB/general/ice_state.F90 @@ -54,7 +54,8 @@ module ice_state real (kind=dbl_kind), dimension(:,:,:), allocatable, & public :: & - aice , & ! concentration of ice + aice , & ! concentration of ice on T grid + aiU , & ! concentration of ice on U grid vice , & ! volume per unit area of ice (m) vsno ! volume per unit area of snow (m) @@ -151,7 +152,8 @@ subroutine alloc_state file=__FILE__, line=__LINE__) allocate ( & - aice (nx_block,ny_block,max_blocks) , & ! concentration of ice + aice (nx_block,ny_block,max_blocks) , & ! concentration of ice T grid + aiU (nx_block,ny_block,max_blocks) , & ! concentration of ice U grid vice (nx_block,ny_block,max_blocks) , & ! volume per unit area of ice (m) vsno (nx_block,ny_block,max_blocks) , & ! volume per unit area of snow (m) aice0 (nx_block,ny_block,max_blocks) , & ! 
concentration of open water diff --git a/cicecore/cicedynB/general/ice_step_mod.F90 b/cicecore/cicedynB/general/ice_step_mod.F90 index 794858a83..39f10ffdf 100644 --- a/cicecore/cicedynB/general/ice_step_mod.F90 +++ b/cicecore/cicedynB/general/ice_step_mod.F90 @@ -12,7 +12,14 @@ module ice_step_mod use ice_kinds_mod + use ice_blocks, only: block, get_block + use ice_blocks, only: nx_block, ny_block use ice_constants, only: c0, c1, c1000, c4, p25 + use ice_constants, only: field_loc_center, field_loc_NEcorner, & + field_loc_Nface, field_loc_Eface, & + field_type_scalar, field_type_vector + use ice_domain, only: halo_info, nblocks, blocks_ice + use ice_domain_size, only: max_blocks use ice_exit, only: abort_ice use ice_fileunits, only: nu_diag use icepack_intfc, only: icepack_warnings_flush, icepack_warnings_aborted @@ -95,8 +102,6 @@ end subroutine step_prep subroutine prep_radiation (iblk) - use ice_blocks, only: block, get_block - use ice_domain, only: blocks_ice use ice_domain_size, only: ncat, nilyr, nslyr use ice_flux, only: scale_factor, swvdr, swvdf, swidr, swidf, & alvdr_ai, alvdf_ai, alidr_ai, alidf_ai, & @@ -188,14 +193,9 @@ subroutine step_therm1 (dt, iblk) hfreebd, hdraft, hridge, distrdg, hkeel, dkeel, lfloe, dfloe, & fswsfcn, fswintn, Sswabsn, Iswabsn, meltsliqn, meltsliq, & fswthrun, fswthrun_vdr, fswthrun_vdf, fswthrun_idr, fswthrun_idf - use ice_blocks, only: block, get_block -#ifdef CICE_IN_NEMO - use ice_blocks, only: nx_block, ny_block -#endif use ice_calendar, only: yday - use ice_domain, only: blocks_ice use ice_domain_size, only: ncat, nilyr, nslyr, n_iso, n_aero - use ice_flux, only: frzmlt, sst, Tf, strocnxT, strocnyT, rside, fbot, Tbot, Tsnice, & + use ice_flux, only: frzmlt, sst, Tf, strocnxT_iavg, strocnyT_iavg, rside, fbot, Tbot, Tsnice, & meltsn, melttn, meltbn, congeln, snoicen, uatmT, vatmT, fside, & wind, rhoa, potT, Qa, zlvl, zlvs, strax, stray, flatn, fsensn, fsurfn, fcondtopn, & flw, fsnow, fpond, sss, mlt_onset, frz_onset, fcondbotn, fcondbot, fsloss, & @@ -448,8 +448,8 @@ subroutine step_therm1 (dt, iblk) sst = sst (i,j, iblk), & sss = sss (i,j, iblk), & Tf = Tf (i,j, iblk), & - strocnxT = strocnxT (i,j, iblk), & - strocnyT = strocnyT (i,j, iblk), & + strocnxT = strocnxT_iavg(i,j, iblk), & + strocnyT = strocnyT_iavg(i,j, iblk), & fbot = fbot (i,j, iblk), & Tbot = Tbot (i,j, iblk), & Tsnice = Tsnice (i,j, iblk), & @@ -602,9 +602,7 @@ subroutine step_therm2 (dt, iblk) wave_spectrum, wavefreq, dwavefreq, & first_ice, bgrid, cgrid, igrid, floe_rad_c, floe_binwidth, & d_afsd_latg, d_afsd_newi, d_afsd_latm, d_afsd_weld - use ice_blocks, only: block, get_block use ice_calendar, only: yday - use ice_domain, only: blocks_ice use ice_domain_size, only: ncat, nilyr, nslyr, nblyr, nfsd use ice_flux, only: fresh, frain, fpond, frzmlt, frazil, frz_onset, & update_ocn_f, fsalt, Tf, sss, salinz, fhocn, rside, fside, & @@ -744,8 +742,6 @@ end subroutine step_therm2 subroutine update_state (dt, daidt, dvidt, dagedt, offset) - use ice_blocks, only: nx_block, ny_block - use ice_domain, only: nblocks use ice_domain_size, only: ncat ! 
use ice_grid, only: tmask use ice_state, only: aicen, trcrn, vicen, vsnon, & @@ -862,8 +858,6 @@ subroutine step_dyn_wave (dt) use ice_arrays_column, only: wave_spectrum, & d_afsd_wave, floe_rad_l, floe_rad_c, wavefreq, dwavefreq - use ice_blocks, only: block, get_block - use ice_domain, only: blocks_ice, nblocks use ice_domain_size, only: ncat, nfsd, nfreq use ice_state, only: trcrn, aicen, aice, vice use ice_timers, only: ice_timer_start, ice_timer_stop, timer_column, & @@ -937,18 +931,34 @@ end subroutine step_dyn_wave subroutine step_dyn_horiz (dt) + use ice_boundary, only: ice_HaloUpdate use ice_dyn_evp, only: evp use ice_dyn_eap, only: eap use ice_dyn_vp, only: implicit_solver use ice_dyn_shared, only: kdyn + use ice_flux, only: strocnxU, strocnyU, strocnxT_iavg, strocnyT_iavg use ice_flux, only: init_history_dyn use ice_grid, only: grid_average_X2Y - use ice_state, only: uvel, vvel, uvelT, vvelT + use ice_state, only: aiU, uvel, vvel, uvelT, vvelT use ice_transport_driver, only: advection, transport_upwind, transport_remap real (kind=dbl_kind), intent(in) :: & dt ! dynamics time step + ! local variables + + type (block) :: & + this_block ! block information for current block + + integer (kind=int_kind) :: & + ilo,ihi,jlo,jhi, & ! beginning and end of physical domain + iblk, & ! block index + i, j ! horizontal indices + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + work1, & ! temporary + work2 ! temporary + character(len=*), parameter :: subname = '(step_dyn_horiz)' call init_history_dyn ! initialize dynamic history variables @@ -969,6 +979,38 @@ subroutine step_dyn_horiz (dt) call grid_average_X2Y('A', uvel, 'U', uvelT, 'T') call grid_average_X2Y('A', vvel, 'U', vvelT, 'T') + !----------------------------------------------------------------- + ! Compute strocnxT_iavg, strocnyT_iavg for thermo and coupling + !----------------------------------------------------------------- + + ! strocn computed on U, N, E as needed. Map strocn U divided by aiU to T + ! conservation requires aiU be divided before averaging + work1 = c0 + work2 = c0 + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk), iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + if (aiU(i,j,iblk) /= c0) then + work1(i,j,iblk) = strocnxU(i,j,iblk)/aiU(i,j,iblk) + work2(i,j,iblk) = strocnyU(i,j,iblk)/aiU(i,j,iblk) + endif + enddo + enddo + enddo + !$OMP END PARALLEL DO + call ice_HaloUpdate (work1, halo_info, & + field_loc_NEcorner, field_type_vector) + call ice_HaloUpdate (work2, halo_info, & + field_loc_NEcorner, field_type_vector) + call grid_average_X2Y('F', work1, 'U', strocnxT_iavg, 'T') ! shift + call grid_average_X2Y('F', work2, 'U', strocnyT_iavg, 'T') + !----------------------------------------------------------------- ! 
Horizontal ice transport !----------------------------------------------------------------- @@ -991,8 +1033,6 @@ end subroutine step_dyn_horiz subroutine step_dyn_ridge (dt, ndtd, iblk) use ice_arrays_column, only: hin_max, fzsal, first_ice - use ice_blocks, only: block, get_block - use ice_domain, only: blocks_ice use ice_domain_size, only: ncat, nilyr, nslyr, n_aero, nblyr use ice_flux, only: & rdg_conv, rdg_shear, dardg1dt, dardg2dt, & @@ -1115,9 +1155,7 @@ end subroutine step_dyn_ridge subroutine step_snow (dt, iblk) - use ice_blocks, only: block, get_block use ice_calendar, only: nstreams - use ice_domain, only: blocks_ice use ice_domain_size, only: ncat, nslyr, nilyr use ice_flux, only: snwcnt, wind, fresh, fhocn, fsloss, fsnow use ice_state, only: trcrn, vsno, vsnon, vicen, aicen, aice @@ -1233,9 +1271,7 @@ subroutine step_radiation (dt, iblk) alvdrn, alidrn, alvdfn, alidfn, apeffn, trcrn_sw, snowfracn, & kaer_tab, waer_tab, gaer_tab, kaer_bc_tab, waer_bc_tab, & gaer_bc_tab, bcenh, swgrid, igrid - use ice_blocks, only: block, get_block use ice_calendar, only: calendar_type, days_per_year, nextsw_cday, yday, msec - use ice_domain, only: blocks_ice use ice_domain_size, only: ncat, n_aero, nilyr, nslyr, n_zaero, n_algae, nblyr use ice_flux, only: swvdr, swvdf, swidr, swidf, coszen, fsnow use ice_grid, only: TLAT, TLON, tmask @@ -1423,7 +1459,6 @@ end subroutine step_radiation subroutine ocean_mixed_layer (dt, iblk) use ice_arrays_column, only: Cdn_atm, Cdn_atm_ratio - use ice_blocks, only: nx_block, ny_block use ice_flux, only: sst, Tf, Qa, uatmT, vatmT, wind, potT, rhoa, zlvl, & frzmlt, fhocn, fswthru, flw, flwout_ocn, fsens_ocn, flat_ocn, evap_ocn, & alvdr_ocn, alidr_ocn, alvdf_ocn, alidf_ocn, swidf, swvdf, swidr, swvdr, & @@ -1580,8 +1615,6 @@ subroutine biogeochemistry (dt, iblk) snow_bio_net, fswthrun, Rayleigh_criteria, & ocean_bio_all, sice_rho, fzsal, fzsal_g, & bgrid, igrid, icgrid, cgrid - use ice_blocks, only: block, get_block - use ice_domain, only: blocks_ice use ice_domain_size, only: nblyr, nilyr, nslyr, n_algae, n_zaero, ncat, & n_doc, n_dic, n_don, n_fed, n_fep use ice_flux, only: meltbn, melttn, congeln, snoicen, & diff --git a/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 b/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 index 5581bd1cf..cfc44d987 100644 --- a/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 +++ b/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 @@ -56,7 +56,7 @@ subroutine dumpfile(filename_spec) use ice_domain, only: nblocks use ice_domain_size, only: nilyr, nslyr, ncat, max_blocks use ice_flux, only: scale_factor, swvdr, swvdf, swidr, swidf, & - strocnxT, strocnyT, sst, frzmlt, & + strocnxT_iavg, strocnyT_iavg, sst, frzmlt, & stressp_1, stressp_2, stressp_3, stressp_4, & stressm_1, stressm_2, stressm_3, stressm_4, & stress12_1, stress12_2, stress12_3, stress12_4, & @@ -175,8 +175,8 @@ subroutine dumpfile(filename_spec) !----------------------------------------------------------------- ! ocean stress (for bottom heat flux in thermo) !----------------------------------------------------------------- - call write_restart_field(nu_dump,0,strocnxT,'ruf8','strocnxT',1,diag) - call write_restart_field(nu_dump,0,strocnyT,'ruf8','strocnyT',1,diag) + call write_restart_field(nu_dump,0,strocnxT_iavg,'ruf8','strocnxT',1,diag) + call write_restart_field(nu_dump,0,strocnyT_iavg,'ruf8','strocnyT',1,diag) !----------------------------------------------------------------- ! 
internal stress @@ -277,7 +277,7 @@ subroutine restartfile (ice_ic) use ice_domain_size, only: nilyr, nslyr, ncat, & max_blocks use ice_flux, only: scale_factor, swvdr, swvdf, swidr, swidf, & - strocnxT, strocnyT, sst, frzmlt, & + strocnxT_iavg, strocnyT_iavg, sst, frzmlt, & stressp_1, stressp_2, stressp_3, stressp_4, & stressm_1, stressm_2, stressm_3, stressm_4, & stress12_1, stress12_2, stress12_3, stress12_4, & @@ -431,9 +431,9 @@ subroutine restartfile (ice_ic) if (my_task == master_task) & write(nu_diag,*) 'min/max ocean stress components' - call read_restart_field(nu_restart,0,strocnxT,'ruf8', & + call read_restart_field(nu_restart,0,strocnxT_iavg,'ruf8', & 'strocnxT',1,diag,field_loc_center, field_type_vector) - call read_restart_field(nu_restart,0,strocnyT,'ruf8', & + call read_restart_field(nu_restart,0,strocnyT_iavg,'ruf8', & 'strocnyT',1,diag,field_loc_center, field_type_vector) !----------------------------------------------------------------- @@ -711,7 +711,7 @@ subroutine restartfile_v4 (ice_ic) use ice_domain_size, only: nilyr, nslyr, ncat, nx_global, ny_global, & max_blocks use ice_flux, only: scale_factor, swvdr, swvdf, swidr, swidf, & - strocnxT, strocnyT, sst, frzmlt, & + strocnxT_iavg, strocnyT_iavg, sst, frzmlt, & stressp_1, stressp_2, stressp_3, stressp_4, & stressm_1, stressm_2, stressm_3, stressm_4, & stress12_1, stress12_2, stress12_3, stress12_4 @@ -876,9 +876,9 @@ subroutine restartfile_v4 (ice_ic) if (my_task == master_task) & write(nu_diag,*) 'min/max ocean stress components' - call ice_read(nu_restart,0,strocnxT,'ruf8',diag, & + call ice_read(nu_restart,0,strocnxT_iavg,'ruf8',diag, & field_loc_center, field_type_vector) - call ice_read(nu_restart,0,strocnyT,'ruf8',diag, & + call ice_read(nu_restart,0,strocnyT_iavg,'ruf8',diag, & field_loc_center, field_type_vector) !----------------------------------------------------------------- diff --git a/cicecore/drivers/mct/cesm1/ice_import_export.F90 b/cicecore/drivers/mct/cesm1/ice_import_export.F90 index 7ac4f0bb7..868ed42b4 100644 --- a/cicecore/drivers/mct/cesm1/ice_import_export.F90 +++ b/cicecore/drivers/mct/cesm1/ice_import_export.F90 @@ -9,7 +9,7 @@ module ice_import_export use ice_constants , only: field_type_vector, c100 use ice_constants , only: p001, p5 use ice_blocks , only: block, get_block, nx_block, ny_block - use ice_flux , only: strairxT, strairyT, strocnxT, strocnyT + use ice_flux , only: strairxT, strairyT, strocnxT_iavg, strocnyT_iavg use ice_flux , only: alvdr, alidr, alvdf, alidf, Tref, Qref, Uref use ice_flux , only: flat, fsens, flwout, evap, fswabs, fhocn, fswthru use ice_flux , only: fresh, fsalt, zlvl, uatm, vatm, potT, Tair, Qa @@ -571,8 +571,8 @@ subroutine ice_export( i2x ) + workx*sin(ANGLET(i,j,iblk)) ! ice/ocean stress (on POP T-grid: convert to lat-lon) - workx = -strocnxT(i,j,iblk) ! N/m^2 - worky = -strocnyT(i,j,iblk) ! N/m^2 + workx = -strocnxT_iavg(i,j,iblk) ! N/m^2 + worky = -strocnyT_iavg(i,j,iblk) ! N/m^2 tauxo(i,j,iblk) = workx*cos(ANGLET(i,j,iblk)) & - worky*sin(ANGLET(i,j,iblk)) tauyo(i,j,iblk) = worky*cos(ANGLET(i,j,iblk)) & diff --git a/cicecore/drivers/mct/cesm1/ice_prescribed_mod.F90 b/cicecore/drivers/mct/cesm1/ice_prescribed_mod.F90 index 78b7d15c4..599249083 100644 --- a/cicecore/drivers/mct/cesm1/ice_prescribed_mod.F90 +++ b/cicecore/drivers/mct/cesm1/ice_prescribed_mod.F90 @@ -585,11 +585,11 @@ subroutine ice_prescribed_phys ! 
set non-computed fluxes, ice velocities, ice-ocn stresses to zero !-------------------------------------------------------------------- - frzmlt (:,:,:) = c0 - uvel (:,:,:) = c0 - vvel (:,:,:) = c0 - strocnxT (:,:,:) = c0 - strocnyT (:,:,:) = c0 + frzmlt (:,:,:) = c0 + uvel (:,:,:) = c0 + vvel (:,:,:) = c0 + strocnxT_iavg(:,:,:) = c0 + strocnyT_iavg(:,:,:) = c0 !----------------------------------------------------------------- ! other atm and ocn fluxes diff --git a/cicecore/drivers/nuopc/cmeps/ice_import_export.F90 b/cicecore/drivers/nuopc/cmeps/ice_import_export.F90 index 7bfc53f45..5a6ce7572 100644 --- a/cicecore/drivers/nuopc/cmeps/ice_import_export.F90 +++ b/cicecore/drivers/nuopc/cmeps/ice_import_export.F90 @@ -10,7 +10,7 @@ module ice_import_export use ice_domain , only : nblocks, blocks_ice, halo_info, distrb_info use ice_domain_size , only : nx_global, ny_global, block_size_x, block_size_y, max_blocks, ncat use ice_exit , only : abort_ice - use ice_flux , only : strairxT, strairyT, strocnxT, strocnyT + use ice_flux , only : strairxT, strairyT, strocnxT_iavg, strocnyT_iavg use ice_flux , only : alvdr, alidr, alvdf, alidf, Tref, Qref, Uref use ice_flux , only : flat, fsens, flwout, evap, fswabs, fhocn, fswthru use ice_flux , only : fswthru_vdr, fswthru_vdf, fswthru_idr, fswthru_idf @@ -914,8 +914,8 @@ subroutine ice_export( exportState, rc ) tauya(i,j,iblk) = worky*cos(ANGLET(i,j,iblk)) + workx*sin(ANGLET(i,j,iblk)) ! ice/ocean stress (on POP T-grid: convert to lat-lon) - workx = -strocnxT(i,j,iblk) ! N/m^2 - worky = -strocnyT(i,j,iblk) ! N/m^2 + workx = -strocnxT_iavg(i,j,iblk) ! N/m^2 + worky = -strocnyT_iavg(i,j,iblk) ! N/m^2 tauxo(i,j,iblk) = workx*cos(ANGLET(i,j,iblk)) - worky*sin(ANGLET(i,j,iblk)) tauyo(i,j,iblk) = worky*cos(ANGLET(i,j,iblk)) + workx*sin(ANGLET(i,j,iblk)) enddo diff --git a/cicecore/drivers/nuopc/cmeps/ice_prescribed_mod.F90 b/cicecore/drivers/nuopc/cmeps/ice_prescribed_mod.F90 index 84973e9dd..06b090ece 100644 --- a/cicecore/drivers/nuopc/cmeps/ice_prescribed_mod.F90 +++ b/cicecore/drivers/nuopc/cmeps/ice_prescribed_mod.F90 @@ -472,11 +472,11 @@ subroutine ice_prescribed_phys() ! set non-computed fluxes, ice velocities, ice-ocn stresses to zero !-------------------------------------------------------------------- - frzmlt (:,:,:) = c0 - uvel (:,:,:) = c0 - vvel (:,:,:) = c0 - strocnxT (:,:,:) = c0 - strocnyT (:,:,:) = c0 + frzmlt (:,:,:) = c0 + uvel (:,:,:) = c0 + vvel (:,:,:) = c0 + strocnxT_iavg(:,:,:) = c0 + strocnyT_iavg(:,:,:) = c0 !----------------------------------------------------------------- ! other atm and ocn fluxes diff --git a/cicecore/drivers/nuopc/dmi/cice_cap.info b/cicecore/drivers/nuopc/dmi/cice_cap.info index 0ec1dea5a..c4c6bea55 100644 --- a/cicecore/drivers/nuopc/dmi/cice_cap.info +++ b/cicecore/drivers/nuopc/dmi/cice_cap.info @@ -1023,8 +1023,8 @@ module cice_cap dataPtr_vice (i1,j1,iblk) = vice(i,j,iblk) ! sea ice volume dataPtr_vsno (i1,j1,iblk) = vsno(i,j,iblk) ! snow volume dataPtr_fswthru (i1,j1,iblk) = fswthru(i,j,iblk) ! short wave penetration through ice - ui = strocnxT(i,j,iblk) - vj = strocnyT(i,j,iblk) + ui = strocnxT_iavg(i,j,iblk) + vj = strocnyT_iavg(i,j,iblk) angT = ANGLET(i,j,iblk) dataPtr_strocnxT(i1,j1,iblk) = ui*cos(-angT) + vj*sin(angT) ! ice ocean stress dataPtr_strocnyT(i1,j1,iblk) = -ui*sin(angT) + vj*cos(-angT) ! 
ice ocean stress From 578c111dff02850cd1da5ca02c0ea26ac04cdaf2 Mon Sep 17 00:00:00 2001 From: Philippe Blain Date: Mon, 10 Oct 2022 21:14:08 -0400 Subject: [PATCH 24/57] ice_init: do broadcast 'default_season' (#766) When the 'default_season' namelist setting was added in 01494c75 (Nml settings (#208), 2018-10-19) to replace 'l_winter' and 'l_spring', a call to 'broadcast_scalar' was forgotten, such that the 'default_season' value from the namelist is only used on the first MPI process; all other processes get the hardcoded default value 'winter' defined in 'ice_init::input_data', resulting in different initialization across the grid for several variables if anything other than 'winter' is used in the namelist. Fix that by broadcasting 'default_season' to all MPI procs. --- cicecore/cicedynB/general/ice_init.F90 | 1 + 1 file changed, 1 insertion(+) diff --git a/cicecore/cicedynB/general/ice_init.F90 b/cicecore/cicedynB/general/ice_init.F90 index e0ebdfbed..893c3da9a 100644 --- a/cicecore/cicedynB/general/ice_init.F90 +++ b/cicecore/cicedynB/general/ice_init.F90 @@ -980,6 +980,7 @@ subroutine input_data call broadcast_scalar(albsnowi, master_task) call broadcast_scalar(ahmax, master_task) call broadcast_scalar(atmbndy, master_task) + call broadcast_scalar(default_season, master_task) call broadcast_scalar(fyear_init, master_task) call broadcast_scalar(ycycle, master_task) call broadcast_scalar(atm_data_format, master_task) From af8cc84308beb3dd959639f0142a8cbb29a9a5e6 Mon Sep 17 00:00:00 2001 From: Lettie Roach Date: Tue, 11 Oct 2022 11:22:39 -0400 Subject: [PATCH 25/57] Correct units in FSD history output (#769) --- cicecore/cicedynB/analysis/ice_history_fsd.F90 | 8 ++++---- doc/source/science_guide/sg_fundvars.rst | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/cicecore/cicedynB/analysis/ice_history_fsd.F90 b/cicecore/cicedynB/analysis/ice_history_fsd.F90 index 50fee99e7..18e936e13 100644 --- a/cicecore/cicedynB/analysis/ice_history_fsd.F90 +++ b/cicecore/cicedynB/analysis/ice_history_fsd.F90 @@ -137,7 +137,7 @@ subroutine init_hist_fsd_2D do ns = 1, nstreams if (f_wave_sig_ht(1:1) /= 'x') & - call define_hist_field(n_wave_sig_ht,"wave_sig_ht","1",tstr2D, tcstr, & + call define_hist_field(n_wave_sig_ht,"wave_sig_ht","m",tstr2D, tcstr, & "significant height of wind and swell waves", & "from attenuated spectrum in ice", c1, c0, & ns, f_wave_sig_ht) @@ -147,7 +147,7 @@ subroutine init_hist_fsd_2D "for waves", c1, c0, & ns, f_aice_ww) if (f_diam_ww(1:1) /= 'x') & - call define_hist_field(n_diam_ww,"diam_ww","1",tstr2D, tcstr, & + call define_hist_field(n_diam_ww,"diam_ww","m",tstr2D, tcstr, & "Average (number) diameter of floes > Dmin", & "for waves", c1, c0, & ns, f_diam_ww) @@ -216,7 +216,7 @@ subroutine init_hist_fsd_3Df if (histfreq(ns) /= 'x') then if (f_afsd(1:1) /= 'x') & - call define_hist_field(n_afsd,"afsd", "1", tstr3Df, tcstr, & + call define_hist_field(n_afsd,"afsd", "1/m", tstr3Df, tcstr, & "areal floe size distribution", & "per unit bin width ", c1, c0, ns, f_afsd) if (f_dafsd_newi(1:1) /= 'x') & @@ -272,7 +272,7 @@ subroutine init_hist_fsd_4Df if (histfreq(ns) /= 'x') then if (f_afsdn(1:1) /= 'x') & - call define_hist_field(n_afsdn,"afsdn","1",tstr4Df, tcstr, & + call define_hist_field(n_afsdn,"afsdn","1/m",tstr4Df, tcstr, & "areal floe size and thickness distribution", & "per unit bin width", c1, c0, ns, f_afsdn) diff --git a/doc/source/science_guide/sg_fundvars.rst b/doc/source/science_guide/sg_fundvars.rst index e80b68f4a..5b5703266 100644 --- 
a/doc/source/science_guide/sg_fundvars.rst +++ b/doc/source/science_guide/sg_fundvars.rst @@ -14,7 +14,7 @@ modeling is to describe the evolution of the ice thickness distribution In addition to an ice thickness distribution, CICE includes an optional capability for a floe size distribution. -Ice floe horizontal size may change through vertical and lateral growth and melting of existing floes, freezing of new ice, wave breaking, and welding of floes in freezing conditions. The floe size distribution (FSD) is a probability function that characterizes this variability. The scheme is based on the theoretical framework described in :cite:`Horvat15` for a joint floe size and thickness distribution (FSTD), and was implemented by :cite:`Roach18` and :cite:`Roach19`. The joint floe size distribution is carried as an area-weighted tracer, defined as the fraction of ice belonging to a given thickness category with lateral floe size belong to a given floe size class. This development includes interactions between sea ice and ocean surface waves. Input data on ocean surface wave spectra at a single time is provided for testing, but as with the other CICE datasets, it should not be used for production runs or publications. +Ice floe horizontal size may change through vertical and lateral growth and melting of existing floes, freezing of new ice, wave breaking, and welding of floes in freezing conditions. The floe size distribution (FSD) is a probability function that characterizes this variability. The scheme is based on the theoretical framework described in :cite:`Horvat15` for a joint floe size and thickness distribution (FSTD), and was implemented by :cite:`Roach18` and :cite:`Roach19`. The joint floe size distribution is carried as an area-weighted tracer, defined as the fraction of ice belonging to a given thickness category with lateral floe size belong to a given floe size class. This development includes interactions between sea ice and ocean surface waves. Input data on ocean surface wave spectra at a single time is provided for testing, but as with the other CICE datasets, it should not be used for production runs or publications. It is not recommended to use the FSD without ocean surface waves. Additional information about the ITD and joint FSTD for CICE can be found in the `Icepack documentation `_. 
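The corrected units above follow from how the floe size distribution is normalized: the afsd history field is accumulated per unit floe-size bin width, so summing afsd(n)*binwidth(n) over the bins must return a dimensionless area fraction, which leaves afsd itself with units of 1/m (and puts plain metres on length-type diagnostics such as wave_sig_ht and diam_ww). A minimal, self-contained Fortran sketch of that dimensional argument; this is a hypothetical standalone check with illustrative bin values, not code from this patch (the model's real bins come from floe_rad_c and floe_binwidth, and afsd is additionally resolved per thickness category for afsdn):

    program fsd_units_check
       implicit none
       integer, parameter :: dbl_kind = selected_real_kind(13)
       integer, parameter :: nfsd = 12           ! number of floe size bins
       real (kind=dbl_kind) :: afsd(nfsd)        ! areal FSD, units 1/m
       real (kind=dbl_kind) :: binwidth(nfsd)    ! bin widths, units m
       real (kind=dbl_kind) :: total             ! dimensionless area fraction
       integer :: n

       binwidth(:) = 10.0_dbl_kind               ! illustrative uniform 10 m bins
       ! a flat distribution normalized to one: (1/m) per bin
       afsd(:) = 1.0_dbl_kind / (real(nfsd,dbl_kind)*binwidth(1))

       total = 0.0_dbl_kind
       do n = 1, nfsd
          total = total + afsd(n)*binwidth(n)    ! (1/m) * (m) = dimensionless
       enddo
       write(*,*) 'FSD normalization check (expect 1.0):', total
    end program fsd_units_check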
From 6a62a11db7aac62f1387037d88f7e621af9f4295 Mon Sep 17 00:00:00 2001 From: Tony Craig Date: Fri, 14 Oct 2022 14:02:39 -0700 Subject: [PATCH 26/57] Update box test options (#772) - refactor options boxsym and boxislands to boxopen, boxclosed, and boxforce - update box test names as needed - update calculation of test suite failures in cice.results.csh - add documentation to user guide for rectangular grids --- .../{set_nml.boxsymw => set_nml.boxclosed} | 5 -- .../scripts/options/set_nml.boxforcee | 5 ++ .../scripts/options/set_nml.boxforcen | 5 ++ .../scripts/options/set_nml.boxforcene | 5 ++ .../scripts/options/set_nml.boxislandsn | 46 ------------- .../scripts/options/set_nml.boxislandsne | 46 ------------- .../{set_nml.boxislandse => set_nml.boxopen} | 6 -- configuration/scripts/options/set_nml.boxsyme | 58 ---------------- configuration/scripts/options/set_nml.boxsymn | 58 ---------------- .../scripts/options/set_nml.boxsymne | 58 ---------------- configuration/scripts/options/set_nml.boxsyms | 58 ---------------- configuration/scripts/options/set_nml.gbox12 | 2 +- configuration/scripts/options/set_nml.gbox180 | 2 +- configuration/scripts/options/set_nml.gbox80 | 2 +- configuration/scripts/tests/cice.results.csh | 18 +++-- configuration/scripts/tests/gridsys_suite.ts | 69 ++++--------------- doc/source/user_guide/ug_implementation.rst | 28 ++++++++ 17 files changed, 70 insertions(+), 401 deletions(-) rename configuration/scripts/options/{set_nml.boxsymw => set_nml.boxclosed} (89%) create mode 100644 configuration/scripts/options/set_nml.boxforcee create mode 100644 configuration/scripts/options/set_nml.boxforcen create mode 100644 configuration/scripts/options/set_nml.boxforcene delete mode 100644 configuration/scripts/options/set_nml.boxislandsn delete mode 100644 configuration/scripts/options/set_nml.boxislandsne rename configuration/scripts/options/{set_nml.boxislandse => set_nml.boxopen} (84%) delete mode 100644 configuration/scripts/options/set_nml.boxsyme delete mode 100644 configuration/scripts/options/set_nml.boxsymn delete mode 100644 configuration/scripts/options/set_nml.boxsymne delete mode 100644 configuration/scripts/options/set_nml.boxsyms diff --git a/configuration/scripts/options/set_nml.boxsymw b/configuration/scripts/options/set_nml.boxclosed similarity index 89% rename from configuration/scripts/options/set_nml.boxsymw rename to configuration/scripts/options/set_nml.boxclosed index 4be1f5f95..d55faa302 100644 --- a/configuration/scripts/options/set_nml.boxsymw +++ b/configuration/scripts/options/set_nml.boxclosed @@ -23,11 +23,6 @@ kridge = -1 ktransport = -1 coriolis = 'zero' atmbndy = 'constant' -atm_data_type = 'uniform_west' -ocn_data_type = 'calm' -ice_data_type = 'uniform' -ice_data_conc = 'parabolic' -ice_data_dist = 'uniform' calc_strair = .false. rotate_wind = .false. restore_ice = .false. 
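The rename above carries the main idea of this refactor: the old monolithic box options (boxsym*, boxislands*) each hard-wired one forcing direction into a complete boundary configuration, while the new scheme composes a boundary option (boxclosed or boxopen) with a small forcing option (boxforcee, boxforcen, boxforcene, added below), so a legacy case such as boxsyme becomes the combination boxclosed,boxforcee in the updated gridsys_suite.ts. A sketch of the effective settings selected by --set boxclosed,boxforcee, assembled from the option files in this patch; the close_boundaries value is inferred from the deleted set_nml.boxsym* files that boxclosed replaces, and the "!" comments are annotation only, not part of the set_nml format:

    ! boundary/dynamics piece, from set_nml.boxclosed
    close_boundaries = .true.
    kdyn             = 1
    coriolis         = 'zero'
    atmbndy          = 'constant'
    ! forcing piece, from set_nml.boxforcee
    atm_data_type    = 'uniform_east'
    ocn_data_type    = 'calm'
    ice_data_type    = 'uniform'
    ice_data_conc    = 'parabolic'
    ice_data_dist    = 'uniform'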
diff --git a/configuration/scripts/options/set_nml.boxforcee b/configuration/scripts/options/set_nml.boxforcee new file mode 100644 index 000000000..357fa69d5 --- /dev/null +++ b/configuration/scripts/options/set_nml.boxforcee @@ -0,0 +1,5 @@ +atm_data_type = 'uniform_east' +ocn_data_type = 'calm' +ice_data_type = 'uniform' +ice_data_conc = 'parabolic' +ice_data_dist = 'uniform' diff --git a/configuration/scripts/options/set_nml.boxforcen b/configuration/scripts/options/set_nml.boxforcen new file mode 100644 index 000000000..87cbaea40 --- /dev/null +++ b/configuration/scripts/options/set_nml.boxforcen @@ -0,0 +1,5 @@ +atm_data_type = 'uniform_north' +ocn_data_type = 'calm' +ice_data_type = 'uniform' +ice_data_conc = 'parabolic' +ice_data_dist = 'uniform' diff --git a/configuration/scripts/options/set_nml.boxforcene b/configuration/scripts/options/set_nml.boxforcene new file mode 100644 index 000000000..396fabde0 --- /dev/null +++ b/configuration/scripts/options/set_nml.boxforcene @@ -0,0 +1,5 @@ +atm_data_type = 'uniform_northeast' +ocn_data_type = 'calm' +ice_data_type = 'uniform' +ice_data_conc = 'parabolic' +ice_data_dist = 'uniform' diff --git a/configuration/scripts/options/set_nml.boxislandsn b/configuration/scripts/options/set_nml.boxislandsn deleted file mode 100644 index dd386ce5a..000000000 --- a/configuration/scripts/options/set_nml.boxislandsn +++ /dev/null @@ -1,46 +0,0 @@ -npt = 48 -kmt_type = 'boxislands' -ice_ic = 'internal' -use_leap_years = .false. -histfreq = 'd','x','x','x','x' -grid_type = 'rectangular' -dxrect = 16.e5 -dyrect = 16.e5 -close_boundaries = .false. -ew_boundary_type = 'cyclic' -ns_boundary_type = 'open' -ktherm = -1 -kstrength = 0 -kdyn = 1 -kridge = -1 -ktransport = -1 -coriolis = 'constant' -atmbndy = 'constant' -atm_data_type = 'uniform_north' -ocn_data_type = 'calm' -ice_data_type = 'uniform' -ice_data_conc = 'parabolic' -ice_data_dist = 'uniform' -rotate_wind = .false. -calc_strair = .false. -restore_ice = .false. -tr_iage = .false. -tr_FY = .false. -tr_pond_lvl = .false. -f_aice = 'd' -f_hi = 'd' -f_hs = 'd' -f_Tsfc = 'd' -f_uvel = 'd' -f_vvel = 'd' -f_uatm = 'd' -f_vatm = 'd' -f_uocn = 'd' -f_vocn = 'd' -f_strairx = 'd' -f_strairy = 'd' -f_strocnx = 'd' -f_strocny = 'd' -f_divu = 'd' -f_sig1 = 'd' -f_sig2 = 'd' diff --git a/configuration/scripts/options/set_nml.boxislandsne b/configuration/scripts/options/set_nml.boxislandsne deleted file mode 100644 index 75db55722..000000000 --- a/configuration/scripts/options/set_nml.boxislandsne +++ /dev/null @@ -1,46 +0,0 @@ -npt = 48 -kmt_type = 'boxislands' -ice_ic = 'internal' -use_leap_years = .false. -histfreq = 'd','x','x','x','x' -grid_type = 'rectangular' -dxrect = 16.e5 -dyrect = 16.e5 -close_boundaries = .false. -ew_boundary_type = 'cyclic' -ns_boundary_type = 'open' -ktherm = -1 -kstrength = 0 -kdyn = 1 -kridge = -1 -ktransport = -1 -coriolis = 'constant' -atmbndy = 'constant' -atm_data_type = 'uniform_northeast' -ocn_data_type = 'calm' -ice_data_type = 'uniform' -ice_data_conc = 'parabolic' -ice_data_dist = 'uniform' -rotate_wind = .false. -calc_strair = .false. -restore_ice = .false. -tr_iage = .false. -tr_FY = .false. -tr_pond_lvl = .false. 
-f_aice = 'd' -f_hi = 'd' -f_hs = 'd' -f_Tsfc = 'd' -f_uvel = 'd' -f_vvel = 'd' -f_uatm = 'd' -f_vatm = 'd' -f_uocn = 'd' -f_vocn = 'd' -f_strairx = 'd' -f_strairy = 'd' -f_strocnx = 'd' -f_strocny = 'd' -f_divu = 'd' -f_sig1 = 'd' -f_sig2 = 'd' diff --git a/configuration/scripts/options/set_nml.boxislandse b/configuration/scripts/options/set_nml.boxopen similarity index 84% rename from configuration/scripts/options/set_nml.boxislandse rename to configuration/scripts/options/set_nml.boxopen index 4a8a47705..bdc832f7d 100644 --- a/configuration/scripts/options/set_nml.boxislandse +++ b/configuration/scripts/options/set_nml.boxopen @@ -1,5 +1,4 @@ npt = 48 -kmt_type = 'boxislands' ice_ic = 'internal' use_leap_years = .false. histfreq = 'd','x','x','x','x' @@ -16,11 +15,6 @@ kridge = -1 ktransport = -1 coriolis = 'constant' atmbndy = 'constant' -atm_data_type = 'uniform_east' -ocn_data_type = 'calm' -ice_data_type = 'uniform' -ice_data_conc = 'parabolic' -ice_data_dist = 'uniform' rotate_wind = .false. calc_strair = .false. restore_ice = .false. diff --git a/configuration/scripts/options/set_nml.boxsyme b/configuration/scripts/options/set_nml.boxsyme deleted file mode 100644 index 3ff31d2c4..000000000 --- a/configuration/scripts/options/set_nml.boxsyme +++ /dev/null @@ -1,58 +0,0 @@ -days_per_year = 360 -use_leap_years = .false. -npt_unit = 'd' -npt = 5 -ice_ic = 'internal' -restart_ext = .true. -histfreq = 'd','1','x','x','x' -grid_type = 'rectangular' -kmt_type = 'default' -dxrect = 16.e5 -dyrect = 16.e5 -close_boundaries = .true. -ew_boundary_type = 'open' -ns_boundary_type = 'open' -tr_iage = .false. -tr_FY = .false. -tr_lvl = .false. -tr_pond_lvl = .false. -ktherm = -1 -kstrength = 0 -kdyn = 1 -kridge = -1 -ktransport = -1 -coriolis = 'zero' -atmbndy = 'constant' -atm_data_type = 'uniform_east' -ocn_data_type = 'calm' -ice_data_type = 'uniform' -ice_data_conc = 'parabolic' -ice_data_dist = 'uniform' -calc_strair = .false. -rotate_wind = .false. -restore_ice = .false. -f_aice = 'd1' -f_hi = 'd1' -f_hs = 'd' -f_Tsfc = 'd' -f_uvel = 'd1' -f_vvel = 'd1' -f_uatm = 'd' -f_vatm = 'd' -f_uocn = 'd' -f_vocn = 'd' -f_strairx = 'd1' -f_strairy = 'd1' -f_strtltx = 'd1' -f_strtlty = 'd1' -f_strcorx = 'd1' -f_strcory = 'd1' -f_strocnx = 'd1' -f_strocny = 'd1' -f_strintx = 'd1' -f_strinty = 'd1' -f_taubx = 'd1' -f_tauby = 'd1' -f_divu = 'd1' -f_sig1 = 'd1' -f_sig2 = 'd1' diff --git a/configuration/scripts/options/set_nml.boxsymn b/configuration/scripts/options/set_nml.boxsymn deleted file mode 100644 index 90ef74813..000000000 --- a/configuration/scripts/options/set_nml.boxsymn +++ /dev/null @@ -1,58 +0,0 @@ -days_per_year = 360 -use_leap_years = .false. -npt_unit = 'd' -npt = 5 -ice_ic = 'internal' -restart_ext = .true. -histfreq = 'd','1','x','x','x' -grid_type = 'rectangular' -kmt_type = 'default' -dxrect = 16.e5 -dyrect = 16.e5 -close_boundaries = .true. -ew_boundary_type = 'open' -ns_boundary_type = 'open' -tr_iage = .false. -tr_FY = .false. -tr_lvl = .false. -tr_pond_lvl = .false. -ktherm = -1 -kstrength = 0 -kdyn = 1 -kridge = -1 -ktransport = -1 -coriolis = 'zero' -atmbndy = 'constant' -atm_data_type = 'uniform_north' -ocn_data_type = 'calm' -ice_data_type = 'uniform' -ice_data_conc = 'parabolic' -ice_data_dist = 'uniform' -calc_strair = .false. -rotate_wind = .false. -restore_ice = .false. 
-f_aice = 'd1' -f_hi = 'd1' -f_hs = 'd' -f_Tsfc = 'd' -f_uvel = 'd1' -f_vvel = 'd1' -f_uatm = 'd' -f_vatm = 'd' -f_uocn = 'd' -f_vocn = 'd' -f_strairx = 'd1' -f_strairy = 'd1' -f_strtltx = 'd1' -f_strtlty = 'd1' -f_strcorx = 'd1' -f_strcory = 'd1' -f_strocnx = 'd1' -f_strocny = 'd1' -f_strintx = 'd1' -f_strinty = 'd1' -f_taubx = 'd1' -f_tauby = 'd1' -f_divu = 'd1' -f_sig1 = 'd1' -f_sig2 = 'd1' diff --git a/configuration/scripts/options/set_nml.boxsymne b/configuration/scripts/options/set_nml.boxsymne deleted file mode 100644 index 5c7374976..000000000 --- a/configuration/scripts/options/set_nml.boxsymne +++ /dev/null @@ -1,58 +0,0 @@ -days_per_year = 360 -use_leap_years = .false. -npt_unit = 'd' -npt = 5 -ice_ic = 'internal' -restart_ext = .true. -histfreq = 'd','1','x','x','x' -grid_type = 'rectangular' -kmt_type = 'default' -dxrect = 16.e5 -dyrect = 16.e5 -close_boundaries = .true. -ew_boundary_type = 'open' -ns_boundary_type = 'open' -tr_iage = .false. -tr_FY = .false. -tr_lvl = .false. -tr_pond_lvl = .false. -ktherm = -1 -kstrength = 0 -kdyn = 1 -kridge = -1 -ktransport = -1 -coriolis = 'constant' -atmbndy = 'constant' -atm_data_type = 'uniform_northeast' -ocn_data_type = 'calm' -ice_data_type = 'uniform' -ice_data_conc = 'parabolic' -ice_data_dist = 'uniform' -calc_strair = .false. -rotate_wind = .false. -restore_ice = .false. -f_aice = 'd1' -f_hi = 'd1' -f_hs = 'd' -f_Tsfc = 'd' -f_uvel = 'd1' -f_vvel = 'd1' -f_uatm = 'd' -f_vatm = 'd' -f_uocn = 'd' -f_vocn = 'd' -f_strairx = 'd1' -f_strairy = 'd1' -f_strtltx = 'd1' -f_strtlty = 'd1' -f_strcorx = 'd1' -f_strcory = 'd1' -f_strocnx = 'd1' -f_strocny = 'd1' -f_strintx = 'd1' -f_strinty = 'd1' -f_taubx = 'd1' -f_tauby = 'd1' -f_divu = 'd1' -f_sig1 = 'd1' -f_sig2 = 'd1' diff --git a/configuration/scripts/options/set_nml.boxsyms b/configuration/scripts/options/set_nml.boxsyms deleted file mode 100644 index 7fc0fc5a0..000000000 --- a/configuration/scripts/options/set_nml.boxsyms +++ /dev/null @@ -1,58 +0,0 @@ -days_per_year = 360 -use_leap_years = .false. -npt_unit = 'd' -npt = 5 -ice_ic = 'internal' -restart_ext = .true. -histfreq = 'd','1','x','x','x' -grid_type = 'rectangular' -kmt_type = 'default' -dxrect = 16.e5 -dyrect = 16.e5 -close_boundaries = .true. -ew_boundary_type = 'open' -ns_boundary_type = 'open' -tr_iage = .false. -tr_FY = .false. -tr_lvl = .false. -tr_pond_lvl = .false. -ktherm = -1 -kstrength = 0 -kdyn = 1 -kridge = -1 -ktransport = -1 -coriolis = 'zero' -atmbndy = 'constant' -atm_data_type = 'uniform_south' -ocn_data_type = 'calm' -ice_data_type = 'uniform' -ice_data_conc = 'parabolic' -ice_data_dist = 'uniform' -calc_strair = .false. -rotate_wind = .false. -restore_ice = .false. 
-f_aice = 'd1' -f_hi = 'd1' -f_hs = 'd' -f_Tsfc = 'd' -f_uvel = 'd1' -f_vvel = 'd1' -f_uatm = 'd' -f_vatm = 'd' -f_uocn = 'd' -f_vocn = 'd' -f_strairx = 'd1' -f_strairy = 'd1' -f_strtltx = 'd1' -f_strtlty = 'd1' -f_strcorx = 'd1' -f_strcory = 'd1' -f_strocnx = 'd1' -f_strocny = 'd1' -f_strintx = 'd1' -f_strinty = 'd1' -f_taubx = 'd1' -f_tauby = 'd1' -f_divu = 'd1' -f_sig1 = 'd1' -f_sig2 = 'd1' diff --git a/configuration/scripts/options/set_nml.gbox12 b/configuration/scripts/options/set_nml.gbox12 index 96701522f..b8a07840b 100644 --- a/configuration/scripts/options/set_nml.gbox12 +++ b/configuration/scripts/options/set_nml.gbox12 @@ -2,7 +2,7 @@ ice_ic = 'internal' grid_type = 'rectangular' kmt_type = 'default' atm_data_type = 'box2001' -ocn_data_type = 'calm' +ocn_data_type = 'box2001' ice_data_type = 'box2001' ice_data_conc = 'box2001' ice_data_dist = 'box2001' diff --git a/configuration/scripts/options/set_nml.gbox180 b/configuration/scripts/options/set_nml.gbox180 index 96701522f..b8a07840b 100644 --- a/configuration/scripts/options/set_nml.gbox180 +++ b/configuration/scripts/options/set_nml.gbox180 @@ -2,7 +2,7 @@ ice_ic = 'internal' grid_type = 'rectangular' kmt_type = 'default' atm_data_type = 'box2001' -ocn_data_type = 'calm' +ocn_data_type = 'box2001' ice_data_type = 'box2001' ice_data_conc = 'box2001' ice_data_dist = 'box2001' diff --git a/configuration/scripts/options/set_nml.gbox80 b/configuration/scripts/options/set_nml.gbox80 index 96701522f..b8a07840b 100644 --- a/configuration/scripts/options/set_nml.gbox80 +++ b/configuration/scripts/options/set_nml.gbox80 @@ -2,7 +2,7 @@ ice_ic = 'internal' grid_type = 'rectangular' kmt_type = 'default' atm_data_type = 'box2001' -ocn_data_type = 'calm' +ocn_data_type = 'box2001' ice_data_type = 'box2001' ice_data_conc = 'box2001' ice_data_dist = 'box2001' diff --git a/configuration/scripts/tests/cice.results.csh b/configuration/scripts/tests/cice.results.csh index 13580c0be..4074115cb 100644 --- a/configuration/scripts/tests/cice.results.csh +++ b/configuration/scripts/tests/cice.results.csh @@ -5,12 +5,13 @@ cat ./results.log | grep -iv "#machinfo" | grep -iv "#envinfo" set pends = `cat ./results.log | grep PEND | wc -l` set misses = `cat ./results.log | grep MISS | wc -l` set failures = `cat ./results.log | grep FAIL | wc -l` -set failbuild = `cat ./results.log | grep FAIL | grep " build " | wc -l` -set failrun = `cat ./results.log | grep FAIL | grep " run " | wc -l` -set failtest = `cat ./results.log | grep FAIL | grep " test " | wc -l` -set failcomp = `cat ./results.log | grep FAIL | grep " compare " | wc -l` -set failbfbc = `cat ./results.log | grep FAIL | grep " bfbcomp " | wc -l` -set failgen = `cat ./results.log | grep FAIL | grep " generate " | wc -l` +set failbuild = `cat ./results.log | grep FAIL | grep " build" | wc -l` +set failrun = `cat ./results.log | grep FAIL | grep " run" | wc -l` +set failtest = `cat ./results.log | grep FAIL | grep " test" | wc -l` +set failcomp = `cat ./results.log | grep FAIL | grep " compare" | wc -l` +set failclog = `cat ./results.log | grep FAIL | grep " complog" | wc -l` +set failbfbc = `cat ./results.log | grep FAIL | grep " bfbcomp" | wc -l` +set failgen = `cat ./results.log | grep FAIL | grep " generate" | wc -l` set success = `cat ./results.log | grep 'PASS\|COPY' | wc -l` set comments = `cat ./results.log | grep "#" | wc -l` set alltotal = `cat ./results.log | wc -l` @@ -29,9 +30,13 @@ echo " #failbuild = $failbuild" >> results.log echo " #failrun = $failrun" >> results.log echo " #failtest 
= $failtest" >> results.log echo " #failcomp = $failcomp" >> results.log +echo " #failclog = $failclog" >> results.log echo " #failbfbc = $failbfbc" >> results.log echo " #failgen = $failgen" >> results.log +set stamp = `date '+%y%m%d-%H%M%S'` +cp results.log results.$stamp.log + echo "" echo "Descriptors:" echo " PASS - successful completion" @@ -49,6 +54,7 @@ echo "$failures of $chkcnt tests FAILED" #echo " $failrun of $failures FAILED run" #echo " $failtest of $failures FAILED test" #echo " $failcomp of $failures FAILED compare" +#echo " $failclog of $failures FAILED compare" #echo " $failbfbc of $failures FAILED bfbcomp" #echo " $failgen of $failures FAILED generate" exit $failures diff --git a/configuration/scripts/tests/gridsys_suite.ts b/configuration/scripts/tests/gridsys_suite.ts index b33d58ea9..d9752073f 100644 --- a/configuration/scripts/tests/gridsys_suite.ts +++ b/configuration/scripts/tests/gridsys_suite.ts @@ -8,15 +8,10 @@ smoke gbox80 1x1 box2001 smoke gbox80 2x2 boxwallblock smoke gbox80 1x1 boxslotcyl smoke gbox80 2x4 boxnodyn -#smoke gbox80 2x2 boxsymn,run1day -smoke gbox80 4x2 boxsyme,run1day -#smoke gbox80 4x1 boxsymne,run1day -#smoke gbox80 2x2 boxsymn,run1day,kmtislands -smoke gbox80 4x1 boxsyme,run1day,kmtislands -#smoke gbox80 4x2 boxsymne,run1day,kmtislands -#smoke gbox80 8x1 boxislandsn,run1day -smoke gbox80 4x2 boxislandse,run1day -#smoke gbox80 2x4 boxislandsne,run1day +smoke gbox80 4x2 boxclosed,boxforcee,run1day +smoke gbox80 4x1 boxclosed,boxforcene,run1day,kmtislands +smoke gbox80 4x2 boxopen,kmtislands,boxforcee,run1day +smoke gbox80 2x2 boxclosed,boxforcen,run1day,vargrid smoke gx3 1x1x100x116x1 reprosum,run10day smoke gx3 1x1x25x29x16 reprosum,run10day,dwblockall smoke_gx3_1x1x100x116x1_reprosum_run10day smoke gx3 1x1x5x4x580 reprosum,run10day,dwblockall smoke_gx3_1x1x100x116x1_reprosum_run10day @@ -26,17 +21,6 @@ smoke gx1 32x1x16x16x32 reprosum,run10day,cmplogrest,dwblockall smoke gx1 32x1x16x12x40 reprosum,run10day,cmplogrest,dwblockall smoke_gx1_32x1x16x16x32_reprosum_run10day smoke gx1 32x1x16x12x40 reprosum,run10day,cmplogrest smoke_gx1_32x1x16x16x32_reprosum_run10day -smoke gbox80 2x2 boxsymn,run1day,vargrid -smoke gbox80 4x2 boxsyme,run1day,vargrid -smoke gbox80 4x1 boxsymne,run1day,vargrid -#smoke gbox80 2x2 boxsymn,run1day,kmtislands,vargrid -#smoke gbox80 4x1 boxsyme,run1day,kmtislands,vargrid -#smoke gbox80 4x2 boxsymne,run1day,kmtislands,vargrid -#smoke gbox80 8x1 boxislandsn,run1day,vargrid -#smoke gbox80 4x2 boxislandse,run1day,vargrid -#smoke gbox80 2x4 boxislandsne,run1day,vargrid - - smoke gx3 8x2 diag1,run5day,gridcd smoke gx3 8x4 diag1,run5day,debug,gridcd restart gx3 4x2 debug,diag1,gridcd @@ -46,15 +30,10 @@ smoke gbox80 1x1 box2001,gridcd smoke gbox80 2x2 boxwallblock,gridcd smoke gbox80 1x1 boxslotcyl,gridcd smoke gbox80 2x4 boxnodyn,gridcd -#smoke gbox80 2x2 boxsymn,run1day,gridcd -smoke gbox80 4x2 boxsyme,run1day,gridcd -#smoke gbox80 4x1 boxsymne,run1day,gridcd -#smoke gbox80 2x2 boxsymn,run1day,kmtislands,gridcd -smoke gbox80 4x1 boxsyme,run1day,kmtislands,gridcd -#smoke gbox80 4x2 boxsymne,run1day,kmtislands,gridcd -#smoke gbox80 8x1 boxislandsn,run1day,gridcd -smoke gbox80 4x2 boxislandse,run1day,gridcd -#smoke gbox80 2x4 boxislandsne,run1day,gridcd +smoke gbox80 4x2 boxclosed,boxforcee,run1day,gridcd +smoke gbox80 4x1 boxclosed,boxforcene,run1day,kmtislands,gridcd +smoke gbox80 4x2 boxopen,kmtislands,boxforcee,run1day,gridcd +smoke gbox80 2x2 boxclosed,boxforcen,run1day,vargrid,gridcd smoke gx3 1x1x100x116x1 
reprosum,run10day,gridcd smoke gx3 1x1x25x29x16 reprosum,run10day,dwblockall,gridcd smoke_gx3_1x1x100x116x1_gridcd_reprosum_run10day smoke gx3 1x1x5x4x580 reprosum,run10day,dwblockall,gridcd smoke_gx3_1x1x100x116x1_gridcd_reprosum_run10day @@ -64,16 +43,6 @@ smoke gx1 32x1x16x16x32 reprosum,run10day,cmplogrest,dwblockall,g smoke gx1 32x1x16x12x40 reprosum,run10day,cmplogrest,dwblockall,gridcd smoke_gx1_32x1x16x16x32_gridcd_reprosum_run10day smoke gx1 32x1x16x12x40 reprosum,run10day,cmplogrest,gridcd smoke_gx1_32x1x16x16x32_gridcd_reprosum_run10day -smoke gbox80 2x2 boxsymn,run1day,vargrid,gridcd -smoke gbox80 4x2 boxsyme,run1day,vargrid,gridcd -smoke gbox80 4x1 boxsymne,run1day,vargrid,gridcd -#smoke gbox80 2x2 boxsymn,run1day,kmtislands,vargrid,gridcd -#smoke gbox80 4x1 boxsyme,run1day,kmtislands,vargrid,gridcd -#smoke gbox80 4x2 boxsymne,run1day,kmtislands,vargrid,gridcd -#smoke gbox80 8x1 boxislandsn,run1day,vargrid,gridcd -#smoke gbox80 4x2 boxislandse,run1day,vargrid,gridcd -#smoke gbox80 2x4 boxislandsne,run1day,vargrid,gridcd - smoke gx3 8x2 diag1,run5day,gridc smoke gx3 8x4 diag1,run5day,debug,gridc restart gx3 4x2 debug,diag1,gridc @@ -83,15 +52,10 @@ smoke gbox80 1x1 box2001,gridc smoke gbox80 2x2 boxwallblock,gridc smoke gbox80 1x1 boxslotcyl,gridc smoke gbox80 2x4 boxnodyn,gridc -#smoke gbox80 2x2 boxsymn,run1day,gridc -smoke gbox80 4x2 boxsyme,run1day,gridc -#smoke gbox80 4x1 boxsymne,run1day,gridc -#smoke gbox80 2x2 boxsymn,run1day,kmtislands,gridc -smoke gbox80 4x1 boxsyme,run1day,kmtislands,gridc -#smoke gbox80 4x2 boxsymne,run1day,kmtislands,gridc -#smoke gbox80 8x1 boxislandsn,run1day,gridc -smoke gbox80 4x2 boxislandse,run1day,gridc -#smoke gbox80 2x4 boxislandsne,run1day,gridc +smoke gbox80 4x2 boxclosed,boxforcee,run1day,gridc +smoke gbox80 4x1 boxclosed,boxforcene,run1day,kmtislands,gridc +smoke gbox80 4x2 boxopen,kmtislands,boxforcee,run1day,gridc +smoke gbox80 2x2 boxclosed,boxforcen,run1day,vargrid,gridc smoke gx3 1x1x100x116x1 reprosum,run10day,gridc smoke gx3 1x1x25x29x16 reprosum,run10day,dwblockall,gridc smoke_gx3_1x1x100x116x1_gridc_reprosum_run10day smoke gx3 1x1x5x4x580 reprosum,run10day,dwblockall,gridc smoke_gx3_1x1x100x116x1_gridc_reprosum_run10day @@ -100,12 +64,3 @@ smoke gx1 32x1x16x16x32 reprosum,run10day,gridc smoke gx1 32x1x16x16x32 reprosum,run10day,cmplogrest,dwblockall,gridc smoke_gx1_32x1x16x16x32_gridc_reprosum_run10day smoke gx1 32x1x16x12x40 reprosum,run10day,cmplogrest,dwblockall,gridc smoke_gx1_32x1x16x16x32_gridc_reprosum_run10day smoke gx1 32x1x16x12x40 reprosum,run10day,cmplogrest,gridc smoke_gx1_32x1x16x16x32_gridc_reprosum_run10day -smoke gbox80 2x2 boxsymn,run1day,vargrid,gridc -smoke gbox80 4x2 boxsyme,run1day,vargrid,gridc -smoke gbox80 4x1 boxsymne,run1day,vargrid,gridc -#smoke gbox80 2x2 boxsymn,run1day,kmtislands,vargrid,gridc -#smoke gbox80 4x1 boxsyme,run1day,kmtislands,vargrid,gridc -#smoke gbox80 4x2 boxsymne,run1day,kmtislands,vargrid,gridc -#smoke gbox80 8x1 boxislandsn,run1day,vargrid,gridc -#smoke gbox80 4x2 boxislandse,run1day,vargrid,gridc -#smoke gbox80 2x4 boxislandsne,run1day,vargrid,gridc diff --git a/doc/source/user_guide/ug_implementation.rst b/doc/source/user_guide/ug_implementation.rst index a3f7d11bc..4bcfe1ede 100644 --- a/doc/source/user_guide/ug_implementation.rst +++ b/doc/source/user_guide/ug_implementation.rst @@ -312,6 +312,34 @@ Special treatment is also required in the scattering routine, and when computing global sums one of each pair of coincident points has to be excluded. 
+*****************
+Rectangular grids
+*****************
+
+Rectangular test grids can be defined for CICE. They are generated
+internally and defined by several namelist
+settings including ``grid_type`` = ``rectangular``, ``nx_global``, ``ny_global``,
+``dx_rect``, ``dy_rect``, ``lonrefrect``, and ``latrefrect``. Forcing and
+initial conditions can be set via the namelists ``atm_data_type``, ``ocn_data_type``,
+``ice_data_type``, ``ice_data_conc``, and ``ice_data_dist``. Variable grid spacing
+is also supported with the namelist settings ``scale_dxdy``, which turns on
+the option, and ``dxscale`` and ``dyscale``, which set the variable grid scaling
+factor. Values of 1.0 produce constant grid spacing. For rectangular grids,
+``lonrefrect`` and ``latrefrect`` define the lower left longitude and latitude
+values of the grid, ``dx_rect`` and ``dy_rect`` define the base grid spacing, and
+``dxscale`` and ``dyscale`` provide the grid spacing scaling. The base spacing
+is set in the center of the rectangular domain and the scaling is applied symmetrically
+outward as a multiplicative factor in the x and y directions. With ``dxscale`` = 1.1,
+for example, each gridcell moving outward from the domain center is 10% wider
+than its inward neighbor.
+
+Several predefined rectangular grids are available in CICE with
+**cice.setup --grid** including ``gbox12``, ``gbox80``, ``gbox128``, and ``gbox180``,
+where 12, 80, 128, and 180 are the number of gridcells in each direction.
+Several predefined options also exist, set with **cice.setup --set**, to
+establish varied idealized configurations of box tests including ``box2001``,
+``boxadv``, ``boxchan``, ``boxnodyn``, ``boxrestore``, ``boxslotcyl``,
+``boxopen``, ``boxclosed``, and ``boxforcee``. See **cice.setup --help** for a current
+list of supported settings.
+
 **************
 Vertical Grids
 **************

From 0447b9ec64b7092529da765fa40f280afc6c1925 Mon Sep 17 00:00:00 2001
From: Tony Craig
Date: Fri, 14 Oct 2022 14:19:57 -0700
Subject: [PATCH 27/57] Update computation of cdn_ocn for use in dynamics
 (#771)

- Compute and use cdn_ocn on U, E, and N cell locations as needed for
  dynamics.
- Add halo updates in dynamics before calling grid_average. This doesn't
  change answers, but is the safe thing to do in general. Performance
  does not seem to be affected.
---
 cicecore/cicedynB/dynamics/ice_dyn_eap.F90 | 27 ++++++++++---
 cicecore/cicedynB/dynamics/ice_dyn_evp.F90 | 44 ++++++++++++++++------
 cicecore/cicedynB/dynamics/ice_dyn_vp.F90  | 37 ++++++++++++------
 cicecore/shared/ice_arrays_column.F90      |  1 +
 4 files changed, 80 insertions(+), 29 deletions(-)

diff --git a/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 b/cicecore/cicedynB/dynamics/ice_dyn_eap.F90
index 854fd061b..ea254cbc0 100644
--- a/cicecore/cicedynB/dynamics/ice_dyn_eap.F90
+++ b/cicecore/cicedynB/dynamics/ice_dyn_eap.F90
@@ -177,6 +177,7 @@ subroutine eap (dt)
       vocnU    , & ! j ocean current (m/s)
       ss_tltxU , & ! sea surface slope, x-direction (m/m)
       ss_tltyU , & ! sea surface slope, y-direction (m/m)
+      cdn_ocnU , & ! ocn drag coefficient
       tmass    , & ! total mass of ice and snow (kg/m^2)
       waterxU  , & ! for ocean stress calculation, x (m/s)
       wateryU  , & ! for ocean stress calculation, y (m/s)
@@ -186,7 +187,9 @@ subroutine eap (dt)
       umassdti     ! mass of U-cell/dte (kg/m^2 s)

    real (kind=dbl_kind), allocatable :: &
-      fld2(:,:,:,:)    ! temporary for stacking fields for halo update
+      fld2(:,:,:,:), & ! temporary for stacking fields for halo update
+      fld3(:,:,:,:), & ! temporary for stacking fields for halo update
+      fld4(:,:,:,:)    ! temporary for stacking fields for halo update
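!  [Editor's note -- illustrative sketch, not part of the patch]  The new
!  fld3/fld4 temporaries support the bundled halo updates this commit adds:
!  several T-grid fields are stacked into one array, refreshed in a single
!  halo call, unstacked, and only then averaged to the velocity grid.
!  Condensed from the hunks that follow (same names, no new API):
!
!     call stack_fields (tmass, aice_init, cdn_ocn, fld3)
!     call ice_HaloUpdate (fld3, halo_info, field_loc_center, field_type_scalar)
!     call unstack_fields (fld3, tmass, aice_init, cdn_ocn)
!     call grid_average_X2Y ('S', cdn_ocn, 'T', cdn_ocnU, 'U')

    real (kind=dbl_kind), dimension(nx_block,ny_block,8):: &
       strtmp       ! 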
stress combinations for momentum equation @@ -213,6 +216,8 @@ subroutine eap (dt) !----------------------------------------------------------------- allocate(fld2(nx_block,ny_block,2,max_blocks)) + allocate(fld3(nx_block,ny_block,3,max_blocks)) + allocate(fld4(nx_block,ny_block,4,max_blocks)) ! This call is needed only if dt changes during runtime. ! call set_evp_parameters (dt) @@ -265,8 +270,18 @@ subroutine eap (dt) ! convert fields from T to U grid !----------------------------------------------------------------- - call grid_average_X2Y('S', tmass , 'T' , umass, 'U') - call grid_average_X2Y('S', aice_init, 'T' , aiU , 'U') + call stack_fields(tmass, aice_init, cdn_ocn, fld3) + call ice_HaloUpdate (fld3, halo_info, & + field_loc_center, field_type_scalar) + call stack_fields(uocn, vocn, ss_tltx, ss_tlty, fld4) + call ice_HaloUpdate (fld4, halo_info, & + field_loc_center, field_type_vector) + call unstack_fields(fld3, tmass, aice_init, cdn_ocn) + call unstack_fields(fld4, uocn, vocn, ss_tltx, ss_tlty) + + call grid_average_X2Y('S', tmass , 'T' , umass , 'U') + call grid_average_X2Y('S', aice_init, 'T' , aiU , 'U') + call grid_average_X2Y('S', cdn_ocn , 'T' , cdn_ocnU, 'U') call grid_average_X2Y('S', uocn , grid_ocn_dynu, uocnU , 'U') call grid_average_X2Y('S', vocn , grid_ocn_dynv, vocnU , 'U') call grid_average_X2Y('S', ss_tltx , grid_ocn_dynu, ss_tltxU, 'U') @@ -486,7 +501,7 @@ subroutine eap (dt) ! call ice_timer_start(timer_tmp2,iblk) call stepu (nx_block, ny_block, & - icellu (iblk), Cdn_ocn (:,:,iblk), & + icellu (iblk), Cdn_ocnU (:,:,iblk), & indxui (:,iblk), indxuj (:,iblk), & aiU (:,:,iblk), strtmp (:,:,:), & uocnU (:,:,iblk), vocnU (:,:,iblk), & @@ -540,7 +555,7 @@ subroutine eap (dt) enddo ! subcycling - deallocate(fld2) + deallocate(fld2,fld3,fld4) if (maskhalo_dyn) call ice_HaloDestroy(halo_info_mask) !----------------------------------------------------------------- @@ -552,7 +567,7 @@ subroutine eap (dt) call dyn_finish & (nx_block, ny_block, & - icellu (iblk), Cdn_ocn (:,:,iblk), & + icellu (iblk), Cdn_ocnU(:,:,iblk), & indxui (:,iblk), indxuj (:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & uocnU (:,:,iblk), vocnU (:,:,iblk), & diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 index 3bbfc01bc..d1264bae7 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 @@ -150,6 +150,7 @@ subroutine evp (dt) vocnU , & ! j ocean current (m/s) ss_tltxU , & ! sea surface slope, x-direction (m/m) ss_tltyU , & ! sea surface slope, y-direction (m/m) + cdn_ocnU , & ! ocn drag coefficient tmass , & ! total mass of ice and snow (kg/m^2) waterxU , & ! for ocean stress calculation, x (m/s) wateryU , & ! for ocean stress calculation, y (m/s) @@ -163,6 +164,7 @@ subroutine evp (dt) vocnN , & ! j ocean current (m/s) ss_tltxN , & ! sea surface slope, x-direction (m/m) ss_tltyN , & ! sea surface slope, y-direction (m/m) + cdn_ocnN , & ! ocn drag coefficient waterxN , & ! for ocean stress calculation, x (m/s) wateryN , & ! for ocean stress calculation, y (m/s) forcexN , & ! work array: combined atm stress and ocn tilt, x @@ -176,6 +178,7 @@ subroutine evp (dt) vocnE , & ! j ocean current (m/s) ss_tltxE , & ! sea surface slope, x-direction (m/m) ss_tltyE , & ! sea surface slope, y-direction (m/m) + cdn_ocnE , & ! ocn drag coefficient waterxE , & ! for ocean stress calculation, x (m/s) wateryE , & ! for ocean stress calculation, y (m/s) forcexE , & ! 
work array: combined atm stress and ocn tilt, x @@ -185,9 +188,9 @@ subroutine evp (dt) emassdti ! mass of E-cell/dte (kg/m^2 s) real (kind=dbl_kind), allocatable :: & - fld2(:,:,:,:) , & ! 2 bundled fields - fld3(:,:,:,:) , & ! 3 bundled fields - fld4(:,:,:,:) ! 4 bundled fields + fld2(:,:,:,:), & ! 2 bundled fields + fld3(:,:,:,:), & ! 3 bundled fields + fld4(:,:,:,:) ! 4 bundled fields real (kind=dbl_kind), allocatable :: & strengthU(:,:,:), & ! strength averaged to U points @@ -312,8 +315,18 @@ subroutine evp (dt) ! convert fields from T to U grid !----------------------------------------------------------------- + call stack_fields(tmass, aice_init, cdn_ocn, fld3) + call ice_HaloUpdate (fld3, halo_info, & + field_loc_center, field_type_scalar) + call stack_fields(uocn, vocn, ss_tltx, ss_tlty, fld4) + call ice_HaloUpdate (fld4, halo_info, & + field_loc_center, field_type_vector) + call unstack_fields(fld3, tmass, aice_init, cdn_ocn) + call unstack_fields(fld4, uocn, vocn, ss_tltx, ss_tlty) + call grid_average_X2Y('S', tmass , 'T' , umass , 'U') call grid_average_X2Y('S', aice_init, 'T' , aiU , 'U') + call grid_average_X2Y('S', cdn_ocn , 'T' , cdn_ocnU, 'U') call grid_average_X2Y('S', uocn , grid_ocn_dynu, uocnU , 'U') call grid_average_X2Y('S', vocn , grid_ocn_dynv, vocnU , 'U') call grid_average_X2Y('S', ss_tltx , grid_ocn_dynu, ss_tltxU, 'U') @@ -322,12 +335,14 @@ subroutine evp (dt) if (grid_ice == 'CD' .or. grid_ice == 'C') then call grid_average_X2Y('S', tmass , 'T' , emass , 'E') call grid_average_X2Y('S', aice_init, 'T' , aie , 'E') + call grid_average_X2Y('S', cdn_ocn , 'T' , cdn_ocnE, 'E') call grid_average_X2Y('S', uocn , grid_ocn_dynu, uocnE , 'E') call grid_average_X2Y('S', vocn , grid_ocn_dynv, vocnE , 'E') call grid_average_X2Y('S', ss_tltx , grid_ocn_dynu, ss_tltxE, 'E') call grid_average_X2Y('S', ss_tlty , grid_ocn_dynv, ss_tltyE, 'E') call grid_average_X2Y('S', tmass , 'T' , nmass , 'N') call grid_average_X2Y('S', aice_init, 'T' , ain , 'N') + call grid_average_X2Y('S', cdn_ocn , 'T' , cdn_ocnN, 'N') call grid_average_X2Y('S', uocn , grid_ocn_dynu, uocnN , 'N') call grid_average_X2Y('S', vocn , grid_ocn_dynv, vocnN , 'N') call grid_average_X2Y('S', ss_tltx , grid_ocn_dynu, ss_tltxN, 'N') @@ -720,7 +735,7 @@ subroutine evp (dt) call ice_dyn_evp_1d_copyin( & nx_block,ny_block,nblocks,nx_global+2*nghost,ny_global+2*nghost, & icetmask, iceumask, & - cdn_ocn,aiU,uocnU,vocnU,forcexU,forceyU,TbU, & + Cdn_ocnU,aiU,uocnU,vocnU,forcexU,forceyU,TbU, & umassdti,fmU,uarear,tarear,strintxU,strintyU,uvel_init,vvel_init,& strength,uvel,vvel,dxT,dyT, & stressp_1 ,stressp_2, stressp_3, stressp_4, & @@ -773,7 +788,7 @@ subroutine evp (dt) ! momentum equation !----------------------------------------------------------------- call stepu (nx_block , ny_block , & - icellu (iblk), Cdn_ocn (:,:,iblk), & + icellu (iblk), Cdn_ocnU(:,:,iblk), & indxui (:,iblk), indxuj (:,iblk), & aiU (:,:,iblk), strtmp (:,:,:), & uocnU (:,:,iblk), vocnU (:,:,iblk), & @@ -924,7 +939,7 @@ subroutine evp (dt) do iblk = 1, nblocks call stepu_C (nx_block , ny_block , & ! u, E point - icelle (iblk), Cdn_ocn (:,:,iblk), & + icelle (iblk), Cdn_ocnE (:,:,iblk), & indxei (:,iblk), indxej (:,iblk), & aiE (:,:,iblk), & uocnE (:,:,iblk), vocnE (:,:,iblk), & @@ -936,7 +951,7 @@ subroutine evp (dt) TbE (:,:,iblk)) call stepv_C (nx_block, ny_block, & ! 
v, N point - icelln (iblk), Cdn_ocn (:,:,iblk), & + icelln (iblk), Cdn_ocnN (:,:,iblk), & indxni (:,iblk), indxnj (:,iblk), & aiN (:,:,iblk), & uocnN (:,:,iblk), vocnN (:,:,iblk), & @@ -1124,7 +1139,7 @@ subroutine evp (dt) do iblk = 1, nblocks call stepuv_CD (nx_block , ny_block , & ! E point - icelle (iblk), Cdn_ocn (:,:,iblk), & + icelle (iblk), Cdn_ocnE (:,:,iblk), & indxei (:,iblk), indxej (:,iblk), & aiE (:,:,iblk), & uocnE (:,:,iblk), vocnE (:,:,iblk), & @@ -1138,7 +1153,7 @@ subroutine evp (dt) TbE (:,:,iblk)) call stepuv_CD (nx_block , ny_block , & ! N point - icelln (iblk), Cdn_ocn (:,:,iblk), & + icelln (iblk), Cdn_ocnN (:,:,iblk), & indxni (:,iblk), indxnj (:,iblk), & aiN (:,:,iblk), & uocnN (:,:,iblk), vocnN (:,:,iblk), & @@ -1284,7 +1299,7 @@ subroutine evp (dt) do iblk = 1, nblocks call dyn_finish & (nx_block , ny_block , & - icellu (iblk), Cdn_ocn (:,:,iblk), & + icellu (iblk), Cdn_ocnU(:,:,iblk), & indxui (:,iblk), indxuj (:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & uocnU (:,:,iblk), vocnU (:,:,iblk), & @@ -1300,7 +1315,7 @@ subroutine evp (dt) call dyn_finish & (nx_block , ny_block , & - icelln (iblk), Cdn_ocn (:,:,iblk), & + icelln (iblk), Cdn_ocnN(:,:,iblk), & indxni (:,iblk), indxnj (:,iblk), & uvelN (:,:,iblk), vvelN (:,:,iblk), & uocnN (:,:,iblk), vocnN (:,:,iblk), & @@ -1309,7 +1324,7 @@ subroutine evp (dt) call dyn_finish & (nx_block , ny_block , & - icelle (iblk), Cdn_ocn (:,:,iblk), & + icelle (iblk), Cdn_ocnE(:,:,iblk), & indxei (:,iblk), indxej (:,iblk), & uvelE (:,:,iblk), vvelE (:,:,iblk), & uocnE (:,:,iblk), vocnE (:,:,iblk), & @@ -1322,6 +1337,11 @@ subroutine evp (dt) endif if (grid_ice == 'CD' .or. grid_ice == 'C') then + call ice_HaloUpdate (strintxE, halo_info, & + field_loc_Eface, field_type_vector) + call ice_HaloUpdate (strintyN, halo_info, & + field_loc_Nface, field_type_vector) + call ice_timer_stop(timer_bound) call grid_average_X2Y('S', strintxE, 'E', strintxU, 'U') ! diagnostic call grid_average_X2Y('S', strintyN, 'N', strintyU, 'U') ! diagnostic endif diff --git a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 index 631192587..2a0dbc4b3 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 @@ -106,7 +106,9 @@ module ice_dyn_vp indxuj(:,:) ! compressed index in j-direction real (kind=dbl_kind), allocatable :: & - fld2(:,:,:,:) ! work array for boundary updates + fld2(:,:,:,:), & ! work array for boundary updates + fld3(:,:,:,:), & ! work array for boundary updates + fld4(:,:,:,:) ! work array for boundary updates !======================================================================= @@ -142,6 +144,8 @@ subroutine init_vp indxui(nx_block*ny_block, max_blocks), & indxuj(nx_block*ny_block, max_blocks)) allocate(fld2(nx_block,ny_block,2,max_blocks)) + allocate(fld3(nx_block,ny_block,3,max_blocks)) + allocate(fld4(nx_block,ny_block,4,max_blocks)) end subroutine init_vp @@ -200,6 +204,7 @@ subroutine implicit_solver (dt) vocnU , & ! j ocean current (m/s) ss_tltxU , & ! sea surface slope, x-direction (m/m) ss_tltyU , & ! sea surface slope, y-direction (m/m) + cdn_ocnU , & ! ocn drag coefficient tmass , & ! total mass of ice and snow (kg/m^2) waterxU , & ! for ocean stress calculation, x (m/s) wateryU , & ! for ocean stress calculation, y (m/s) @@ -299,12 +304,22 @@ subroutine implicit_solver (dt) ! 
convert fields from T to U grid !----------------------------------------------------------------- - call grid_average_X2Y('S',tmass , 'T', umass, 'U') - call grid_average_X2Y('S',aice_init, 'T', aiU , 'U') - call grid_average_X2Y('S',uocn , grid_ocn_dynu, uocnU , 'U') - call grid_average_X2Y('S',vocn , grid_ocn_dynv, vocnU , 'U') - call grid_average_X2Y('S',ss_tltx, grid_ocn_dynu, ss_tltxU, 'U') - call grid_average_X2Y('S',ss_tlty, grid_ocn_dynv, ss_tltyU, 'U') + call stack_fields(tmass, aice_init, cdn_ocn, fld3) + call ice_HaloUpdate (fld3, halo_info, & + field_loc_center, field_type_scalar) + call stack_fields(uocn, vocn, ss_tltx, ss_tlty, fld4) + call ice_HaloUpdate (fld4, halo_info, & + field_loc_center, field_type_vector) + call unstack_fields(fld3, tmass, aice_init, cdn_ocn) + call unstack_fields(fld4, uocn, vocn, ss_tltx, ss_tlty) + + call grid_average_X2Y('S',tmass , 'T' , umass , 'U') + call grid_average_X2Y('S',aice_init, 'T' , aiU , 'U') + call grid_average_X2Y('S',cdn_ocn , 'T' , cdn_ocnU, 'U') + call grid_average_X2Y('S',uocn , grid_ocn_dynu, uocnU , 'U') + call grid_average_X2Y('S',vocn , grid_ocn_dynv, vocnU , 'U') + call grid_average_X2Y('S',ss_tltx , grid_ocn_dynu, ss_tltxU, 'U') + call grid_average_X2Y('S',ss_tlty , grid_ocn_dynv, ss_tltyU, 'U') !---------------------------------------------------------------- ! Set wind stress to values supplied via NEMO or other forcing @@ -479,7 +494,7 @@ subroutine implicit_solver (dt) umassdti, sol , & fpresx , fpresy , & zetax2 , etax2 , & - rep_prs , & + rep_prs , cdn_ocnU,& Cb, halo_info_mask) !----------------------------------------------------------------- ! End of nonlinear iteration @@ -623,7 +638,7 @@ subroutine implicit_solver (dt) call dyn_finish & (nx_block, ny_block, & - icellu (iblk), Cdn_ocn (:,:,iblk), & + icellu (iblk), Cdn_ocnU(:,:,iblk), & indxui (:,iblk), indxuj (:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & uocnU (:,:,iblk), vocnU (:,:,iblk), & @@ -669,10 +684,9 @@ subroutine anderson_solver (icellt , icellu , & umassdti, sol , & fpresx , fpresy , & zetax2 , etax2 , & - rep_prs , & + rep_prs , cdn_ocn, & Cb, halo_info_mask) - use ice_arrays_column, only: Cdn_ocn use ice_blocks, only: nx_block, ny_block use ice_boundary, only: ice_HaloUpdate use ice_constants, only: c1 @@ -702,6 +716,7 @@ subroutine anderson_solver (icellt , icellu , & aiU , & ! ice fraction on u-grid uocn , & ! i ocean current (m/s) vocn , & ! j ocean current (m/s) + cdn_ocn , & ! ocn drag coefficient waterxU , & ! for ocean stress calculation, x (m/s) wateryU , & ! for ocean stress calculation, y (m/s) bxfix , & ! part of bx that is constant during Picard diff --git a/cicecore/shared/ice_arrays_column.F90 b/cicecore/shared/ice_arrays_column.F90 index c9e8be8db..b4727d3fd 100644 --- a/cicecore/shared/ice_arrays_column.F90 +++ b/cicecore/shared/ice_arrays_column.F90 @@ -24,6 +24,7 @@ module ice_arrays_column public :: alloc_arrays_column ! icepack_atmo.F90 + ! Cdn variables on the T-grid real (kind=dbl_kind), public, & dimension (:,:,:), allocatable :: & Cdn_atm , & ! 
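atm drag coefficient

!  [Editor's note -- sketch, not part of the patch]  This commit computes
!  the ocean drag coefficient at every velocity-point location the chosen
!  grid requires, always starting from the T-grid field.  Condensed from
!  the evp hunks above (no new routines are introduced):
!
!     call grid_average_X2Y ('S', cdn_ocn, 'T', cdn_ocnU, 'U')
!     if (grid_ice == 'CD' .or. grid_ice == 'C') then
!        call grid_average_X2Y ('S', cdn_ocn, 'T', cdn_ocnE, 'E')
!        call grid_average_X2Y ('S', cdn_ocn, 'T', cdn_ocnN, 'N')
!     endif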
From 2435fa7824b88903acb7d2d3a616d5975dee8164 Mon Sep 17 00:00:00 2001
From: Tony Craig
Date: Mon, 17 Oct 2022 06:42:33 -0700
Subject: [PATCH 28/57] Change icetmask to logical, consistent with iceumask,
 icenmask, iceemask (#773)

* Change icetmask to logical, consistent with iceumask, icenmask, iceemask

- Add icetmask as logical array to ice_grid.F90, was integer array
- Update use of icetmask in code for consistency with new type
- Add ice_HaloUpdate2DL1 to support halo updates for logical fields
  in both mpi and serial ice_boundary.F90
- Modify some capital T,U,N,E in ice_dyn_shared.F90 to t,u,n,e for
  better consistency in code

* Update cicecore/cicedynB/infrastructure/comm/mpi/ice_boundary.F90

* Update comment in code

* Revert changes to T,U,N,E in ice_dyn_shared.F90, working toward additional changes

* Move ice[T,U,N,E]mask from ice_grid to ice_dyn_shared

* Rename icell[t,u,n,e] to icell[T,U,N,E], rename indx[t,u,n,e] to indx[T,U,N,E]

* Remove ice[t,u,n,e]grid from ice_grid
---
 cicecore/cicedynB/dynamics/ice_dyn_eap.F90    |  97 ++--
 cicecore/cicedynB/dynamics/ice_dyn_evp.F90    | 303 ++++++-----
 cicecore/cicedynB/dynamics/ice_dyn_evp_1d.F90 |  54 +-
 cicecore/cicedynB/dynamics/ice_dyn_shared.F90 |  48 +-
 cicecore/cicedynB/dynamics/ice_dyn_vp.F90     | 509 +++++++++---------
 .../infrastructure/comm/mpi/ice_boundary.F90  |  64 +++
 .../comm/serial/ice_boundary.F90              |  64 +++
 cicecore/cicedynB/infrastructure/ice_grid.F90 |   8 -
 .../infrastructure/ice_restart_driver.F90     |  32 +-
 doc/source/cice_index.rst                     |   8 +-
 doc/source/user_guide/ug_implementation.rst   |   6 +-
 11 files changed, 657 insertions(+), 536 deletions(-)

diff --git a/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 b/cicecore/cicedynB/dynamics/ice_dyn_eap.F90
index ea254cbc0..28a047c4e 100644
--- a/cicecore/cicedynB/dynamics/ice_dyn_eap.F90
+++ b/cicecore/cicedynB/dynamics/ice_dyn_eap.F90
@@ -134,7 +134,7 @@ subroutine eap (dt)
       dyn_prep1, dyn_prep2, stepu, dyn_finish, &
       seabed_stress_factor_LKD, seabed_stress_factor_prob, &
       seabed_stress_method, seabed_stress, &
-      stack_fields, unstack_fields
+      stack_fields, unstack_fields, iceTmask, iceUmask
    use ice_flux, only: rdg_conv, strairxT, strairyT, &
       strairxU, strairyU, uocn, vocn, ss_tltx, ss_tlty, fmU, &
       strtltxU, strtltyU, strocnxU, strocnyU, strintxU, strintyU, taubxU, taubyU, &
       stressm_1, stressm_2, stressm_3, stressm_4, &
       stress12_1, stress12_2, stress12_3, stress12_4
    use ice_grid, only: tmask, umask, dxT, dyT, dxhy, dyhx, cxp, cyp, cxm, cym, &
-      tarear, uarear, grid_average_X2Y, iceumask, &
+      tarear, uarear, grid_average_X2Y, &
       grid_atm_dynu, grid_atm_dynv, grid_ocn_dynu, grid_ocn_dynv
    use ice_state, only: aice, aiU, vice, vsno, uvel, vvel, divu, shear, &
       aice_init, aice0, aicen, vicen, strength
@@ -163,14 +163,14 @@ subroutine eap (dt)
       i, j, ij

    integer (kind=int_kind), dimension(max_blocks) :: &
-      icellt , & ! no. of cells where icetmask = 1
-      icellu     ! no. of cells where iceumask = 1
+      icellT , & ! no. of cells where iceTmask = .true.
+      icellU     ! no. of cells where iceUmask = .true.

    integer (kind=int_kind), dimension (nx_block*ny_block, max_blocks) :: &
-      indxti , & ! compressed index in i-direction
-      indxtj , & ! compressed index in j-direction
-      indxui , & ! compressed index in i-direction
-      indxuj     ! compressed index in j-direction
+      indxTi , & ! compressed index in i-direction
+      indxTj , & ! compressed index in j-direction
+      indxUi , & ! compressed index in i-direction
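!  [Editor's note -- sketch, not part of the patch]  With the masks now
!  logical, the integer comparisons scattered through the dynamics become
!  direct logical tests.  The recurring before/after pattern in this
!  commit:
!
!     before:  if (icetmask(i,j,iblk) == 0) then ...
!     after :  if (.not.iceTmask(i,j,iblk)) then ...

+      indxUj     ! 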
compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & uocnU , & ! i ocean current (m/s) @@ -198,7 +198,6 @@ subroutine eap (dt) calc_strair integer (kind=int_kind), dimension (nx_block,ny_block,max_blocks) :: & - icetmask , & ! ice extent mask (T-cell) halomask ! ice mask for halo update type (ice_halo) :: & @@ -256,13 +255,13 @@ subroutine eap (dt) ilo, ihi, jlo, jhi, & aice (:,:,iblk), vice (:,:,iblk), & vsno (:,:,iblk), tmask (:,:,iblk), & - tmass (:,:,iblk), icetmask(:,:,iblk)) + tmass (:,:,iblk), iceTmask(:,:,iblk)) enddo ! iblk !$OMP END PARALLEL DO call ice_timer_start(timer_bound) - call ice_HaloUpdate (icetmask, halo_info, & + call ice_HaloUpdate (iceTmask, halo_info, & field_loc_center, field_type_scalar) call ice_timer_stop(timer_bound) @@ -324,16 +323,16 @@ subroutine eap (dt) call dyn_prep2 (nx_block, ny_block, & ilo, ihi, jlo, jhi, & - icellt (iblk), icellu (iblk), & - indxti (:,iblk), indxtj (:,iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellT (iblk), icellU (iblk), & + indxTi (:,iblk), indxTj (:,iblk), & + indxUi (:,iblk), indxUj (:,iblk), & aiU (:,:,iblk), umass (:,:,iblk), & umassdti (:,:,iblk), fcor_blk (:,:,iblk), & umask (:,:,iblk), & uocnU (:,:,iblk), vocnU (:,:,iblk), & strairxU (:,:,iblk), strairyU (:,:,iblk), & ss_tltxU (:,:,iblk), ss_tltyU (:,:,iblk), & - icetmask (:,:,iblk), iceumask (:,:,iblk), & + iceTmask (:,:,iblk), iceUmask (:,:,iblk), & fmU (:,:,iblk), dt, & strtltxU (:,:,iblk), strtltyU (:,:,iblk), & strocnxU (:,:,iblk), strocnyU (:,:,iblk), & @@ -357,7 +356,7 @@ subroutine eap (dt) do j = 1, ny_block do i = 1, nx_block - if (icetmask(i,j,iblk)==0) then + if (.not.iceTmask(i,j,iblk)) then if (tmask(i,j,iblk)) then ! structure tensor a11_1(i,j,iblk) = p5 @@ -374,7 +373,7 @@ subroutine eap (dt) a12_2(i,j,iblk) = c0 a12_3(i,j,iblk) = c0 a12_4(i,j,iblk) = c0 - endif ! icetmask + endif ! iceTmask enddo ! i enddo ! j @@ -384,9 +383,9 @@ subroutine eap (dt) !----------------------------------------------------------------- strength(:,:,iblk) = c0 ! initialize - do ij = 1, icellt(iblk) - i = indxti(ij, iblk) - j = indxtj(ij, iblk) + do ij = 1, icellT(iblk) + i = indxTi(ij, iblk) + j = indxTj(ij, iblk) call icepack_ice_strength(ncat=ncat, & aice = aice (i,j, iblk), & vice = vice (i,j, iblk), & @@ -415,7 +414,7 @@ subroutine eap (dt) if (maskhalo_dyn) then call ice_timer_start(timer_bound) halomask = 0 - where (iceumask) halomask = 1 + where (iceUmask) halomask = 1 call ice_HaloUpdate (halomask, halo_info, & field_loc_center, field_type_scalar) call ice_timer_stop(timer_bound) @@ -431,8 +430,8 @@ subroutine eap (dt) !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks call seabed_stress_factor_LKD (nx_block , ny_block , & - icellu (iblk), & - indxui (:,iblk), indxuj(:,iblk), & + icellU (iblk), & + indxUi (:,iblk), indxUj(:,iblk), & vice (:,:,iblk), aice(:,:,iblk), & hwater(:,:,iblk), TbU (:,:,iblk)) enddo @@ -442,8 +441,8 @@ subroutine eap (dt) !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks call seabed_stress_factor_prob (nx_block , ny_block , & - icellt(iblk), indxti(:,iblk), indxtj(:,iblk), & - icellu(iblk), indxui(:,iblk), indxuj(:,iblk), & + icellT(iblk), indxTi(:,iblk), indxTj(:,iblk), & + icellU(iblk), indxUi(:,iblk), indxUj(:,iblk), & aicen(:,:,:,iblk), vicen(:,:,:,iblk), & hwater (:,:,iblk), TbU (:,:,iblk)) enddo @@ -463,8 +462,8 @@ subroutine eap (dt) ! 
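!  [Editor's note]  dyn_prep2, called above, compresses the 2D masks into
!  the indxTi/indxTj and indxUi/indxUj lists so the dynamics loops touch
!  only active cells.  The gather has this shape (a sketch using the
!  renamed variables, not a quotation of dyn_prep2 itself):
!
!     icellU = 0
!     do j = jlo, jhi
!     do i = ilo, ihi
!        if (iceUmask(i,j)) then
!           icellU = icellU + 1
!           indxUi(icellU) = i
!           indxUj(icellU) = j
!        endif
!     enddo
!     enddo
!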
call ice_timer_start(timer_tmp1,iblk) call stress_eap (nx_block, ny_block, & ksub, ndte, & - icellt (iblk), & - indxti (:,iblk), indxtj (:,iblk), & + icellT (iblk), & + indxTi (:,iblk), indxTj (:,iblk), & arlx1i, denom1, & uvel (:,:,iblk), vvel (:,:,iblk), & dxT (:,:,iblk), dyT (:,:,iblk), & @@ -501,8 +500,8 @@ subroutine eap (dt) ! call ice_timer_start(timer_tmp2,iblk) call stepu (nx_block, ny_block, & - icellu (iblk), Cdn_ocnU (:,:,iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellU (iblk), Cdn_ocnU (:,:,iblk), & + indxUi (:,iblk), indxUj (:,iblk), & aiU (:,:,iblk), strtmp (:,:,:), & uocnU (:,:,iblk), vocnU (:,:,iblk), & waterxU (:,:,iblk), wateryU (:,:,iblk), & @@ -523,8 +522,8 @@ subroutine eap (dt) ! call ice_timer_start(timer_tmp3,iblk) if (mod(ksub,10) == 1) then ! only called every 10th timestep call stepa (nx_block , ny_block , & - dtei , icellt (iblk), & - indxti (:,iblk), indxtj (:,iblk), & + dtei , icellT (iblk), & + indxTi (:,iblk), indxTj (:,iblk), & a11 (:,:,iblk), a12 (:,:,iblk), & a11_1 (:,:,iblk), a11_2 (:,:,iblk), & a11_3 (:,:,iblk), a11_4 (:,:,iblk), & @@ -567,8 +566,8 @@ subroutine eap (dt) call dyn_finish & (nx_block, ny_block, & - icellu (iblk), Cdn_ocnU(:,:,iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellU (iblk), Cdn_ocnU(:,:,iblk), & + indxUi (:,iblk), indxUj (:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & uocnU (:,:,iblk), vocnU (:,:,iblk), & aiU (:,:,iblk), fmU (:,:,iblk), & @@ -1154,8 +1153,8 @@ end FUNCTION s22ks subroutine stress_eap (nx_block, ny_block, & ksub, ndte, & - icellt, & - indxti, indxtj, & + icellT, & + indxTi, indxTj, & arlx1i, denom1, & uvel, vvel, & dxT, dyT, & @@ -1187,11 +1186,11 @@ subroutine stress_eap (nx_block, ny_block, & nx_block, ny_block, & ! block dimensions ksub , & ! subcycling step ndte , & ! number of subcycles - icellt ! no. of cells where icetmask = 1 + icellT ! no. of cells where iceTmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxti , & ! compressed index in i-direction - indxtj ! compressed index in j-direction + indxTi , & ! compressed index in i-direction + indxTj ! compressed index in j-direction real (kind=dbl_kind), intent(in) :: & arlx1i , & ! dte/2T (original) or 1/alpha1 (revised) @@ -1274,9 +1273,9 @@ subroutine stress_eap (nx_block, ny_block, & strtmp(:,:,:) = c0 - do ij = 1, icellt - i = indxti(ij) - j = indxtj(ij) + do ij = 1, icellT + i = indxTi(ij) + j = indxTj(ij) !----------------------------------------------------------------- ! strain rates @@ -1878,8 +1877,8 @@ end subroutine update_stress_rdg ! Solves evolution equation for structure tensor (A19, A20) subroutine stepa (nx_block, ny_block, & - dtei, icellt, & - indxti, indxtj, & + dtei, icellT, & + indxTi, indxTj, & a11, a12, & a11_1, a11_2, a11_3, a11_4, & a12_1, a12_2, a12_3, a12_4, & @@ -1892,14 +1891,14 @@ subroutine stepa (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellt ! no. of cells where icetmask = 1 + icellT ! no. of cells where iceTmask = .true. real (kind=dbl_kind), intent(in) :: & dtei ! 1/dte, where dte is subcycling timestep (1/s) integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxti , & ! compressed index in i-direction - indxtj ! compressed index in j-direction + indxTi , & ! compressed index in i-direction + indxTj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & ! 
ice stress tensor (kg/s^2) in each corner of T cell @@ -1929,9 +1928,9 @@ subroutine stepa (nx_block, ny_block, & dteikth = c1 / (dtei + kth) p5kth = p5 * kth - do ij = 1, icellt - i = indxti(ij) - j = indxtj(ij) + do ij = 1, icellT + i = indxTi(ij) + j = indxTj(ij) ! ne call calc_ffrac(stressp_1(i,j), stressm_1(i,j), & diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 index d1264bae7..8eab5e260 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 @@ -99,7 +99,6 @@ subroutine evp (dt) stresspT, stressmT, stress12T, & stresspU, stressmU, stress12U use ice_grid, only: tmask, umask, umaskCD, nmask, emask, uvm, epm, npm, & - iceumask, iceemask, icenmask, & dxE, dxN, dxT, dxU, dyE, dyN, dyT, dyU, & ratiodxN, ratiodxNr, ratiodyE, ratiodyEr, & dxhy, dyhx, cxp, cyp, cxm, cym, & @@ -116,6 +115,7 @@ subroutine evp (dt) use ice_dyn_shared, only: evp_algorithm, stack_fields, unstack_fields, & DminTarea, visc_method, deformations, deformationsC_T, deformationsCD_T, & strain_rates_U, & + iceTmask, iceUmask, iceEmask, iceNmask, & dyn_haloUpdate real (kind=dbl_kind), intent(in) :: & @@ -130,20 +130,20 @@ subroutine evp (dt) i, j, ij ! local indices integer (kind=int_kind), dimension(max_blocks) :: & - icellt , & ! no. of cells where icetmask = 1 - icelln , & ! no. of cells where icenmask = .true. - icelle , & ! no. of cells where iceemask = .true. - icellu ! no. of cells where iceumask = .true. + icellT , & ! no. of cells where iceTmask = .true. + icellN , & ! no. of cells where iceNmask = .true. + icellE , & ! no. of cells where iceEmask = .true. + icellU ! no. of cells where iceUmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block, max_blocks) :: & - indxti , & ! compressed index in i-direction - indxtj , & ! compressed index in j-direction - indxei , & ! compressed index in i-direction - indxej , & ! compressed index in j-direction - indxni , & ! compressed index in i-direction - indxnj , & ! compressed index in j-direction - indxui , & ! compressed index in i-direction - indxuj ! compressed index in j-direction + indxTi , & ! compressed index in i-direction + indxTj , & ! compressed index in j-direction + indxEi , & ! compressed index in i-direction + indxEj , & ! compressed index in j-direction + indxNi , & ! compressed index in i-direction + indxNj , & ! compressed index in j-direction + indxUi , & ! compressed index in i-direction + indxUj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & uocnU , & ! i ocean current (m/s) @@ -210,7 +210,6 @@ subroutine evp (dt) calc_strair ! calculate air/ice stress integer (kind=int_kind), dimension (nx_block,ny_block,max_blocks) :: & - icetmask, & ! ice extent mask (T-cell) halomask ! generic halo mask type (ice_halo) :: & @@ -301,13 +300,13 @@ subroutine evp (dt) ilo, ihi, jlo, jhi, & aice (:,:,iblk), vice (:,:,iblk), & vsno (:,:,iblk), tmask (:,:,iblk), & - tmass (:,:,iblk), icetmask(:,:,iblk)) + tmass (:,:,iblk), iceTmask(:,:,iblk)) enddo ! 
iblk !$OMP END PARALLEL DO call ice_timer_start(timer_bound) - call ice_HaloUpdate (icetmask, halo_info, & + call ice_HaloUpdate (iceTmask, halo_info, & field_loc_center, field_type_scalar) call ice_timer_stop(timer_bound) @@ -400,16 +399,16 @@ subroutine evp (dt) if (trim(grid_ice) == 'B') then call dyn_prep2 (nx_block, ny_block, & ilo, ihi, jlo, jhi, & - icellt (iblk), icellu (iblk), & - indxti (:,iblk), indxtj (:,iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellT (iblk), icellU (iblk), & + indxTi (:,iblk), indxTj (:,iblk), & + indxUi (:,iblk), indxUj (:,iblk), & aiU (:,:,iblk), umass (:,:,iblk), & umassdti (:,:,iblk), fcor_blk (:,:,iblk), & umask (:,:,iblk), & uocnU (:,:,iblk), vocnU (:,:,iblk), & strairxU (:,:,iblk), strairyU (:,:,iblk), & ss_tltxU (:,:,iblk), ss_tltyU (:,:,iblk), & - icetmask (:,:,iblk), iceumask (:,:,iblk), & + iceTmask (:,:,iblk), iceUmask (:,:,iblk), & fmU (:,:,iblk), dt, & strtltxU (:,:,iblk), strtltyU (:,:,iblk), & strocnxU (:,:,iblk), strocnyU (:,:,iblk), & @@ -430,16 +429,16 @@ subroutine evp (dt) elseif (trim(grid_ice) == 'CD' .or. grid_ice == 'C') then call dyn_prep2 (nx_block, ny_block, & ilo, ihi, jlo, jhi, & - icellt (iblk), icellu (iblk), & - indxti (:,iblk), indxtj (:,iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellT (iblk), icellU (iblk), & + indxTi (:,iblk), indxTj (:,iblk), & + indxUi (:,iblk), indxUj (:,iblk), & aiU (:,:,iblk), umass (:,:,iblk), & umassdti (:,:,iblk), fcor_blk (:,:,iblk), & umaskCD (:,:,iblk), & uocnU (:,:,iblk), vocnU (:,:,iblk), & strairxU (:,:,iblk), strairyU (:,:,iblk), & ss_tltxU (:,:,iblk), ss_tltyU (:,:,iblk), & - icetmask (:,:,iblk), iceumask (:,:,iblk), & + iceTmask (:,:,iblk), iceUmask (:,:,iblk), & fmU (:,:,iblk), dt, & strtltxU (:,:,iblk), strtltyU (:,:,iblk), & strocnxU (:,:,iblk), strocnyU (:,:,iblk), & @@ -463,9 +462,9 @@ subroutine evp (dt) !----------------------------------------------------------------- strength(:,:,iblk) = c0 ! 
initialize - do ij = 1, icellt(iblk) - i = indxti(ij, iblk) - j = indxtj(ij, iblk) + do ij = 1, icellT(iblk) + i = indxTi(ij, iblk) + j = indxTj(ij, iblk) call icepack_ice_strength(ncat = ncat, & aice = aice (i,j, iblk), & vice = vice (i,j, iblk), & @@ -495,16 +494,16 @@ subroutine evp (dt) call dyn_prep2 (nx_block, ny_block, & ilo, ihi, jlo, jhi, & - icellt (iblk), icelln (iblk), & - indxti (:,iblk), indxtj (:,iblk), & - indxni (:,iblk), indxnj (:,iblk), & + icellT (iblk), icellN (iblk), & + indxTi (:,iblk), indxTj (:,iblk), & + indxNi (:,iblk), indxNj (:,iblk), & aiN (:,:,iblk), nmass (:,:,iblk), & nmassdti (:,:,iblk), fcorN_blk (:,:,iblk), & nmask (:,:,iblk), & uocnN (:,:,iblk), vocnN (:,:,iblk), & strairxN (:,:,iblk), strairyN (:,:,iblk), & ss_tltxN (:,:,iblk), ss_tltyN (:,:,iblk), & - icetmask (:,:,iblk), icenmask (:,:,iblk), & + iceTmask (:,:,iblk), iceNmask (:,:,iblk), & fmN (:,:,iblk), dt, & strtltxN (:,:,iblk), strtltyN (:,:,iblk), & strocnxN (:,:,iblk), strocnyN (:,:,iblk), & @@ -528,16 +527,16 @@ subroutine evp (dt) call dyn_prep2 (nx_block, ny_block, & ilo, ihi, jlo, jhi, & - icellt (iblk), icelle (iblk), & - indxti (:,iblk), indxtj (:,iblk), & - indxei (:,iblk), indxej (:,iblk), & + icellT (iblk), icellE (iblk), & + indxTi (:,iblk), indxTj (:,iblk), & + indxEi (:,iblk), indxEj (:,iblk), & aiE (:,:,iblk), emass (:,:,iblk), & emassdti (:,:,iblk), fcorE_blk (:,:,iblk), & emask (:,:,iblk), & uocnE (:,:,iblk), vocnE (:,:,iblk), & strairxE (:,:,iblk), strairyE (:,:,iblk), & ss_tltxE (:,:,iblk), ss_tltyE (:,:,iblk), & - icetmask (:,:,iblk), iceemask (:,:,iblk), & + iceTmask (:,:,iblk), iceEmask (:,:,iblk), & fmE (:,:,iblk), dt, & strtltxE (:,:,iblk), strtltyE (:,:,iblk), & strocnxE (:,:,iblk), strocnyE (:,:,iblk), & @@ -558,12 +557,12 @@ subroutine evp (dt) do i=1,nx_block do j=1,ny_block - if (.not.iceumask(i,j,iblk)) then + if (.not.iceUmask(i,j,iblk)) then stresspU (i,j,iblk) = c0 stressmU (i,j,iblk) = c0 stress12U(i,j,iblk) = c0 endif - if (icetmask(i,j,iblk) == 0) then + if (.not.iceTmask(i,j,iblk)) then stresspT (i,j,iblk) = c0 stressmT (i,j,iblk) = c0 stress12T(i,j,iblk) = c0 @@ -622,7 +621,7 @@ subroutine evp (dt) if (maskhalo_dyn) then halomask = 0 if (grid_ice == 'B') then - where (iceumask) halomask = 1 + where (iceUmask) halomask = 1 elseif (grid_ice == 'C' .or. grid_ice == 'CD') then !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,i,j) SCHEDULE(runtime) do iblk = 1, nblocks @@ -633,11 +632,11 @@ subroutine evp (dt) jhi = this_block%jhi do j = jlo,jhi do i = ilo,ihi - if (icetmask(i ,j ,iblk) /= 0 .or. & - icetmask(i-1,j ,iblk) /= 0 .or. & - icetmask(i+1,j ,iblk) /= 0 .or. & - icetmask(i ,j-1,iblk) /= 0 .or. & - icetmask(i ,j+1,iblk) /= 0) then + if (iceTmask(i ,j ,iblk) .or. & + iceTmask(i-1,j ,iblk) .or. & + iceTmask(i+1,j ,iblk) .or. & + iceTmask(i ,j-1,iblk) .or. 
& + iceTmask(i ,j+1,iblk)) then halomask(i,j,iblk) = 1 endif enddo @@ -664,8 +663,8 @@ subroutine evp (dt) !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks call seabed_stress_factor_LKD (nx_block , ny_block , & - icellu (iblk), & - indxui (:,iblk), indxuj(:,iblk), & + icellU (iblk), & + indxUi (:,iblk), indxUj(:,iblk), & vice (:,:,iblk), aice(:,:,iblk), & hwater(:,:,iblk), TbU (:,:,iblk)) enddo @@ -675,8 +674,8 @@ subroutine evp (dt) !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks call seabed_stress_factor_prob (nx_block , ny_block , & - icellt(iblk), indxti(:,iblk), indxtj(:,iblk), & - icellu(iblk), indxui(:,iblk), indxuj(:,iblk), & + icellT(iblk), indxTi(:,iblk), indxTj(:,iblk), & + icellU(iblk), indxUi(:,iblk), indxUj(:,iblk), & aicen(:,:,:,iblk), vicen(:,:,:,iblk) , & hwater (:,:,iblk), TbU (:,:,iblk)) enddo @@ -689,13 +688,13 @@ subroutine evp (dt) !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks call seabed_stress_factor_LKD (nx_block , ny_block, & - icelle (iblk), & - indxei (:,iblk), indxej(:,iblk), & + icellE (iblk), & + indxEi (:,iblk), indxEj(:,iblk), & vice (:,:,iblk), aice(:,:,iblk), & hwater(:,:,iblk), TbE (:,:,iblk)) call seabed_stress_factor_LKD (nx_block , ny_block, & - icelln (iblk), & - indxni (:,iblk), indxnj(:,iblk), & + icellN (iblk), & + indxNi (:,iblk), indxNj(:,iblk), & vice (:,:,iblk), aice(:,:,iblk), & hwater(:,:,iblk), TbN (:,:,iblk)) enddo @@ -705,13 +704,13 @@ subroutine evp (dt) !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks call seabed_stress_factor_prob (nx_block , ny_block , & - icellt(iblk), indxti(:,iblk), indxtj(:,iblk), & - icellu(iblk), indxui(:,iblk), indxuj(:,iblk), & + icellT(iblk), indxTi(:,iblk), indxTj(:,iblk), & + icellU(iblk), indxUi(:,iblk), indxUj(:,iblk), & aicen(:,:,:,iblk), vicen(:,:,:,iblk) , & hwater (:,:,iblk), TbU (:,:,iblk) , & TbE (:,:,iblk), TbN (:,:,iblk) , & - icelle(iblk), indxei(:,iblk), indxej(:,iblk), & - icelln(iblk), indxni(:,iblk), indxnj(:,iblk) ) + icellE(iblk), indxEi(:,iblk), indxEj(:,iblk), & + icellN(iblk), indxNi(:,iblk), indxNj(:,iblk) ) enddo !$OMP END PARALLEL DO endif @@ -734,8 +733,8 @@ subroutine evp (dt) call ice_timer_start(timer_evp_1d) call ice_dyn_evp_1d_copyin( & nx_block,ny_block,nblocks,nx_global+2*nghost,ny_global+2*nghost, & - icetmask, iceumask, & - Cdn_ocnU,aiU,uocnU,vocnU,forcexU,forceyU,TbU, & + iceTmask, iceUmask, & + cdn_ocnU,aiU,uocnU,vocnU,forcexU,forceyU,TbU, & umassdti,fmU,uarear,tarear,strintxU,strintyU,uvel_init,vvel_init,& strength,uvel,vvel,dxT,dyT, & stressp_1 ,stressp_2, stressp_3, stressp_4, & @@ -767,8 +766,8 @@ subroutine evp (dt) ! stress tensor equation, total surface stress !----------------------------------------------------------------- call stress (nx_block , ny_block , & - icellt (iblk), & - indxti (:,iblk), indxtj (:,iblk), & + icellT (iblk), & + indxTi (:,iblk), indxTj (:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & dxT (:,:,iblk), dyT (:,:,iblk), & dxhy (:,:,iblk), dyhx (:,:,iblk), & @@ -788,8 +787,8 @@ subroutine evp (dt) ! 
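!  [Editor's note]  The halomask construction above turns logical ice masks
!  into the integer mask expected by the masked halo updates.  A minimal
!  standalone illustration of the idiom (toy code, not CICE):
!
!     program where_mask_sketch
!        implicit none
!        logical :: iceUmask(4) = (/ .true., .false., .true., .false. /)
!        integer :: halomask(4)
!        halomask = 0
!        where (iceUmask) halomask = 1   ! logical -> 0/1 integer mask
!        print *, halomask               ! prints 1 0 1 0
!     end program where_mask_sketch
!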
momentum equation !----------------------------------------------------------------- call stepu (nx_block , ny_block , & - icellu (iblk), Cdn_ocnU(:,:,iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellU (iblk), Cdn_ocnU(:,:,iblk), & + indxUi (:,iblk), indxUj (:,iblk), & aiU (:,:,iblk), strtmp (:,:,:), & uocnU (:,:,iblk), vocnU (:,:,iblk), & waterxU (:,:,iblk), wateryU (:,:,iblk), & @@ -820,8 +819,8 @@ subroutine evp (dt) !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks call deformations (nx_block , ny_block , & - icellt (iblk), & - indxti (:,iblk), indxtj (:,iblk), & + icellT (iblk), & + indxTi (:,iblk), indxTj (:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & dxT (:,:,iblk), dyT (:,:,iblk), & cxp (:,:,iblk), cyp (:,:,iblk), & @@ -845,8 +844,8 @@ subroutine evp (dt) ! NOTE these are actually strain rates * area (m^2/s) !----------------------------------------------------------------- call strain_rates_U (nx_block , ny_block , & - icellu (iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellU (iblk), & + indxUi (:,iblk), indxUj (:,iblk), & uvelE (:,:,iblk), vvelE (:,:,iblk), & uvelN (:,:,iblk), vvelN (:,:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & @@ -869,8 +868,8 @@ subroutine evp (dt) !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks call stressC_T (nx_block , ny_block , & - icellt (iblk), & - indxti (:,iblk), indxtj (:,iblk), & + icellT (iblk), & + indxTi (:,iblk), indxTj (:,iblk), & uvelE (:,:,iblk), vvelE (:,:,iblk), & uvelN (:,:,iblk), vvelN (:,:,iblk), & dxN (:,:,iblk), dyE (:,:,iblk), & @@ -897,8 +896,8 @@ subroutine evp (dt) !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks call stressC_U (nx_block , ny_block , & - icellu (iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellU (iblk), & + indxUi (:,iblk), indxUj (:,iblk), & uarea (:,:,iblk), & etax2U (:,:,iblk), deltaU (:,:,iblk), & strengthU (:,:,iblk), shearU (:,:,iblk), & @@ -915,8 +914,8 @@ subroutine evp (dt) do iblk = 1, nblocks call div_stress_Ex (nx_block , ny_block , & - icelle (iblk), & - indxei (:,iblk), indxej (:,iblk), & + icellE (iblk), & + indxEi (:,iblk), indxEj (:,iblk), & dxE (:,:,iblk), dyE (:,:,iblk), & dxU (:,:,iblk), dyT (:,:,iblk), & earear (:,:,iblk) , & @@ -924,8 +923,8 @@ subroutine evp (dt) stress12U (:,:,iblk), strintxE (:,:,iblk) ) call div_stress_Ny (nx_block , ny_block , & - icelln (iblk), & - indxni (:,iblk), indxnj (:,iblk), & + icellN (iblk), & + indxNi (:,iblk), indxNj (:,iblk), & dxN (:,:,iblk), dyN (:,:,iblk), & dxT (:,:,iblk), dyU (:,:,iblk), & narear (:,:,iblk) , & @@ -939,8 +938,8 @@ subroutine evp (dt) do iblk = 1, nblocks call stepu_C (nx_block , ny_block , & ! u, E point - icelle (iblk), Cdn_ocnE (:,:,iblk), & - indxei (:,iblk), indxej (:,iblk), & + icellE (iblk), Cdn_ocnE (:,:,iblk), & + indxEi (:,iblk), indxEj (:,iblk), & aiE (:,:,iblk), & uocnE (:,:,iblk), vocnE (:,:,iblk), & waterxE (:,:,iblk), forcexE (:,:,iblk), & @@ -951,8 +950,8 @@ subroutine evp (dt) TbE (:,:,iblk)) call stepv_C (nx_block, ny_block, & ! 
v, N point - icelln (iblk), Cdn_ocnN (:,:,iblk), & - indxni (:,iblk), indxnj (:,iblk), & + icellN (iblk), Cdn_ocnN (:,:,iblk), & + indxNi (:,iblk), indxNj (:,iblk), & aiN (:,:,iblk), & uocnN (:,:,iblk), vocnN (:,:,iblk), & wateryN (:,:,iblk), forceyN (:,:,iblk), & @@ -1005,8 +1004,8 @@ subroutine evp (dt) !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks call deformationsC_T (nx_block , ny_block , & - icellt (iblk), & - indxti (:,iblk), indxtj (:,iblk), & + icellT (iblk), & + indxTi (:,iblk), indxTj (:,iblk), & uvelE (:,:,iblk), vvelE (:,:,iblk), & uvelN (:,:,iblk), vvelN (:,:,iblk), & dxN (:,:,iblk), dyE (:,:,iblk), & @@ -1025,8 +1024,8 @@ subroutine evp (dt) !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks call stressCD_T (nx_block , ny_block , & - icellt (iblk), & - indxti (:,iblk), indxtj (:,iblk), & + icellT (iblk), & + indxTi (:,iblk), indxTj (:,iblk), & uvelE (:,:,iblk), vvelE (:,:,iblk), & uvelN (:,:,iblk), vvelN (:,:,iblk), & dxN (:,:,iblk), dyE (:,:,iblk), & @@ -1059,8 +1058,8 @@ subroutine evp (dt) ! NOTE these are actually strain rates * area (m^2/s) !----------------------------------------------------------------- call strain_rates_U (nx_block , ny_block , & - icellu (iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellU (iblk), & + indxUi (:,iblk), indxUj (:,iblk), & uvelE (:,:,iblk), vvelE (:,:,iblk), & uvelN (:,:,iblk), vvelN (:,:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & @@ -1073,8 +1072,8 @@ subroutine evp (dt) shearU (:,:,iblk), DeltaU (:,:,iblk) ) call stressCD_U (nx_block , ny_block , & - icellu (iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellU (iblk), & + indxUi (:,iblk), indxUj (:,iblk), & uarea (:,:,iblk), & zetax2U (:,:,iblk), etax2U (:,:,iblk), & strengthU(:,:,iblk), & @@ -1097,8 +1096,8 @@ subroutine evp (dt) do iblk = 1, nblocks call div_stress_Ex (nx_block , ny_block , & - icelle (iblk), & - indxei (:,iblk), indxej (:,iblk), & + icellE (iblk), & + indxEi (:,iblk), indxEj (:,iblk), & dxE (:,:,iblk), dyE (:,:,iblk), & dxU (:,:,iblk), dyT (:,:,iblk), & earear (:,:,iblk) , & @@ -1106,8 +1105,8 @@ subroutine evp (dt) stress12U (:,:,iblk), strintxE (:,:,iblk) ) call div_stress_Ey (nx_block , ny_block , & - icelle (iblk), & - indxei (:,iblk), indxej (:,iblk), & + icellE (iblk), & + indxEi (:,iblk), indxEj (:,iblk), & dxE (:,:,iblk), dyE (:,:,iblk), & dxU (:,:,iblk), dyT (:,:,iblk), & earear (:,:,iblk) , & @@ -1115,8 +1114,8 @@ subroutine evp (dt) stress12T (:,:,iblk), strintyE (:,:,iblk) ) call div_stress_Nx (nx_block , ny_block , & - icelln (iblk), & - indxni (:,iblk), indxnj (:,iblk), & + icellN (iblk), & + indxNi (:,iblk), indxNj (:,iblk), & dxN (:,:,iblk), dyN (:,:,iblk), & dxT (:,:,iblk), dyU (:,:,iblk), & narear (:,:,iblk) , & @@ -1124,8 +1123,8 @@ subroutine evp (dt) stress12T (:,:,iblk), strintxN (:,:,iblk) ) call div_stress_Ny (nx_block , ny_block , & - icelln (iblk), & - indxni (:,iblk), indxnj (:,iblk), & + icellN (iblk), & + indxNi (:,iblk), indxNj (:,iblk), & dxN (:,:,iblk), dyN (:,:,iblk), & dxT (:,:,iblk), dyU (:,:,iblk), & narear (:,:,iblk) , & @@ -1139,8 +1138,8 @@ subroutine evp (dt) do iblk = 1, nblocks call stepuv_CD (nx_block , ny_block , & ! E point - icelle (iblk), Cdn_ocnE (:,:,iblk), & - indxei (:,iblk), indxej (:,iblk), & + icellE (iblk), Cdn_ocnE (:,:,iblk), & + indxEi (:,iblk), indxEj (:,iblk), & aiE (:,:,iblk), & uocnE (:,:,iblk), vocnE (:,:,iblk), & waterxE (:,:,iblk), wateryE (:,:,iblk), & @@ -1153,8 +1152,8 @@ subroutine evp (dt) TbE (:,:,iblk)) call stepuv_CD (nx_block , ny_block , & ! 
N point - icelln (iblk), Cdn_ocnN (:,:,iblk), & - indxni (:,iblk), indxnj (:,iblk), & + icellN (iblk), Cdn_ocnN (:,:,iblk), & + indxNi (:,iblk), indxNj (:,iblk), & aiN (:,:,iblk), & uocnN (:,:,iblk), vocnN (:,:,iblk), & waterxN (:,:,iblk), wateryN (:,:,iblk), & @@ -1196,8 +1195,8 @@ subroutine evp (dt) !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks call deformationsCD_T (nx_block , ny_block , & - icellt (iblk), & - indxti (:,iblk), indxtj (:,iblk), & + icellT (iblk), & + indxTi (:,iblk), indxTj (:,iblk), & uvelE (:,:,iblk), vvelE (:,:,iblk), & uvelN (:,:,iblk), vvelN (:,:,iblk), & dxN (:,:,iblk), dyE (:,:,iblk), & @@ -1299,8 +1298,8 @@ subroutine evp (dt) do iblk = 1, nblocks call dyn_finish & (nx_block , ny_block , & - icellu (iblk), Cdn_ocnU(:,:,iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellU (iblk), Cdn_ocnU(:,:,iblk), & + indxUi (:,iblk), indxUj (:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & uocnU (:,:,iblk), vocnU (:,:,iblk), & aiU (:,:,iblk), fmU (:,:,iblk), & @@ -1315,8 +1314,8 @@ subroutine evp (dt) call dyn_finish & (nx_block , ny_block , & - icelln (iblk), Cdn_ocnN(:,:,iblk), & - indxni (:,iblk), indxnj (:,iblk), & + icellN (iblk), Cdn_ocnN(:,:,iblk), & + indxNi (:,iblk), indxNj (:,iblk), & uvelN (:,:,iblk), vvelN (:,:,iblk), & uocnN (:,:,iblk), vocnN (:,:,iblk), & aiN (:,:,iblk), fmN (:,:,iblk), & @@ -1324,8 +1323,8 @@ subroutine evp (dt) call dyn_finish & (nx_block , ny_block , & - icelle (iblk), Cdn_ocnE(:,:,iblk), & - indxei (:,iblk), indxej (:,iblk), & + icellE (iblk), Cdn_ocnE(:,:,iblk), & + indxEi (:,iblk), indxEj (:,iblk), & uvelE (:,:,iblk), vvelE (:,:,iblk), & uocnE (:,:,iblk), vocnE (:,:,iblk), & aiE (:,:,iblk), fmE (:,:,iblk), & @@ -1358,8 +1357,8 @@ end subroutine evp ! author: Elizabeth C. Hunke, LANL subroutine stress (nx_block, ny_block, & - icellt, & - indxti, indxtj, & + icellT, & + indxTi, indxTj, & uvel, vvel, & dxT, dyT, & dxhy, dyhx, & @@ -1379,11 +1378,11 @@ subroutine stress (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellt ! no. of cells where icetmask = 1 + icellT ! no. of cells where iceTmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxti , & ! compressed index in i-direction - indxtj ! compressed index in j-direction + indxTi , & ! compressed index in i-direction + indxTj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & strength , & ! ice strength (N/m) @@ -1439,9 +1438,9 @@ subroutine stress (nx_block, ny_block, & str(:,:,:) = c0 - do ij = 1, icellt - i = indxti(ij) - j = indxtj(ij) + do ij = 1, icellT + i = indxTi(ij) + j = indxTj(ij) !----------------------------------------------------------------- ! strain rates @@ -1660,8 +1659,8 @@ end subroutine stress ! for solving the sea ice momentum equation. Ocean Model., 101, 59-67. subroutine stressC_T (nx_block, ny_block , & - icellt , & - indxti , indxtj , & + icellT , & + indxTi , indxTj , & uvelE , vvelE , & uvelN , vvelN , & dxN , dyE , & @@ -1676,11 +1675,11 @@ subroutine stressC_T (nx_block, ny_block , & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellt ! no. of cells where icetmask = 1 + icellT ! no. of cells where iceTmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxti , & ! compressed index in i-direction - indxtj ! compressed index in j-direction + indxTi , & ! compressed index in i-direction + indxTj ! 
compressed index in j-direction

    real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: &
       uvelE    , & ! x-component of velocity (m/s) at the E point
@@ -1723,17 +1722,17 @@ subroutine stressC_T (nx_block, ny_block , &
      !-----------------------------------------------------------------

      call strain_rates_T (nx_block   , ny_block   , &
-                          icellt     , &
-                          indxti(:)  , indxtj (:) , &
+                          icellT     , &
+                          indxTi(:)  , indxTj (:) , &
                           uvelE (:,:), vvelE  (:,:), &
                           uvelN (:,:), vvelN  (:,:), &
                           dxN   (:,:), dyE    (:,:), &
                           dxT   (:,:), dyT    (:,:), &
                           divT  (:,:), tensionT(:,:) )

-      do ij = 1, icellt
-         i = indxti(ij)
-         j = indxtj(ij)
+      do ij = 1, icellT
+         i = indxTi(ij)
+         j = indxTj(ij)

      !-----------------------------------------------------------------
      ! Square of shear strain rate at T obtained from interpolation of
@@ -1785,8 +1784,8 @@ end subroutine stressC_T
! for solving the sea ice momentum equation. Ocean Model., 101, 59-67.

      subroutine stressC_U (nx_block , ny_block, &
-                           icellu, &
-                           indxui  , indxuj, &
+                           icellU, &
+                           indxUi  , indxUj, &
                            uarea , &
                            etax2U , deltaU, &
                            strengthU, shearU, &
@@ -1797,11 +1796,11 @@ subroutine stressC_U (nx_block , ny_block, &

      integer (kind=int_kind), intent(in) :: &
         nx_block, ny_block, & ! block dimensions
-        icellu                ! no. of cells where iceumask = 1
+        icellU                ! no. of cells where iceUmask = .true.

      integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: &
-        indxui  , & ! compressed index in i-direction
-        indxuj      ! compressed index in j-direction
+        indxUi  , & ! compressed index in i-direction
+        indxUj      ! compressed index in j-direction

      real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: &
         uarea     , & ! area of U point
@@ -1834,17 +1833,17 @@ subroutine stressC_U (nx_block , ny_block, &
      !-----------------------------------------------------------------

      if (visc_method == 'avg_zeta') then
-        do ij = 1, icellu
-           i = indxui(ij)
-           j = indxuj(ij)
+        do ij = 1, icellU
+           i = indxUi(ij)
+           j = indxUj(ij)

            stress12(i,j) = (stress12(i,j)*(c1-arlx1i*revp) &
                          + arlx1i*p5*etax2U(i,j)*shearU(i,j)) * denom1
         enddo

      elseif (visc_method == 'avg_strength') then
-        do ij = 1, icellu
-           i = indxui(ij)
-           j = indxuj(ij)
+        do ij = 1, icellU
+           i = indxUi(ij)
+           j = indxUj(ij)

            DminUarea = deltaminEVP*uarea(i,j)
            ! only need etax2U here, but other terms are calculated with etax2U
            ! minimal extra calculations here even though it seems like there is
@@ -1865,8 +1864,8 @@ end subroutine stressC_U
! Nov 2021

      subroutine stressCD_T (nx_block, ny_block, &
-                            icellt, &
-                            indxti, indxtj, &
+                            icellT, &
+                            indxTi, indxTj, &
                             uvelE, vvelE, &
                             uvelN, vvelN, &
                             dxN, dyE, &
@@ -1882,11 +1881,11 @@ subroutine stressCD_T (nx_block, ny_block, &

      integer (kind=int_kind), intent(in) :: &
         nx_block, ny_block, & ! block dimensions
-        icellt                ! no. of cells where icetmask = 1
+        icellT                ! no. of cells where iceTmask = .true.

      integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: &
-        indxti  , & ! compressed index in i-direction
-        indxtj      ! compressed index in j-direction
+        indxTi  , & ! compressed index in i-direction
+        indxTj      ! compressed index in j-direction

      real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: &
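!  [Editor's note]  For reference, the avg_zeta update of stress12 above is
!  the standard EVP subcycling relaxation written out.  With alpha = 1/arlx1i,
!  r = revp, and eta = etax2U/2, the code line
!     stress12(i,j) = (stress12(i,j)*(c1-arlx1i*revp) &
!                   + arlx1i*p5*etax2U(i,j)*shearU(i,j)) * denom1
!  is a direct transcription of (sketch)
!     sigma12_new = ( sigma12_old*(1 - r/alpha) + (eta*shearU)/alpha ) * denom1
!  with denom1 the normalization factor set in ice_dyn_shared.

       uvelE    , & ! 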
x-component of velocity (m/s) at the E point
@@ -1929,8 +1928,8 @@ subroutine stressCD_T (nx_block, ny_block, &
      !-----------------------------------------------------------------

      call strain_rates_T (nx_block   , ny_block   , &
-                          icellt     , &
-                          indxti(:)  , indxtj (:) , &
+                          icellT     , &
+                          indxTi(:)  , indxTj (:) , &
                           uvelE (:,:), vvelE  (:,:), &
                           uvelN (:,:), vvelN  (:,:), &
                           dxN   (:,:), dyE    (:,:), &
                           dxT   (:,:), dyT    (:,:), &
                           divT  (:,:), tensionT(:,:), &
                           shearT(:,:), DeltaT (:,:) )

-      do ij = 1, icellt
-         i = indxti(ij)
-         j = indxtj(ij)
+      do ij = 1, icellT
+         i = indxTi(ij)
+         j = indxTj(ij)

      !-----------------------------------------------------------------
      ! viscosities and replacement pressure at T point
@@ -1975,8 +1974,8 @@ end subroutine stressCD_T
! Nov 2021

      subroutine stressCD_U (nx_block, ny_block, &
-                            icellu, &
-                            indxui, indxuj, &
+                            icellU, &
+                            indxUi, indxUj, &
                             uarea, &
                             zetax2U, etax2U, &
                             strengthU, &
@@ -1991,11 +1990,11 @@ subroutine stressCD_U (nx_block, ny_block, &

      integer (kind=int_kind), intent(in) :: &
         nx_block, ny_block, & ! block dimensions
-        icellu                ! no. of cells where iceumask = 1
+        icellU                ! no. of cells where iceUmask = .true.

      integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: &
-        indxui  , & ! compressed index in i-direction
-        indxuj      ! compressed index in j-direction
+        indxUi  , & ! compressed index in i-direction
+        indxUj      ! compressed index in j-direction

      real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: &
         uarea     , & ! area of U-cell (m^2)
@@ -2025,9 +2024,9 @@ subroutine stressCD_U (nx_block, ny_block, &

      character(len=*), parameter :: subname = '(stressCD_U)'

-      do ij = 1, icellu
-         i = indxui(ij)
-         j = indxuj(ij)
+      do ij = 1, icellU
+         i = indxUi(ij)
+         j = indxUj(ij)

      !-----------------------------------------------------------------
      ! viscosities and replacement pressure at U point
diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp_1d.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp_1d.F90
index fe04a3d63..e874611bd 100644
--- a/cicecore/cicedynB/dynamics/ice_dyn_evp_1d.F90
+++ b/cicecore/cicedynB/dynamics/ice_dyn_evp_1d.F90
@@ -1017,7 +1017,7 @@ end subroutine dealloc1d
!=======================================================================

    subroutine ice_dyn_evp_1d_copyin(nx, ny, nblk, nx_glob, ny_glob, &
-      I_icetmask, I_iceumask, I_cdn_ocn, I_aiu, I_uocn, I_vocn, &
+      I_iceTmask, I_iceUmask, I_cdn_ocn, I_aiu, I_uocn, I_vocn, &
       I_forcex, I_forcey, I_Tbu, I_umassdti, I_fm, I_uarear, I_tarear, &
       I_strintx, I_strinty, I_uvel_init, I_vvel_init, I_strength, &
       I_uvel, I_vvel, I_dxT, I_dyT, I_stressp_1, I_stressp_2, &
@@ -1035,9 +1035,7 @@ subroutine ice_dyn_evp_1d_copyin(nx, ny, nblk, nx_glob, ny_glob, &
       integer(int_kind), intent(in) :: nx, ny, nblk, nx_glob, ny_glob
       logical(kind=log_kind), dimension(nx, ny, nblk), intent(in) :: &
-         I_iceumask
-      integer(kind=int_kind), dimension(nx, ny, nblk), intent(in) :: &
-         I_icetmask
+         I_iceTmask, I_iceUmask
       real(kind=dbl_kind), dimension(nx, ny, nblk), intent(in) :: &
          I_cdn_ocn, I_aiu, I_uocn, I_vocn, I_forcex, I_forcey, I_Tbu, &
          I_umassdti, I_fm, I_uarear, I_tarear, I_strintx, I_strinty, &

      ! 
local variables

      logical(kind=log_kind), dimension(nx_glob, ny_glob) :: &
         G_iceTmask, G_iceUmask
      real(kind=dbl_kind), dimension(nx_glob, ny_glob) :: &
         G_cdn_ocn, G_aiu, G_uocn, G_vocn, G_forcex, G_forcey, G_Tbu, &
         G_umassdti, G_fm, G_uarear, G_tarear, G_strintx, G_strinty, &
@@ -1063,8 +1059,8 @@ subroutine ice_dyn_evp_1d_copyin(nx, ny, nblk, nx_glob, ny_glob, &
      character(len=*), parameter :: &
         subname = '(ice_dyn_evp_1d_copyin)'

-      call gather_global_ext(G_icetmask, I_icetmask, master_task, distrb_info )
-      call gather_global_ext(G_iceumask, I_iceumask, master_task, distrb_info )
+      call gather_global_ext(G_iceTmask, I_iceTmask, master_task, distrb_info )
+      call gather_global_ext(G_iceUmask, I_iceUmask, master_task, distrb_info )
       call gather_global_ext(G_cdn_ocn, I_cdn_ocn, master_task, distrb_info )
       call gather_global_ext(G_aiu, I_aiu, master_task, distrb_info )
       call gather_global_ext(G_uocn, I_uocn, master_task, distrb_info )
@@ -1101,9 +1097,9 @@ subroutine ice_dyn_evp_1d_copyin(nx, ny, nblk, nx_glob, ny_glob, &
       ! all calculations are done on master task
       if (my_task == master_task) then
          ! find number of active points and allocate 1D vectors
-         call calc_na(nx_glob, ny_glob, NA_len, G_icetmask, G_iceumask)
+         call calc_na(nx_glob, ny_glob, NA_len, G_iceTmask, G_iceUmask)
          call alloc1d(NA_len)
-         call calc_2d_indices(nx_glob, ny_glob, NA_len, G_icetmask, G_iceumask)
+         call calc_2d_indices(nx_glob, ny_glob, NA_len, G_iceTmask, G_iceUmask)
          call calc_navel(nx_glob, ny_glob, NA_len, NAVEL_len)
          call alloc1d_navel(NAVEL_len)
          ! initialize OpenMP. FIXME: ought to be called from main
@@ -1120,7 +1116,7 @@ subroutine ice_dyn_evp_1d_copyin(nx, ny, nblk, nx_glob, ny_glob, &
            G_stressp_2, G_stressp_3, G_stressp_4, G_stressm_1, &
            G_stressm_2, G_stressm_3, G_stressm_4, G_stress12_1, &
            G_stress12_2, G_stress12_3, G_stress12_4)
-         call calc_halo_parent(nx_glob, ny_glob, NA_len, NAVEL_len, G_icetmask)
+         call calc_halo_parent(nx_glob, ny_glob, NA_len, NAVEL_len, G_iceTmask)
       end if

    end subroutine ice_dyn_evp_1d_copyin
@@ -1336,7 +1332,7 @@ end subroutine ice_dyn_evp_1d_kernel

!=======================================================================

-   subroutine calc_na(nx, ny, na, icetmask, iceumask)
+   subroutine calc_na(nx, ny, na, iceTmask, iceUmask)
      ! Calculate number of active points

      use ice_blocks, only : nghost

      implicit none

      integer(kind=int_kind), intent(in) :: nx, ny
-     integer(kind=int_kind), dimension(nx, ny), intent(in) :: &
-        icetmask
      logical(kind=log_kind), dimension(nx, ny), intent(in) :: &
-        iceumask
+        iceTmask, iceUmask
      integer(kind=int_kind), intent(out) :: na

      ! local variables

      integer(kind=int_kind) :: i, j

      na = 0
      ! NOTE: T mask includes northern and eastern ghost cells
      do j = 1 + nghost, ny
         do i = 1 + nghost, nx
-           if (icetmask(i,j) == 1 .or. iceumask(i,j)) na = na + 1
+           if (iceTmask(i,j) .or. 
iceUmask(i,j)) na = na + 1 end do end do @@ -1368,17 +1362,15 @@ end subroutine calc_na !======================================================================= - subroutine calc_2d_indices(nx, ny, na, icetmask, iceumask) + subroutine calc_2d_indices(nx, ny, na, iceTmask, iceUmask) use ice_blocks, only : nghost implicit none integer(kind=int_kind), intent(in) :: nx, ny, na - integer(kind=int_kind), dimension(nx, ny), intent(in) :: & - icetmask logical(kind=log_kind), dimension(nx, ny), intent(in) :: & - iceumask + iceTmask, iceUmask ! local variables @@ -1394,12 +1386,12 @@ subroutine calc_2d_indices(nx, ny, na, icetmask, iceumask) ! NOTE: T mask includes northern and eastern ghost cells do j = 1 + nghost, ny do i = 1 + nghost, nx - if (icetmask(i,j) == 1 .or. iceumask(i,j)) then + if (iceTmask(i,j) .or. iceUmask(i,j)) then Nmaskt = Nmaskt + 1 indi(Nmaskt) = i indj(Nmaskt) = j - if (icetmask(i,j) /= 1) skiptcell(Nmaskt) = .true. - if (.not. iceumask(i,j)) skipucell(Nmaskt) = .true. + if (.not. iceTmask(i,j)) skiptcell(Nmaskt) = .true. + if (.not. iceUmask(i,j)) skipucell(Nmaskt) = .true. ! NOTE: U mask does not include northern and eastern ! ghost cells. Skip northern and eastern ghost cells if (i == nx) skipucell(Nmaskt) = .true. @@ -1588,13 +1580,13 @@ end subroutine convert_2d_1d !======================================================================= - subroutine calc_halo_parent(nx, ny, na, navel, I_icetmask) + subroutine calc_halo_parent(nx, ny, na, navel, I_iceTmask) implicit none integer(kind=int_kind), intent(in) :: nx, ny, na, navel - integer(kind=int_kind), dimension(nx, ny), intent(in) :: & - I_icetmask + logical(kind=log_kind), dimension(nx, ny), intent(in) :: & + I_iceTmask ! local variables @@ -1619,10 +1611,10 @@ subroutine calc_halo_parent(nx, ny, na, navel, I_icetmask) j = int((indij(iw) - 1) / (nx)) + 1 i = indij(iw) - (j - 1) * nx ! if within ghost zone - if (i == nx .and. I_icetmask(2, j) == 1) Ihalo(iw) = 2 + (j - 1) * nx - if (i == 1 .and. I_icetmask(nx - 1, j) == 1) Ihalo(iw) = (nx - 1) + (j - 1) * nx - if (j == ny .and. I_icetmask(i, 2) == 1) Ihalo(iw) = i + nx - if (j == 1 .and. I_icetmask(i, ny - 1) == 1) Ihalo(iw) = i + (ny - 2) * nx + if (i == nx .and. I_iceTmask(2, j) ) Ihalo(iw) = 2 + (j - 1) * nx + if (i == 1 .and. I_iceTmask(nx - 1, j) ) Ihalo(iw) = (nx - 1) + (j - 1) * nx + if (j == ny .and. I_iceTmask(i, 2) ) Ihalo(iw) = i + nx + if (j == 1 .and. I_iceTmask(i, ny - 1) ) Ihalo(iw) = i + (ny - 2) * nx end do ! relate halo indices to indij vector diff --git a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 index 95d2eedb1..187ec55cc 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 @@ -106,6 +106,12 @@ module ice_dyn_shared uvelE_init , & ! x-component of velocity (m/s), beginning of timestep vvelE_init ! y-component of velocity (m/s), beginning of timestep + logical (kind=log_kind), dimension (:,:,:), allocatable, public :: & + iceTmask, & ! ice extent mask (T-cell) + iceUmask, & ! ice extent mask (U-cell) + iceNmask, & ! ice extent mask (N-cell) + iceEmask ! ice extent mask (E-cell) + real (kind=dbl_kind), allocatable, public :: & DminTarea(:,:,:) ! deltamin * tarea (m^2/s) @@ -168,6 +174,8 @@ subroutine alloc_dyn_shared allocate( & uvel_init (nx_block,ny_block,max_blocks), & ! x-component of velocity (m/s), beginning of timestep vvel_init (nx_block,ny_block,max_blocks), & ! 
y-component of velocity (m/s), beginning of timestep + iceTmask (nx_block,ny_block,max_blocks), & ! T mask for dynamics + iceUmask (nx_block,ny_block,max_blocks), & ! U mask for dynamics stat=ierr) if (ierr/=0) call abort_ice(subname//': Out of memory') @@ -177,6 +185,8 @@ subroutine alloc_dyn_shared vvelE_init (nx_block,ny_block,max_blocks), & ! y-component of velocity (m/s), beginning of timestep uvelN_init (nx_block,ny_block,max_blocks), & ! x-component of velocity (m/s), beginning of timestep vvelN_init (nx_block,ny_block,max_blocks), & ! y-component of velocity (m/s), beginning of timestep + iceEmask (nx_block,ny_block,max_blocks), & ! E mask for dynamics + iceNmask (nx_block,ny_block,max_blocks), & ! N mask for dynamics stat=ierr) if (ierr/=0) call abort_ice(subname//': Out of memory') endif @@ -199,7 +209,7 @@ subroutine init_dyn (dt) stresspT, stressmT, stress12T, & stresspU, stressmU, stress12U use ice_state, only: uvel, vvel, uvelE, vvelE, uvelN, vvelN, divu, shear - use ice_grid, only: ULAT, NLAT, ELAT, tarea, iceumask, iceemask, icenmask + use ice_grid, only: ULAT, NLAT, ELAT, tarea real (kind=dbl_kind), intent(in) :: & dt ! time step @@ -310,10 +320,10 @@ subroutine init_dyn (dt) endif ! ice extent mask on velocity points - iceumask(i,j,iblk) = .false. + iceUmask(i,j,iblk) = .false. if (grid_ice == 'CD' .or. grid_ice == 'C') then - iceemask(i,j,iblk) = .false. - icenmask(i,j,iblk) = .false. + iceEmask(i,j,iblk) = .false. + iceNmask(i,j,iblk) = .false. end if enddo ! i enddo ! j @@ -394,7 +404,7 @@ subroutine dyn_prep1 (nx_block, ny_block, & real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out) :: & Tmass ! total mass of ice and snow (kg/m^2) - integer (kind=int_kind), dimension (nx_block,ny_block), intent(out) :: & + logical (kind=log_kind), dimension (nx_block,ny_block), intent(out) :: & iceTmask ! ice extent mask (T-cell) ! local variables @@ -438,7 +448,7 @@ subroutine dyn_prep1 (nx_block, ny_block, & !----------------------------------------------------------------- ! augmented mask (land + open ocean) !----------------------------------------------------------------- - iceTmask (i,j) = 0 + iceTmask (i,j) = .false. enddo enddo @@ -450,10 +460,10 @@ subroutine dyn_prep1 (nx_block, ny_block, & if (tmphm(i-1,j+1) .or. tmphm(i,j+1) .or. tmphm(i+1,j+1) .or. & tmphm(i-1,j) .or. tmphm(i,j) .or. tmphm(i+1,j) .or. & tmphm(i-1,j-1) .or. tmphm(i,j-1) .or. tmphm(i+1,j-1) ) then - iceTmask(i,j) = 1 + iceTmask(i,j) = .true. endif - if (.not.Tmask(i,j)) iceTmask(i,j) = 0 + if (.not.Tmask(i,j)) iceTmask(i,j) = .false. enddo enddo @@ -504,8 +514,8 @@ subroutine dyn_prep2 (nx_block, ny_block, & ilo,ihi,jlo,jhi ! beginning and end of physical domain integer (kind=int_kind), intent(out) :: & - icellT , & ! no. of cells where iceTmask = 1 - icellX ! no. of cells where iceXmask = 1 + icellT , & ! no. of cells where iceTmask = .true. + icellX ! no. of cells where iceXmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block), intent(out) :: & indxTi , & ! compressed index in i-direction on T grid indxTj ! compressed index in j-direction on T grid logical (kind=log_kind), dimension (nx_block,ny_block), intent(in) :: & Xmask ! land/boundary mask, thickness (X-grid-cell) - integer (kind=int_kind), dimension (nx_block,ny_block), intent(in) :: & + logical (kind=log_kind), dimension (nx_block,ny_block), intent(in) :: & iceTmask ! 
ice extent mask (T-cell) logical (kind=log_kind), dimension (nx_block,ny_block), intent(inout) :: & @@ -590,7 +600,7 @@ subroutine dyn_prep2 (nx_block, ny_block, & taubx (i,j) = c0 tauby (i,j) = c0 - if (iceTmask(i,j)==0) then + if (.not.iceTmask(i,j)) then stressp_1 (i,j) = c0 stressp_2 (i,j) = c0 stressp_3 (i,j) = c0 @@ -608,7 +618,7 @@ subroutine dyn_prep2 (nx_block, ny_block, & enddo ! j !----------------------------------------------------------------- - ! Identify cells where iceTmask = 1 + ! Identify cells where iceTmask = .true. ! Note: The icellT mask includes north and east ghost cells ! where stresses are needed. !----------------------------------------------------------------- @@ -616,7 +626,7 @@ subroutine dyn_prep2 (nx_block, ny_block, & icellT = 0 do j = jlo, jhi+1 do i = ilo, ihi+1 - if (iceTmask(i,j) == 1) then + if (iceTmask(i,j)) then icellT = icellT + 1 indxTi(icellT) = i indxTj(icellT) = j @@ -729,7 +739,7 @@ subroutine stepu (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellU ! total count when iceumask is true + icellU ! total count when iceUmask is true integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & indxUi , & ! compressed index in i-direction @@ -1166,7 +1176,7 @@ subroutine dyn_finish (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellU ! total count when iceumask is true + icellU ! total count when iceUmask is true integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & indxUi , & ! compressed index in i-direction @@ -1635,7 +1645,7 @@ subroutine deformations (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellT ! no. of cells where iceTmask = 1 + icellT ! no. of cells where iceTmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & indxTi , & ! compressed index in i-direction @@ -1733,7 +1743,7 @@ subroutine deformationsCD_T (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellT ! no. of cells where iceTmask = 1 + icellT ! no. of cells where iceTmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & indxTi , & ! compressed index in i-direction @@ -1830,7 +1840,7 @@ subroutine deformationsC_T (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellT ! no. of cells where iceTmask = 1 + icellT ! no. of cells where iceTmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & indxTi , & ! compressed index in i-direction diff --git a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 index 2a0dbc4b3..caedecc1e 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 @@ -96,14 +96,14 @@ module ice_dyn_vp ! module variables integer (kind=int_kind), allocatable :: & - icellt(:) , & ! no. of cells where icetmask = 1 - icellu(:) ! no. of cells where iceumask = 1 + icellT(:) , & ! no. of cells where iceTmask = .true. + icellU(:) ! no. of cells where iceUmask = .true. integer (kind=int_kind), allocatable :: & - indxti(:,:) , & ! compressed index in i-direction - indxtj(:,:) , & ! compressed index in j-direction - indxui(:,:) , & ! compressed index in i-direction - indxuj(:,:) ! compressed index in j-direction + indxTi(:,:) , & ! 
compressed index in i-direction + indxTj(:,:) , & ! compressed index in j-direction + indxUi(:,:) , & ! compressed index in i-direction + indxUj(:,:) ! compressed index in j-direction real (kind=dbl_kind), allocatable :: & fld2(:,:,:,:), & ! work array for boundary updates @@ -138,11 +138,11 @@ subroutine init_vp this_block ! block information for current block ! Initialize module variables - allocate(icellt(max_blocks), icellu(max_blocks)) - allocate(indxti(nx_block*ny_block, max_blocks), & - indxtj(nx_block*ny_block, max_blocks), & - indxui(nx_block*ny_block, max_blocks), & - indxuj(nx_block*ny_block, max_blocks)) + allocate(icellT(max_blocks), icellU(max_blocks)) + allocate(indxTi(nx_block*ny_block, max_blocks), & + indxTj(nx_block*ny_block, max_blocks), & + indxUi(nx_block*ny_block, max_blocks), & + indxUj(nx_block*ny_block, max_blocks)) allocate(fld2(nx_block,ny_block,2,max_blocks)) allocate(fld3(nx_block,ny_block,3,max_blocks)) allocate(fld4(nx_block,ny_block,4,max_blocks)) @@ -171,7 +171,7 @@ subroutine implicit_solver (dt) use ice_blocks, only: block, get_block, nx_block, ny_block use ice_domain, only: blocks_ice, halo_info, maskhalo_dyn use ice_domain_size, only: max_blocks, ncat - use ice_dyn_shared, only: deformations + use ice_dyn_shared, only: deformations, iceTmask, iceUmask use ice_flux, only: rdg_conv, rdg_shear, strairxT, strairyT, & strairxU, strairyU, uocn, vocn, ss_tltx, ss_tlty, fmU, & strtltxU, strtltyU, strocnxU, strocnyU, strintxU, strintyU, taubxU, taubyU, & @@ -181,7 +181,7 @@ subroutine implicit_solver (dt) stressm_1, stressm_2, stressm_3, stressm_4, & stress12_1, stress12_2, stress12_3, stress12_4 use ice_grid, only: tmask, umask, dxT, dyT, cxp, cyp, cxm, cym, & - tarear, grid_type, grid_average_X2Y, iceumask, & + tarear, grid_type, grid_average_X2Y, & grid_atm_dynu, grid_atm_dynv, grid_ocn_dynu, grid_ocn_dynv use ice_state, only: aice, aiU, vice, vsno, uvel, vvel, divu, shear, & aice_init, aice0, aicen, vicen, strength @@ -226,7 +226,6 @@ subroutine implicit_solver (dt) logical (kind=log_kind) :: calc_strair integer (kind=int_kind), dimension (nx_block,ny_block,max_blocks) :: & - icetmask, & ! ice extent mask (T-cell) halomask ! generic halo mask type (ice_halo) :: & @@ -290,13 +289,13 @@ subroutine implicit_solver (dt) ilo, ihi, jlo, jhi, & aice (:,:,iblk), vice (:,:,iblk), & vsno (:,:,iblk), tmask (:,:,iblk), & - tmass (:,:,iblk), icetmask(:,:,iblk)) + tmass (:,:,iblk), iceTmask(:,:,iblk)) enddo ! 
iblk !$OMP END PARALLEL DO call ice_timer_start(timer_bound) - call ice_HaloUpdate (icetmask, halo_info, & + call ice_HaloUpdate (iceTmask, halo_info, & field_loc_center, field_type_scalar) call ice_timer_stop(timer_bound) @@ -359,16 +358,16 @@ subroutine implicit_solver (dt) call dyn_prep2 (nx_block, ny_block, & ilo, ihi, jlo, jhi, & - icellt(iblk), icellu(iblk), & - indxti (:,iblk), indxtj (:,iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellT(iblk), icellU(iblk), & + indxTi (:,iblk), indxTj (:,iblk), & + indxUi (:,iblk), indxUj (:,iblk), & aiU (:,:,iblk), umass (:,:,iblk), & umassdti (:,:,iblk), fcor_blk (:,:,iblk), & umask (:,:,iblk), & uocnU (:,:,iblk), vocnU (:,:,iblk), & strairxU (:,:,iblk), strairyU (:,:,iblk), & ss_tltxU (:,:,iblk), ss_tltyU (:,:,iblk), & - icetmask (:,:,iblk), iceumask (:,:,iblk), & + iceTmask (:,:,iblk), iceUmask (:,:,iblk), & fmU (:,:,iblk), dt, & strtltxU (:,:,iblk), strtltyU (:,:,iblk), & strocnxU (:,:,iblk), strocnyU (:,:,iblk), & @@ -387,8 +386,8 @@ subroutine implicit_solver (dt) TbU (:,:,iblk)) call calc_bfix (nx_block , ny_block , & - icellu(iblk) , & - indxui (:,iblk), indxuj (:,iblk), & + icellU(iblk) , & + indxUi (:,iblk), indxUj (:,iblk), & umassdti (:,:,iblk), & forcexU (:,:,iblk), forceyU (:,:,iblk), & uvel_init (:,:,iblk), vvel_init (:,:,iblk), & @@ -399,9 +398,9 @@ subroutine implicit_solver (dt) !----------------------------------------------------------------- strength(:,:,iblk) = c0 ! initialize - do ij = 1, icellt(iblk) - i = indxti(ij, iblk) - j = indxtj(ij, iblk) + do ij = 1, icellT(iblk) + i = indxTi(ij, iblk) + j = indxTj(ij, iblk) call icepack_ice_strength (ncat, & aice (i,j, iblk), & vice (i,j, iblk), & @@ -431,7 +430,7 @@ subroutine implicit_solver (dt) if (maskhalo_dyn) then call ice_timer_start(timer_bound) halomask = 0 - where (iceumask) halomask = 1 + where (iceUmask) halomask = 1 call ice_HaloUpdate (halomask, halo_info, & field_loc_center, field_type_scalar) call ice_timer_stop(timer_bound) @@ -446,8 +445,8 @@ subroutine implicit_solver (dt) !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks call seabed_stress_factor_LKD (nx_block, ny_block, & - icellu (iblk), & - indxui(:,iblk), indxuj(:,iblk), & + icellU (iblk), & + indxUi(:,iblk), indxUj(:,iblk), & vice(:,:,iblk), aice(:,:,iblk), & hwater(:,:,iblk), TbU(:,:,iblk)) enddo @@ -458,8 +457,8 @@ subroutine implicit_solver (dt) do iblk = 1, nblocks call seabed_stress_factor_prob (nx_block, ny_block, & - icellt(iblk), indxti(:,iblk), indxtj(:,iblk), & - icellu(iblk), indxui(:,iblk), indxuj(:,iblk), & + icellT(iblk), indxTi(:,iblk), indxTj(:,iblk), & + icellU(iblk), indxUi(:,iblk), indxUj(:,iblk), & aicen(:,:,:,iblk), vicen(:,:,:,iblk), & hwater(:,:,iblk), TbU(:,:,iblk)) enddo @@ -475,7 +474,7 @@ subroutine implicit_solver (dt) ntot = 0 do iblk = 1, nblocks - ntot = ntot + icellu(iblk) + ntot = ntot + icellU(iblk) enddo ntot = 2 * ntot ! times 2 because of u and v @@ -484,9 +483,9 @@ subroutine implicit_solver (dt) !----------------------------------------------------------------- ! 
Start of nonlinear iteration !----------------------------------------------------------------- - call anderson_solver (icellt , icellu , & - indxti , indxtj , & - indxui , indxuj , & + call anderson_solver (icellT , icellU , & + indxTi , indxTj , & + indxUi , indxUj , & aiU , ntot , & uocnU , vocnU , & waterxU , wateryU, & @@ -510,8 +509,8 @@ subroutine implicit_solver (dt) !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks call stress_vp (nx_block , ny_block , & - icellt(iblk) , & - indxti (:,iblk), indxtj (:,iblk), & + icellT(iblk) , & + indxTi (:,iblk), indxTj (:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & dxT (:,:,iblk), dyT (:,:,iblk), & cxp (:,:,iblk), cyp (:,:,iblk), & @@ -533,8 +532,8 @@ subroutine implicit_solver (dt) !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks call deformations (nx_block , ny_block , & - icellt(iblk) , & - indxti (:,iblk), indxtj (:,iblk), & + icellT(iblk) , & + indxTi (:,iblk), indxTj (:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & dxT (:,:,iblk), dyT (:,:,iblk), & cxp (:,:,iblk), cyp (:,:,iblk), & @@ -552,8 +551,8 @@ subroutine implicit_solver (dt) !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks call calc_seabed_stress (nx_block , ny_block , & - icellu(iblk) , & - indxui (:,iblk), indxuj (:,iblk), & + icellU(iblk) , & + indxUi (:,iblk), indxUj (:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & Cb (:,:,iblk), & taubxU (:,:,iblk), taubyU (:,:,iblk)) @@ -638,8 +637,8 @@ subroutine implicit_solver (dt) call dyn_finish & (nx_block, ny_block, & - icellu (iblk), Cdn_ocnU(:,:,iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellU (iblk), Cdn_ocnU(:,:,iblk), & + indxUi (:,iblk), indxUj (:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & uocnU (:,:,iblk), vocnU (:,:,iblk), & aiU (:,:,iblk), fmU (:,:,iblk), & @@ -674,9 +673,9 @@ end subroutine implicit_solver ! H. F. Walker, “Anderson Acceleration: Algorithms and Implementations” ! [Online]. Available: https://users.wpi.edu/~walker/Papers/anderson_accn_algs_imps.pdf - subroutine anderson_solver (icellt , icellu , & - indxti , indxtj , & - indxui , indxuj , & + subroutine anderson_solver (icellT , icellU , & + indxTi , indxTj , & + indxUi , indxUj , & aiU , ntot , & uocn , vocn , & waterxU , wateryU, & @@ -703,14 +702,14 @@ subroutine anderson_solver (icellt , icellu , & ntot ! size of problem for Anderson integer (kind=int_kind), dimension(max_blocks), intent(in) :: & - icellt , & ! no. of cells where icetmask = 1 - icellu ! no. of cells where iceumask = 1 + icellT , & ! no. of cells where iceTmask = .true. + icellU ! no. of cells where iceUmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block, max_blocks), intent(in) :: & - indxti , & ! compressed index in i-direction - indxtj , & ! compressed index in j-direction - indxui , & ! compressed index in i-direction - indxuj ! compressed index in j-direction + indxTi , & ! compressed index in i-direction + indxTj , & ! compressed index in j-direction + indxUi , & ! compressed index in i-direction + indxUj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), intent(in) :: & aiU , & ! 
ice fraction on u-grid @@ -839,8 +838,8 @@ subroutine anderson_solver (icellt , icellu , & vprev_k(:,:,iblk) = vvel(:,:,iblk) call calc_zeta_dPr (nx_block , ny_block , & - icellt (iblk), & - indxti (:,iblk), indxtj (:,iblk), & + icellT (iblk), & + indxTi (:,iblk), indxTj (:,iblk), & uprev_k (:,:,iblk), vprev_k (:,:,iblk), & dxT (:,:,iblk), dyT (:,:,iblk), & dxhy (:,:,iblk), dyhx (:,:,iblk), & @@ -851,8 +850,8 @@ subroutine anderson_solver (icellt , icellu , & rep_prs(:,:,iblk,:), stress_Pr (:,:,:)) call calc_vrel_Cb (nx_block , ny_block , & - icellu (iblk), Cdn_ocn (:,:,iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellU (iblk), Cdn_ocn (:,:,iblk), & + indxUi (:,iblk), indxUj (:,iblk), & aiU (:,:,iblk), TbU (:,:,iblk), & uocn (:,:,iblk), vocn (:,:,iblk), & ulin (:,:,iblk), vlin (:,:,iblk), & @@ -860,8 +859,8 @@ subroutine anderson_solver (icellt , icellu , & ! prepare b vector (RHS) call calc_bvec (nx_block , ny_block , & - icellu (iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellU (iblk), & + indxUi (:,iblk), indxUj (:,iblk), & stress_Pr (:,:,:), uarear (:,:,iblk), & waterxU (:,:,iblk), wateryU (:,:,iblk), & bxfix (:,:,iblk), byfix (:,:,iblk), & @@ -870,9 +869,9 @@ subroutine anderson_solver (icellt , icellu , & ! Compute nonlinear residual norm (PDE residual) call matvec (nx_block , ny_block , & - icellu (iblk) , icellt (iblk), & - indxui (:,iblk) , indxuj (:,iblk), & - indxti (:,iblk) , indxtj (:,iblk), & + icellU (iblk) , icellT (iblk), & + indxUi (:,iblk) , indxUj (:,iblk), & + indxTi (:,iblk) , indxTj (:,iblk), & dxT (:,:,iblk) , dyT (:,:,iblk), & dxhy (:,:,iblk) , dyhx (:,:,iblk), & cxp (:,:,iblk) , cyp (:,:,iblk), & @@ -884,8 +883,8 @@ subroutine anderson_solver (icellt , icellu , & uarear (:,:,iblk) , & Au (:,:,iblk) , Av (:,:,iblk)) call residual_vec (nx_block , ny_block , & - icellu (iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellU (iblk), & + indxUi (:,iblk), indxUj (:,iblk), & bx (:,:,iblk), by (:,:,iblk), & Au (:,:,iblk), Av (:,:,iblk), & Fx (:,:,iblk), Fy (:,:,iblk), & @@ -912,8 +911,8 @@ subroutine anderson_solver (icellt , icellu , & soly = vprev_k call arrays_to_vec (nx_block , ny_block , & nblocks , max_blocks , & - icellu (:), ntot , & - indxui (:,:), indxuj (:,:), & + icellU (:), ntot , & + indxUi (:,:), indxUj (:,:), & uprev_k (:,:,:), vprev_k (:,:,:), & sol (:)) @@ -927,8 +926,8 @@ subroutine anderson_solver (icellt , icellu , & do iblk = 1, nblocks ! first compute diagonal contributions due to rheology term call formDiag_step1 (nx_block , ny_block , & - icellu (iblk) , & - indxui (:,iblk) , indxuj(:,iblk), & + icellU (iblk) , & + indxUi (:,iblk) , indxUj(:,iblk), & dxT (:,:,iblk) , dyT (:,:,iblk), & dxhy (:,:,iblk) , dyhx(:,:,iblk), & cxp (:,:,iblk) , cyp (:,:,iblk), & @@ -937,8 +936,8 @@ subroutine anderson_solver (icellt , icellu , & diag_rheo(:,:,:)) ! second compute the full diagonal call formDiag_step2 (nx_block , ny_block , & - icellu (iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellU (iblk), & + indxUi (:,iblk), indxUj (:,iblk), & diag_rheo (:,:,:), vrel (:,:,iblk), & umassdti (:,:,iblk), & uarear (:,:,iblk), Cb (:,:,iblk), & @@ -961,8 +960,8 @@ subroutine anderson_solver (icellt , icellu , & ! 
Put FGMRES solution solx,soly in fpfunc vector (needed for Anderson) call arrays_to_vec (nx_block , ny_block , & nblocks , max_blocks , & - icellu (:), ntot , & - indxui (:,:), indxuj (:,:), & + icellU (:), ntot , & + indxUi (:,:), indxUj (:,:), & solx (:,:,:), soly (:,:,:), & fpfunc (:)) elseif (fpfunc_andacc == 2) then @@ -978,15 +977,15 @@ subroutine anderson_solver (icellt , icellu , & #else call vec_to_arrays (nx_block , ny_block , & nblocks , max_blocks , & - icellu (:), ntot , & - indxui (:,:), indxuj(:,:) , & + icellU (:), ntot , & + indxUi (:,:), indxUj(:,:) , & res (:), & fpresx (:,:,:), fpresy (:,:,:)) !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks call calc_L2norm_squared (nx_block , ny_block , & - icellu (iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellU (iblk), & + indxUi (:,iblk), indxUj (:,iblk), & fpresx(:,:,iblk), fpresy(:,:,iblk), & L2norm (iblk)) enddo @@ -1097,8 +1096,8 @@ subroutine anderson_solver (icellt , icellu , & !----------------------------------------------------------------------- call vec_to_arrays (nx_block , ny_block , & nblocks , max_blocks , & - icellu (:), ntot , & - indxui (:,:), indxuj (:,:), & + icellU (:), ntot , & + indxUi (:,:), indxUj (:,:), & sol (:), & uvel (:,:,:), vvel (:,:,:)) @@ -1121,8 +1120,8 @@ subroutine anderson_solver (icellt , icellu , & fpresx(:,:,iblk) = uvel(:,:,iblk) - uprev_k(:,:,iblk) fpresy(:,:,iblk) = vvel(:,:,iblk) - vprev_k(:,:,iblk) call calc_L2norm_squared (nx_block , ny_block , & - icellu (iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellU (iblk), & + indxUi (:,iblk), indxUj (:,iblk), & fpresx(:,:,iblk), fpresy(:,:,iblk), & L2norm (iblk)) enddo @@ -1142,8 +1141,8 @@ end subroutine anderson_solver ! Computes the viscosities and dPr/dx, dPr/dy subroutine calc_zeta_dPr (nx_block, ny_block, & - icellt , & - indxti , indxtj , & + icellT , & + indxTi , indxTj , & uvel , vvel , & dxT , dyT , & dxhy , dyhx , & @@ -1158,11 +1157,11 @@ subroutine calc_zeta_dPr (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellt ! no. of cells where icetmask = 1 + icellT ! no. of cells where iceTmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxti , & ! compressed index in i-direction - indxtj ! compressed index in j-direction + indxTi , & ! compressed index in i-direction + indxTj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & strength , & ! ice strength (N/m) @@ -1204,14 +1203,14 @@ subroutine calc_zeta_dPr (nx_block, ny_block, & character(len=*), parameter :: subname = '(calc_zeta_dPr)' ! Initialize stPr, zetax2 and etax2 to zero - ! (for cells where icetmask is false) + ! (for cells where iceTmask is false) stPr = c0 zetax2 = c0 etax2 = c0 - do ij = 1, icellt - i = indxti(ij) - j = indxtj(ij) + do ij = 1, icellT + i = indxTi(ij) + j = indxTj(ij) !----------------------------------------------------------------- ! strain rates @@ -1337,8 +1336,8 @@ end subroutine calc_zeta_dPr ! viscous-plastic sea ice stresses, Geosci. Model Dev., 13, 1763–1769, subroutine stress_vp (nx_block , ny_block , & - icellt , & - indxti , indxtj , & + icellT , & + indxTi , indxTj , & uvel , vvel , & dxT , dyT , & cxp , cyp , & @@ -1356,11 +1355,11 @@ subroutine stress_vp (nx_block , ny_block , & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellt ! no. of cells where icetmask = 1 + icellT ! no. of cells where iceTmask = .true. 
integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxti , & ! compressed index in i-direction - indxtj ! compressed index in j-direction + indxTi , & ! compressed index in i-direction + indxTj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & uvel , & ! x-component of velocity (m/s) @@ -1395,9 +1394,9 @@ subroutine stress_vp (nx_block , ny_block , & character(len=*), parameter :: subname = '(stress_vp)' - do ij = 1, icellt - i = indxti(ij) - j = indxtj(ij) + do ij = 1, icellT + i = indxTi(ij) + j = indxTj(ij) !----------------------------------------------------------------- ! strain rates @@ -1447,8 +1446,8 @@ end subroutine stress_vp ! Compute vrel and seabed stress coefficients subroutine calc_vrel_Cb (nx_block, ny_block, & - icellu , Cw , & - indxui , indxuj , & + icellU , Cw , & + indxUi , indxUj , & aiU , TbU , & uocn , vocn , & uvel , vvel , & @@ -1458,11 +1457,11 @@ subroutine calc_vrel_Cb (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellu ! total count when iceumask is true + icellU ! total count when iceUmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxui , & ! compressed index in i-direction - indxuj ! compressed index in j-direction + indxUi , & ! compressed index in i-direction + indxUj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & TbU, & ! seabed stress factor (N/m^2) @@ -1494,9 +1493,9 @@ subroutine calc_vrel_Cb (nx_block, ny_block, & if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) - do ij = 1, icellu - i = indxui(ij) - j = indxuj(ij) + do ij = 1, icellU + i = indxUi(ij) + j = indxUj(ij) ! (magnitude of relative ocean current)*rhow*drag*aice vrel(i,j) = aiU(i,j)*rhow*Cw(i,j)*sqrt((uocn(i,j) - uvel(i,j))**2 + & @@ -1512,19 +1511,19 @@ end subroutine calc_vrel_Cb ! Compute seabed stress (diagnostic) subroutine calc_seabed_stress (nx_block, ny_block, & - icellu , & - indxui , indxuj , & + icellU , & + indxUi , indxUj , & uvel , vvel , & Cb , & taubxU , taubyU) integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellu ! total count when iceumask is true + icellU ! total count when iceUmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxui , & ! compressed index in i-direction - indxuj ! compressed index in j-direction + indxUi , & ! compressed index in i-direction + indxUj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & uvel , & ! x-component of velocity (m/s) @@ -1542,9 +1541,9 @@ subroutine calc_seabed_stress (nx_block, ny_block, & character(len=*), parameter :: subname = '(calc_seabed_stress)' - do ij = 1, icellu - i = indxui(ij) - j = indxuj(ij) + do ij = 1, icellU + i = indxUi(ij) + j = indxUj(ij) taubxU(i,j) = -uvel(i,j)*Cb(i,j) taubyU(i,j) = -vvel(i,j)*Cb(i,j) @@ -1559,9 +1558,9 @@ end subroutine calc_seabed_stress ! Av = A(u,v)_[y] * vvel (y components of A(u,v) * (u,v)) subroutine matvec (nx_block, ny_block, & - icellu , icellt , & - indxui , indxuj , & - indxti , indxtj , & + icellU , icellT , & + indxUi , indxUj , & + indxTi , indxTj , & dxT , dyT , & dxhy , dyhx , & cxp , cyp , & @@ -1577,14 +1576,14 @@ subroutine matvec (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellu, & ! 
total count when iceumask is true - icellt ! no. of cells where icetmask = 1 + icellU, & ! total count when iceUmask = .true. + icellT ! total count when iceTmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxui , & ! compressed index in i-direction - indxuj , & ! compressed index in j-direction - indxti , & ! compressed index in i-direction - indxtj ! compressed index in j-direction + indxUi , & ! compressed index in i-direction + indxUj , & ! compressed index in j-direction + indxTi , & ! compressed index in i-direction + indxTj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & dxT , & ! width of T-cell through the middle (m) @@ -1653,9 +1652,9 @@ subroutine matvec (nx_block, ny_block, & str(:,:,:) = c0 - do ij = 1, icellt - i = indxti(ij) - j = indxtj(ij) + do ij = 1, icellT + i = indxTi(ij) + j = indxTj(ij) !----------------------------------------------------------------- ! strain rates @@ -1796,15 +1795,15 @@ subroutine matvec (nx_block, ny_block, & str(i,j,8) = strp_tmp - strm_tmp + str12sn & - dyhx(i,j)*(csigpsw + csigmsw) + dxhy(i,j)*csig12sw - enddo ! ij - icellt + enddo ! ij - icellT !----------------------------------------------------------------- ! Form Au and Av !----------------------------------------------------------------- - do ij = 1, icellu - i = indxui(ij) - j = indxuj(ij) + do ij = 1, icellU + i = indxUi(ij) + j = indxUj(ij) ccaimp = umassdti(i,j) + vrel(i,j) * cosw + Cb(i,j) ! kg/m^2 s @@ -1818,7 +1817,7 @@ subroutine matvec (nx_block, ny_block, & Au(i,j) = ccaimp*uvel(i,j) - ccb*vvel(i,j) - strintx Av(i,j) = ccaimp*vvel(i,j) + ccb*uvel(i,j) - strinty - enddo ! ij - icellu + enddo ! ij - icellU end subroutine matvec @@ -1828,8 +1827,8 @@ end subroutine matvec ! does not depend on (u,v) and thus do not change during the nonlinear iteration subroutine calc_bfix (nx_block , ny_block , & - icellu , & - indxui , indxuj , & + icellU , & + indxUi , indxUj , & umassdti , & forcexU , forceyU , & uvel_init, vvel_init, & @@ -1837,11 +1836,11 @@ subroutine calc_bfix (nx_block , ny_block , & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellu ! no. of cells where iceumask = 1 + icellU ! no. of cells where iceUmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxui , & ! compressed index in i-direction - indxuj ! compressed index in j-direction + indxUi , & ! compressed index in i-direction + indxUj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & uvel_init,& ! x-component of velocity (m/s), beginning of time step @@ -1861,9 +1860,9 @@ subroutine calc_bfix (nx_block , ny_block , & character(len=*), parameter :: subname = '(calc_bfix)' - do ij = 1, icellu - i = indxui(ij) - j = indxuj(ij) + do ij = 1, icellU + i = indxUi(ij) + j = indxUj(ij) bxfix(i,j) = umassdti(i,j)*uvel_init(i,j) + forcexU(i,j) byfix(i,j) = umassdti(i,j)*vvel_init(i,j) + forceyU(i,j) @@ -1878,8 +1877,8 @@ end subroutine calc_bfix ! depending on (u,v) subroutine calc_bvec (nx_block, ny_block, & - icellu , & - indxui , indxuj , & + icellU , & + indxUi , indxUj , & stPr , uarear , & waterxU , wateryU , & bxfix , byfix , & @@ -1888,11 +1887,11 @@ subroutine calc_bvec (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellu ! total count when iceumask is true + icellU ! total count when iceUmask = .true. 
integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxui , & ! compressed index in i-direction - indxuj ! compressed index in j-direction + indxUi , & ! compressed index in i-direction + indxUj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & uarear , & ! 1/uarea @@ -1930,9 +1929,9 @@ subroutine calc_bvec (nx_block, ny_block, & if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) - do ij = 1, icellu - i = indxui(ij) - j = indxuj(ij) + do ij = 1, icellU + i = indxUi(ij) + j = indxUj(ij) ! ice/ocean stress taux = vrel(i,j)*waterxU(i,j) ! NOTE this is not the entire @@ -1958,8 +1957,8 @@ end subroutine calc_bvec ! Av = A(u,v)_[y] * vvel (y components of A(u,v) * (u,v)) subroutine residual_vec (nx_block , ny_block, & - icellu , & - indxui , indxuj , & + icellU , & + indxUi , indxUj , & bx , by , & Au , Av , & Fx , Fy , & @@ -1967,11 +1966,11 @@ subroutine residual_vec (nx_block , ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellu ! total count when iceumask is true + icellU ! total count when iceUmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxui , & ! compressed index in i-direction - indxuj ! compressed index in j-direction + indxUi , & ! compressed index in i-direction + indxUj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & bx , & ! b vector, bx = taux + bxfix (N/m^2) @@ -2001,9 +2000,9 @@ subroutine residual_vec (nx_block , ny_block, & sum_squared = c0 endif - do ij = 1, icellu - i = indxui(ij) - j = indxuj(ij) + do ij = 1, icellU + i = indxUi(ij) + j = indxUj(ij) Fx(i,j) = bx(i,j) - Au(i,j) Fy(i,j) = by(i,j) - Av(i,j) @@ -2020,8 +2019,8 @@ end subroutine residual_vec ! Part 1: compute the contributions to the diagonal from the rheology term subroutine formDiag_step1 (nx_block, ny_block, & - icellu , & - indxui , indxuj , & + icellU , & + indxUi , indxUj , & dxT , dyT , & dxhy , dyhx , & cxp , cyp , & @@ -2031,11 +2030,11 @@ subroutine formDiag_step1 (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellu ! no. of cells where icetmask = 1 + icellU ! no. of cells where iceUmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxui , & ! compressed index in i-direction - indxuj ! compressed index in j-direction + indxUi , & ! compressed index in i-direction + indxUj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & dxT , & ! width of T-cell through the middle (m) @@ -2191,10 +2190,10 @@ subroutine formDiag_step1 (nx_block, ny_block, & dj = 1 endif - do ij = 1, icellu + do ij = 1, icellU - iu = indxui(ij) - ju = indxuj(ij) + iu = indxUi(ij) + ju = indxUj(ij) i = iu + di j = ju + dj @@ -2395,8 +2394,8 @@ end subroutine formDiag_step1 ! Part 2: compute diagonal subroutine formDiag_step2 (nx_block, ny_block, & - icellu , & - indxui , indxuj , & + icellU , & + indxUi , indxUj , & Drheo , vrel , & umassdti, & uarear , Cb , & @@ -2404,11 +2403,11 @@ subroutine formDiag_step2 (nx_block, ny_block, & integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellu ! total count when iceumask is true + icellU ! total count when iceUmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxui , & ! 
compressed index in i-direction - indxuj ! compressed index in j-direction + indxUi , & ! compressed index in i-direction + indxUj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & vrel, & ! coefficient for tauw @@ -2454,9 +2453,9 @@ subroutine formDiag_step2 (nx_block, ny_block, & ! Drheo(i,j,7) corresponds to str(i+1,j,7) ! Drheo(i,j,8) corresponds to str(i+1,j+1,8)) - do ij = 1, icellu - i = indxui(ij) - j = indxuj(ij) + do ij = 1, icellU + i = indxUi(ij) + j = indxUj(ij) ccaimp = umassdti(i,j) + vrel(i,j) * cosw + Cb(i,j) ! kg/m^2 s @@ -2476,18 +2475,18 @@ end subroutine formDiag_step2 ! Compute squared l^2 norm of a grid function (tpu,tpv) subroutine calc_L2norm_squared (nx_block, ny_block, & - icellu , & - indxui , indxuj , & + icellU , & + indxUi , indxUj , & tpu , tpv , & L2norm) integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions - icellu ! total count when iceumask is true + icellU ! total count when iceUmask = .true. integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxui , & ! compressed index in i-direction - indxuj ! compressed index in j-direction + indxUi , & ! compressed index in i-direction + indxUj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & tpu , & ! x-component of vector grid function @@ -2509,9 +2508,9 @@ subroutine calc_L2norm_squared (nx_block, ny_block, & L2norm = c0 - do ij = 1, icellu - i = indxui(ij) - j = indxuj(ij) + do ij = 1, icellU + i = indxUi(ij) + j = indxUj(ij) L2norm = L2norm + tpu(i,j)**2 + tpv(i,j)**2 enddo ! ij @@ -2524,8 +2523,8 @@ end subroutine calc_L2norm_squared subroutine arrays_to_vec (nx_block, ny_block , & nblocks , max_blocks, & - icellu , ntot , & - indxui , indxuj , & + icellU , ntot , & + indxUi , indxUj , & tpu , tpv , & outvec) @@ -2536,11 +2535,11 @@ subroutine arrays_to_vec (nx_block, ny_block , & ntot ! size of problem for Anderson integer (kind=int_kind), dimension (max_blocks), intent(in) :: & - icellu + icellU integer (kind=int_kind), dimension (nx_block*ny_block, max_blocks), intent(in) :: & - indxui , & ! compressed index in i-direction - indxuj ! compressed index in j-direction + indxUi , & ! compressed index in i-direction + indxUj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block, max_blocks), intent(in) :: & tpu , & ! x-component of vector @@ -2564,9 +2563,9 @@ subroutine arrays_to_vec (nx_block, ny_block , & tot = 0 do iblk = 1, nblocks - do ij = 1, icellu(iblk) - i = indxui(ij, iblk) - j = indxuj(ij, iblk) + do ij = 1, icellU(iblk) + i = indxUi(ij, iblk) + j = indxUj(ij, iblk) tot = tot + 1 outvec(tot) = tpu(i, j, iblk) tot = tot + 1 @@ -2582,8 +2581,8 @@ end subroutine arrays_to_vec subroutine vec_to_arrays (nx_block, ny_block , & nblocks , max_blocks, & - icellu , ntot , & - indxui , indxuj , & + icellU , ntot , & + indxUi , indxUj , & invec , & tpu , tpv) @@ -2594,11 +2593,11 @@ subroutine vec_to_arrays (nx_block, ny_block , & ntot ! size of problem for Anderson integer (kind=int_kind), dimension (max_blocks), intent(in) :: & - icellu + icellU integer (kind=int_kind), dimension (nx_block*ny_block, max_blocks), intent(in) :: & - indxui , & ! compressed index in i-direction - indxuj ! compressed index in j-direction + indxUi , & ! compressed index in i-direction + indxUj ! compressed index in j-direction real (kind=dbl_kind), dimension (ntot), intent(in) :: & invec ! 
input 1D vector @@ -2623,9 +2622,9 @@ subroutine vec_to_arrays (nx_block, ny_block , & tot = 0 do iblk = 1, nblocks - do ij = 1, icellu(iblk) - i = indxui(ij, iblk) - j = indxuj(ij, iblk) + do ij = 1, icellU(iblk) + i = indxUi(ij, iblk) + j = indxUj(ij, iblk) tot = tot + 1 tpu(i, j, iblk) = invec(tot) tot = tot + 1 @@ -2813,9 +2812,9 @@ subroutine fgmres (zetax2 , etax2 , & !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks call matvec (nx_block , ny_block , & - icellu (iblk) , icellt (iblk), & - indxui (:,iblk) , indxuj (:,iblk), & - indxti (:,iblk) , indxtj (:,iblk), & + icellU (iblk) , icellT (iblk), & + indxUi (:,iblk) , indxUj (:,iblk), & + indxTi (:,iblk) , indxTj (:,iblk), & dxT (:,:,iblk) , dyT (:,:,iblk), & dxhy (:,:,iblk) , dyhx (:,:,iblk), & cxp (:,:,iblk) , cyp (:,:,iblk), & @@ -2827,8 +2826,8 @@ subroutine fgmres (zetax2 , etax2 , & uarear (:,:,iblk) , & workspace_x(:,:,iblk) , workspace_y(:,:,iblk)) call residual_vec (nx_block , ny_block , & - icellu (iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellU (iblk), & + indxUi (:,iblk), indxUj (:,iblk), & bx (:,:,iblk), by (:,:,iblk), & workspace_x(:,:,iblk), workspace_y(:,:,iblk), & arnoldi_basis_x (:,:,iblk, 1), & @@ -2842,8 +2841,8 @@ subroutine fgmres (zetax2 , etax2 , & !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks call calc_L2norm_squared(nx_block , ny_block , & - icellu (iblk), & - indxui (:,iblk), indxuj(:,iblk), & + icellU (iblk), & + indxUi (:,iblk), indxUj(:,iblk), & arnoldi_basis_x(:,:,iblk, 1) , & arnoldi_basis_y(:,:,iblk, 1) , & norm_squared(iblk)) @@ -2866,9 +2865,9 @@ subroutine fgmres (zetax2 , etax2 , & inverse_norm = c1 / norm_residual !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks - do ij = 1, icellu(iblk) - i = indxui(ij, iblk) - j = indxuj(ij, iblk) + do ij = 1, icellU(iblk) + i = indxUi(ij, iblk) + j = indxUj(ij, iblk) arnoldi_basis_x(i, j, iblk, 1) = arnoldi_basis_x(i, j, iblk, 1) * inverse_norm arnoldi_basis_y(i, j, iblk, 1) = arnoldi_basis_y(i, j, iblk, 1) * inverse_norm @@ -2920,9 +2919,9 @@ subroutine fgmres (zetax2 , etax2 , & !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks call matvec (nx_block , ny_block , & - icellu (iblk) , icellt (iblk), & - indxui (:,iblk) , indxuj (:,iblk), & - indxti (:,iblk) , indxtj (:,iblk), & + icellU (iblk) , icellT (iblk), & + indxUi (:,iblk) , indxUj (:,iblk), & + indxTi (:,iblk) , indxTj (:,iblk), & dxT (:,:,iblk) , dyT (:,:,iblk), & dxhy (:,:,iblk) , dyhx (:,:,iblk), & cxp (:,:,iblk) , cyp (:,:,iblk), & @@ -2947,8 +2946,8 @@ subroutine fgmres (zetax2 , etax2 , & !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks call calc_L2norm_squared(nx_block , ny_block , & - icellu (iblk), & - indxui (:,iblk), indxuj(:, iblk) , & + icellU (iblk), & + indxUi (:,iblk), indxUj(:, iblk) , & arnoldi_basis_x(:,:,iblk, nextit), & arnoldi_basis_y(:,:,iblk, nextit), & norm_squared(iblk)) @@ -2962,9 +2961,9 @@ subroutine fgmres (zetax2 , etax2 , & inverse_norm = c1 / hessenberg(nextit,initer) !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks - do ij = 1, icellu(iblk) - i = indxui(ij, iblk) - j = indxuj(ij, iblk) + do ij = 1, icellU(iblk) + i = indxUi(ij, iblk) + j = indxUj(ij, iblk) arnoldi_basis_x(i, j, iblk, nextit) = arnoldi_basis_x(i, j, iblk, nextit)*inverse_norm arnoldi_basis_y(i, j, iblk, nextit) = arnoldi_basis_y(i, j, iblk, nextit)*inverse_norm @@ -3028,9 +3027,9 @@ subroutine fgmres (zetax2 , etax2 , & t = rhs_hess(it) !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks - do ij = 1, icellu(iblk) - i = indxui(ij, iblk) - j = 
indxuj(ij, iblk) + do ij = 1, icellU(iblk) + i = indxUi(ij, iblk) + j = indxUj(ij, iblk) solx(i, j, iblk) = solx(i, j, iblk) + t * orig_basis_x(i, j, iblk, it) soly(i, j, iblk) = soly(i, j, iblk) + t * orig_basis_y(i, j, iblk, it) @@ -3072,9 +3071,9 @@ subroutine fgmres (zetax2 , etax2 , & do it = 1, nextit !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks - do ij = 1, icellu(iblk) - i = indxui(ij, iblk) - j = indxuj(ij, iblk) + do ij = 1, icellU(iblk) + i = indxUi(ij, iblk) + j = indxUj(ij, iblk) workspace_x(i, j, iblk) = workspace_x(i, j, iblk) + rhs_hess(it) * arnoldi_basis_x(i, j, iblk, it) workspace_y(i, j, iblk) = workspace_x(i, j, iblk) + rhs_hess(it) * arnoldi_basis_y(i, j, iblk, it) @@ -3206,9 +3205,9 @@ subroutine pgmres (zetax2 , etax2 , & !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks call matvec (nx_block , ny_block , & - icellu (iblk) , icellt (iblk), & - indxui (:,iblk) , indxuj (:,iblk), & - indxti (:,iblk) , indxtj (:,iblk), & + icellU (iblk) , icellT (iblk), & + indxUi (:,iblk) , indxUj (:,iblk), & + indxTi (:,iblk) , indxTj (:,iblk), & dxT (:,:,iblk) , dyT (:,:,iblk), & dxhy (:,:,iblk) , dyhx (:,:,iblk), & cxp (:,:,iblk) , cyp (:,:,iblk), & @@ -3220,8 +3219,8 @@ subroutine pgmres (zetax2 , etax2 , & uarear (:,:,iblk) , & workspace_x(:,:,iblk) , workspace_y(:,:,iblk)) call residual_vec (nx_block , ny_block , & - icellu (iblk), & - indxui (:,iblk), indxuj (:,iblk), & + icellU (iblk), & + indxUi (:,iblk), indxUj (:,iblk), & bx (:,:,iblk), by (:,:,iblk), & workspace_x(:,:,iblk), workspace_y(:,:,iblk), & arnoldi_basis_x (:,:,iblk, 1), & @@ -3235,8 +3234,8 @@ subroutine pgmres (zetax2 , etax2 , & !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks call calc_L2norm_squared(nx_block , ny_block , & - icellu (iblk), & - indxui (:,iblk), indxuj(:, iblk), & + icellU (iblk), & + indxUi (:,iblk), indxUj(:, iblk), & arnoldi_basis_x(:,:,iblk, 1), & arnoldi_basis_y(:,:,iblk, 1), & norm_squared(iblk)) @@ -3259,9 +3258,9 @@ subroutine pgmres (zetax2 , etax2 , & inverse_norm = c1 / norm_residual !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks - do ij = 1, icellu(iblk) - i = indxui(ij, iblk) - j = indxuj(ij, iblk) + do ij = 1, icellU(iblk) + i = indxUi(ij, iblk) + j = indxUj(ij, iblk) arnoldi_basis_x(i, j, iblk, 1) = arnoldi_basis_x(i, j, iblk, 1) * inverse_norm arnoldi_basis_y(i, j, iblk, 1) = arnoldi_basis_y(i, j, iblk, 1) * inverse_norm @@ -3302,9 +3301,9 @@ subroutine pgmres (zetax2 , etax2 , & !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks call matvec (nx_block , ny_block , & - icellu (iblk) , icellt (iblk), & - indxui (:,iblk) , indxuj (:,iblk), & - indxti (:,iblk) , indxtj (:,iblk), & + icellU (iblk) , icellT (iblk), & + indxUi (:,iblk) , indxUj (:,iblk), & + indxTi (:,iblk) , indxTj (:,iblk), & dxT (:,:,iblk) , dyT (:,:,iblk), & dxhy (:,:,iblk) , dyhx (:,:,iblk), & cxp (:,:,iblk) , cyp (:,:,iblk), & @@ -3329,8 +3328,8 @@ subroutine pgmres (zetax2 , etax2 , & !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks call calc_L2norm_squared(nx_block , ny_block , & - icellu (iblk), & - indxui (:,iblk), indxuj(:, iblk) , & + icellU (iblk), & + indxUi (:,iblk), indxUj(:, iblk) , & arnoldi_basis_x(:,:,iblk, nextit), & arnoldi_basis_y(:,:,iblk, nextit), & norm_squared(iblk)) @@ -3344,9 +3343,9 @@ subroutine pgmres (zetax2 , etax2 , & inverse_norm = c1 / hessenberg(nextit,initer) !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks - do ij = 1, icellu(iblk) - i = indxui(ij, iblk) - j = indxuj(ij, iblk) + do ij = 1, icellU(iblk) + i = indxUi(ij, iblk) + j 
= indxUj(ij, iblk) arnoldi_basis_x(i, j, iblk, nextit) = arnoldi_basis_x(i, j, iblk, nextit)*inverse_norm arnoldi_basis_y(i, j, iblk, nextit) = arnoldi_basis_y(i, j, iblk, nextit)*inverse_norm @@ -3412,9 +3411,9 @@ subroutine pgmres (zetax2 , etax2 , & t = rhs_hess(it) !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks - do ij = 1, icellu(iblk) - i = indxui(ij, iblk) - j = indxuj(ij, iblk) + do ij = 1, icellU(iblk) + i = indxUi(ij, iblk) + j = indxUj(ij, iblk) workspace_x(i, j, iblk) = workspace_x(i, j, iblk) + t * arnoldi_basis_x(i, j, iblk, it) workspace_y(i, j, iblk) = workspace_y(i, j, iblk) + t * arnoldi_basis_y(i, j, iblk, it) @@ -3468,9 +3467,9 @@ subroutine pgmres (zetax2 , etax2 , & do it = 1, nextit !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks - do ij = 1, icellu(iblk) - i = indxui(ij, iblk) - j = indxuj(ij, iblk) + do ij = 1, icellU(iblk) + i = indxUi(ij, iblk) + j = indxUj(ij, iblk) workspace_x(i, j, iblk) = workspace_x(i, j, iblk) + rhs_hess(it) * arnoldi_basis_x(i, j, iblk, it) workspace_y(i, j, iblk) = workspace_x(i, j, iblk) + rhs_hess(it) * arnoldi_basis_y(i, j, iblk, it) @@ -3549,9 +3548,9 @@ subroutine precondition(zetax2 , etax2, & elseif (precond_type == 'diag') then ! Jacobi preconditioner (diagonal) !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks - do ij = 1, icellu(iblk) - i = indxui(ij, iblk) - j = indxuj(ij, iblk) + do ij = 1, icellU(iblk) + i = indxUi(ij, iblk) + j = indxUj(ij, iblk) wx(i,j,iblk) = vx(i,j,iblk) / diagx(i,j,iblk) wy(i,j,iblk) = vy(i,j,iblk) / diagy(i,j,iblk) @@ -3632,9 +3631,9 @@ subroutine orthogonalize(ortho_type , initer , & !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks - do ij = 1, icellu(iblk) - i = indxui(ij, iblk) - j = indxuj(ij, iblk) + do ij = 1, icellU(iblk) + i = indxUi(ij, iblk) + j = indxUj(ij, iblk) local_dot(iblk) = local_dot(iblk) + & (arnoldi_basis_x(i, j, iblk, it) * arnoldi_basis_x(i, j, iblk, nextit)) + & @@ -3652,9 +3651,9 @@ subroutine orthogonalize(ortho_type , initer , & do it = 1, initer !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks - do ij = 1, icellu(iblk) - i = indxui(ij, iblk) - j = indxuj(ij, iblk) + do ij = 1, icellU(iblk) + i = indxUi(ij, iblk) + j = indxUj(ij, iblk) arnoldi_basis_x(i, j, iblk, nextit) = arnoldi_basis_x(i, j, iblk, nextit) & - hessenberg(it,initer) * arnoldi_basis_x(i, j, iblk, it) @@ -3671,9 +3670,9 @@ subroutine orthogonalize(ortho_type , initer , & !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks - do ij = 1, icellu(iblk) - i = indxui(ij, iblk) - j = indxuj(ij, iblk) + do ij = 1, icellU(iblk) + i = indxUi(ij, iblk) + j = indxUj(ij, iblk) local_dot(iblk) = local_dot(iblk) + & (arnoldi_basis_x(i, j, iblk, it) * arnoldi_basis_x(i, j, iblk, nextit)) + & @@ -3686,9 +3685,9 @@ subroutine orthogonalize(ortho_type , initer , & !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks - do ij = 1, icellu(iblk) - i = indxui(ij, iblk) - j = indxuj(ij, iblk) + do ij = 1, icellU(iblk) + i = indxUi(ij, iblk) + j = indxUj(ij, iblk) arnoldi_basis_x(i, j, iblk, nextit) = arnoldi_basis_x(i, j, iblk, nextit) & - hessenberg(it,initer) * arnoldi_basis_x(i, j, iblk, it) diff --git a/cicecore/cicedynB/infrastructure/comm/mpi/ice_boundary.F90 b/cicecore/cicedynB/infrastructure/comm/mpi/ice_boundary.F90 index 2b64f8932..9fda67dad 100644 --- a/cicecore/cicedynB/infrastructure/comm/mpi/ice_boundary.F90 +++ b/cicecore/cicedynB/infrastructure/comm/mpi/ice_boundary.F90 @@ -81,6 +81,7 @@ module ice_boundary module procedure ice_HaloUpdate2DR8, & 
ice_HaloUpdate2DR4, & ice_HaloUpdate2DI4, & + ice_HaloUpdate2DL1, & ice_HaloUpdate3DR8, & ice_HaloUpdate3DR4, & ice_HaloUpdate3DI4, & @@ -2384,6 +2385,69 @@ subroutine ice_HaloUpdate2DI4(array, halo, & end subroutine ice_HaloUpdate2DI4 +!*********************************************************************** + + subroutine ice_HaloUpdate2DL1(array, halo, & + fieldLoc, fieldKind, & + fillValue) + +! This routine updates ghost cells for an input array and is a +! member of a group of routines under the generic interface +! ice\_HaloUpdate. This routine is the specific interface +! for 2d horizontal logical arrays. + + type (ice_halo), intent(in) :: & + halo ! precomputed halo structure containing all + ! information needed for halo update + + integer (int_kind), intent(in) :: & + fieldKind, &! id for type of field (scalar, vector, angle) + fieldLoc ! id for location on horizontal grid + ! (center, NEcorner, Nface, Eface) + + integer (int_kind), intent(in), optional :: & + fillValue ! optional value to put in ghost cells + ! where neighbor points are unknown + ! (e.g. eliminated land blocks or + ! closed boundaries) + + logical (log_kind), dimension(:,:,:), intent(inout) :: & + array ! array containing field for which halo + ! needs to be updated + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind), dimension(:,:,:), allocatable :: & + iarray ! integer array for logical + + character(len=*), parameter :: subname = '(ice_HaloUpdate2DL1)' + +!----------------------------------------------------------------------- +! +! copy logical into integer array and call haloupdate on integer array +! +!----------------------------------------------------------------------- + + allocate(iarray(size(array,dim=1),size(array,dim=2),size(array,dim=3))) + iarray(:,:,:) = 0 + where (array) iarray = 1 + + call ice_HaloUpdate(iarray, halo, & + fieldLoc, fieldKind, & + fillValue) + + array = .false. + where (iarray /= 0) array = .true. + deallocate(iarray) + +!----------------------------------------------------------------------- + + end subroutine ice_HaloUpdate2DL1 + !*********************************************************************** subroutine ice_HaloUpdate3DR8(array, halo, & diff --git a/cicecore/cicedynB/infrastructure/comm/serial/ice_boundary.F90 b/cicecore/cicedynB/infrastructure/comm/serial/ice_boundary.F90 index cafe4dc05..f10a9f432 100644 --- a/cicecore/cicedynB/infrastructure/comm/serial/ice_boundary.F90 +++ b/cicecore/cicedynB/infrastructure/comm/serial/ice_boundary.F90 @@ -68,6 +68,7 @@ module ice_boundary module procedure ice_HaloUpdate2DR8, & ice_HaloUpdate2DR4, & ice_HaloUpdate2DI4, & + ice_HaloUpdate2DL1, & ice_HaloUpdate3DR8, & ice_HaloUpdate3DR4, & ice_HaloUpdate3DI4, & @@ -1541,6 +1542,69 @@ subroutine ice_HaloUpdate2DI4(array, halo, & end subroutine ice_HaloUpdate2DI4 +!*********************************************************************** + + subroutine ice_HaloUpdate2DL1(array, halo, & + fieldLoc, fieldKind, & + fillValue) + +! This routine updates ghost cells for an input array and is a +! member of a group of routines under the generic interface +! ice\_HaloUpdate. This routine is the specific interface +! for 2d horizontal logical arrays. + + type (ice_halo), intent(in) :: & + halo ! precomputed halo structure containing all + ! information needed for halo update + + integer (int_kind), intent(in) :: & + fieldKind, &! 
id for type of field (scalar, vector, angle) + fieldLoc ! id for location on horizontal grid + ! (center, NEcorner, Nface, Eface) + + integer (int_kind), intent(in), optional :: & + fillValue ! optional value to put in ghost cells + ! where neighbor points are unknown + ! (e.g. eliminated land blocks or + ! closed boundaries) + + logical (log_kind), dimension(:,:,:), intent(inout) :: & + array ! array containing field for which halo + ! needs to be updated + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind), dimension(:,:,:), allocatable :: & + iarray ! integer array for logical + + character(len=*), parameter :: subname = '(ice_HaloUpdate2DL1)' + +!----------------------------------------------------------------------- +! +! copy logical into integer array and call haloupdate on integer array +! +!----------------------------------------------------------------------- + + allocate(iarray(size(array,dim=1),size(array,dim=2),size(array,dim=3))) + iarray(:,:,:) = 0 + where (array) iarray = 1 + + call ice_HaloUpdate(iarray, halo, & + fieldLoc, fieldKind, & + fillValue) + + array = .false. + where (iarray /= 0) array = .true. + deallocate(iarray) + +!----------------------------------------------------------------------- + + end subroutine ice_HaloUpdate2DL1 + !*********************************************************************** subroutine ice_HaloUpdate3DR8(array, halo, & diff --git a/cicecore/cicedynB/infrastructure/ice_grid.F90 b/cicecore/cicedynB/infrastructure/ice_grid.F90 index a7d8c2357..b7082bf93 100644 --- a/cicecore/cicedynB/infrastructure/ice_grid.F90 +++ b/cicecore/cicedynB/infrastructure/ice_grid.F90 @@ -192,11 +192,6 @@ module ice_grid lmask_n, & ! northern hemisphere mask lmask_s ! southern hemisphere mask - logical (kind=log_kind), dimension (:,:,:), allocatable, public :: & - iceumask, & ! ice extent mask (U-cell) - icenmask, & ! ice extent mask (N-cell) - iceemask ! ice extent mask (E-cell) - real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & rndex_global ! global index for local subdomain (dbl) @@ -277,7 +272,6 @@ subroutine alloc_grid umaskCD (nx_block,ny_block,max_blocks), & ! land/boundary mask, velocity (U-cell) nmask (nx_block,ny_block,max_blocks), & ! land/boundary mask (N-cell) emask (nx_block,ny_block,max_blocks), & ! land/boundary mask (E-cell) - iceumask (nx_block,ny_block,max_blocks), & ! u mask for dynamics lmask_n (nx_block,ny_block,max_blocks), & ! northern hemisphere mask lmask_s (nx_block,ny_block,max_blocks), & ! southern hemisphere mask rndex_global(nx_block,ny_block,max_blocks), & ! global index for local subdomain (dbl) @@ -298,8 +292,6 @@ subroutine alloc_grid if (grid_ice == 'CD' .or. grid_ice == 'C') then allocate( & - iceemask (nx_block,ny_block,max_blocks), & ! e mask for dynamics - icenmask (nx_block,ny_block,max_blocks), & ! 
n mask for dynamics ratiodxN (nx_block,ny_block,max_blocks), & ratiodyE (nx_block,ny_block,max_blocks), & ratiodxNr(nx_block,ny_block,max_blocks), & diff --git a/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 b/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 index cfc44d987..2c7d3d63c 100644 --- a/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 +++ b/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 @@ -55,6 +55,7 @@ subroutine dumpfile(filename_spec) use ice_blocks, only: nx_block, ny_block use ice_domain, only: nblocks use ice_domain_size, only: nilyr, nslyr, ncat, max_blocks + use ice_dyn_shared, only: iceUmask, iceEmask, iceNmask use ice_flux, only: scale_factor, swvdr, swvdf, swidr, swidf, & strocnxT_iavg, strocnyT_iavg, sst, frzmlt, & stressp_1, stressp_2, stressp_3, stressp_4, & @@ -63,7 +64,7 @@ subroutine dumpfile(filename_spec) stresspT, stressmT, stress12T, & stresspU, stressmU, stress12U use ice_flux, only: coszen - use ice_grid, only: grid_ice, tmask, iceumask, iceemask, icenmask + use ice_grid, only: grid_ice, tmask use ice_state, only: aicen, vicen, vsnon, trcrn, uvel, vvel, & uvelE, vvelE, uvelN, vvelN @@ -220,7 +221,7 @@ subroutine dumpfile(filename_spec) do j = 1, ny_block do i = 1, nx_block work1(i,j,iblk) = c0 - if (iceumask(i,j,iblk)) work1(i,j,iblk) = c1 + if (iceUmask(i,j,iblk)) work1(i,j,iblk) = c1 enddo enddo enddo @@ -234,7 +235,7 @@ subroutine dumpfile(filename_spec) do j = 1, ny_block do i = 1, nx_block work1(i,j,iblk) = c0 - if (icenmask(i,j,iblk)) work1(i,j,iblk) = c1 + if (iceNmask(i,j,iblk)) work1(i,j,iblk) = c1 enddo enddo enddo @@ -246,7 +247,7 @@ subroutine dumpfile(filename_spec) do j = 1, ny_block do i = 1, nx_block work1(i,j,iblk) = c0 - if (iceemask(i,j,iblk)) work1(i,j,iblk) = c1 + if (iceEmask(i,j,iblk)) work1(i,j,iblk) = c1 enddo enddo enddo @@ -276,6 +277,7 @@ subroutine restartfile (ice_ic) use ice_domain, only: nblocks, halo_info use ice_domain_size, only: nilyr, nslyr, ncat, & max_blocks + use ice_dyn_shared, only: iceUmask, iceEmask, iceNmask use ice_flux, only: scale_factor, swvdr, swvdf, swidr, swidf, & strocnxT_iavg, strocnyT_iavg, sst, frzmlt, & stressp_1, stressp_2, stressp_3, stressp_4, & @@ -284,8 +286,7 @@ subroutine restartfile (ice_ic) stresspT, stressmT, stress12T, & stresspU, stressmU, stress12U use ice_flux, only: coszen - use ice_grid, only: tmask, grid_type, grid_ice, & - iceumask, iceemask, icenmask, grid_average_X2Y + use ice_grid, only: tmask, grid_type, grid_ice, grid_average_X2Y use ice_state, only: trcr_depend, aice, vice, vsno, trcr, & aice0, aicen, vicen, vsnon, trcrn, aice_init, uvel, vvel, & uvelE, vvelE, uvelN, vvelN, uvelT, vvelT, & @@ -532,12 +533,12 @@ subroutine restartfile (ice_ic) call read_restart_field(nu_restart,0,work1,'ruf8', & 'iceumask',1,diag,field_loc_center, field_type_scalar) - iceumask(:,:,:) = .false. + iceUmask(:,:,:) = .false. !$OMP PARALLEL DO PRIVATE(iblk,i,j) do iblk = 1, nblocks do j = 1, ny_block do i = 1, nx_block - if (work1(i,j,iblk) > p5) iceumask(i,j,iblk) = .true. + if (work1(i,j,iblk) > p5) iceUmask(i,j,iblk) = .true. enddo enddo enddo @@ -549,12 +550,12 @@ subroutine restartfile (ice_ic) call read_restart_field(nu_restart,0,work1,'ruf8', & 'icenmask',1,diag,field_loc_center, field_type_scalar) - icenmask(:,:,:) = .false. + iceNmask(:,:,:) = .false. !$OMP PARALLEL DO PRIVATE(iblk,i,j) do iblk = 1, nblocks do j = 1, ny_block do i = 1, nx_block - if (work1(i,j,iblk) > p5) icenmask(i,j,iblk) = .true. + if (work1(i,j,iblk) > p5) iceNmask(i,j,iblk) = .true. 
enddo enddo enddo @@ -565,12 +566,12 @@ subroutine restartfile (ice_ic) call read_restart_field(nu_restart,0,work1,'ruf8', & 'iceemask',1,diag,field_loc_center, field_type_scalar) - iceemask(:,:,:) = .false. + iceEmask(:,:,:) = .false. !$OMP PARALLEL DO PRIVATE(iblk,i,j) do iblk = 1, nblocks do j = 1, ny_block do i = 1, nx_block - if (work1(i,j,iblk) > p5) iceemask(i,j,iblk) = .true. + if (work1(i,j,iblk) > p5) iceEmask(i,j,iblk) = .true. enddo enddo enddo @@ -710,13 +711,14 @@ subroutine restartfile_v4 (ice_ic) use ice_domain, only: nblocks, distrb_info use ice_domain_size, only: nilyr, nslyr, ncat, nx_global, ny_global, & max_blocks + use ice_dyn_shared, only: iceUmask use ice_flux, only: scale_factor, swvdr, swvdf, swidr, swidf, & strocnxT_iavg, strocnyT_iavg, sst, frzmlt, & stressp_1, stressp_2, stressp_3, stressp_4, & stressm_1, stressm_2, stressm_3, stressm_4, & stress12_1, stress12_2, stress12_3, stress12_4 use ice_gather_scatter, only: scatter_global_stress - use ice_grid, only: tmask, iceumask + use ice_grid, only: tmask use ice_read_write, only: ice_open, ice_read, ice_read_global use ice_state, only: trcr_depend, aice, vice, vsno, trcr, & aice0, aicen, vicen, vsnon, trcrn, aice_init, uvel, vvel, & @@ -945,12 +947,12 @@ subroutine restartfile_v4 (ice_ic) call ice_read(nu_restart,0,work1,'ruf8',diag, & field_loc_center, field_type_scalar) - iceumask(:,:,:) = .false. + iceUmask(:,:,:) = .false. !$OMP PARALLEL DO PRIVATE(iblk,i,j) do iblk = 1, nblocks do j = 1, ny_block do i = 1, nx_block - if (work1(i,j,iblk) > p5) iceumask(i,j,iblk) = .true. + if (work1(i,j,iblk) > p5) iceUmask(i,j,iblk) = .true. enddo enddo enddo diff --git a/doc/source/cice_index.rst b/doc/source/cice_index.rst index 99679e791..9dc2a06c4 100644 --- a/doc/source/cice_index.rst +++ b/doc/source/cice_index.rst @@ -354,10 +354,10 @@ either Celsius or Kelvin units). "icells", "number of grid cells with specified property (for vectorization)", "" "iceruf", "ice surface roughness at atmosphere interface", "5.\ :math:`\times`\ 10\ :math:`^{-4}` m" "iceruf_ocn", "under-ice roughness (at ocean interface)", "0.03 m" - "iceemask", "ice extent mask (E-cell)", "" - "icenmask", "ice extent mask (N-cell)", "" - "icetmask", "ice extent mask (T-cell)", "" - "iceumask", "ice extent mask (U-cell)", "" + "iceEmask", "dynamics ice extent mask (E-cell)", "" + "iceNmask", "dynamics ice extent mask (N-cell)", "" + "iceTmask", "dynamics ice extent mask (T-cell)", "" + "iceUmask", "dynamics ice extent mask (U-cell)", "" "idate", "the date at the end of the current time step (yyyymmdd)", "" "idate0", "initial date", "" "ierr", "general-use error flag", "" diff --git a/doc/source/user_guide/ug_implementation.rst b/doc/source/user_guide/ug_implementation.rst index 4bcfe1ede..a7cc66948 100644 --- a/doc/source/user_guide/ug_implementation.rst +++ b/doc/source/user_guide/ug_implementation.rst @@ -426,16 +426,16 @@ respectively) are useful in conditional statements. In addition to the land masks, two other masks are implemented in *dyn\_prep* in order to reduce the dynamics component’s work on a global -grid. At each time step the logical masks ``icetmask`` and ``iceumask`` are +grid. At each time step the logical masks ``iceTmask`` and ``iceUmask`` are determined from the current ice extent, such that they have the value “true” wherever ice exists. They also include a border of cells around the ice pack for numerical purposes. 
These masks are used in the dynamics
component to prevent unnecessary calculations on grid points where there is
no ice. They are not used in the thermodynamics component, so that ice may
form in previously ice-free cells. Like the
-land masks ``hm`` and ``uvm``, the ice extent masks ``icetmask`` and ``iceumask``
+land masks ``hm`` and ``uvm``, the ice extent masks ``iceTmask`` and ``iceUmask``
 are for T-cells and U-cells, respectively. Note that the ice extent masks
-``iceemask`` and ``icenmask`` are also defined when using the C or CD grid.
+``iceEmask`` and ``iceNmask`` are also defined when using the C or CD grid.

 Improved parallel performance may result from utilizing halo masks for
 boundary updates of the full ice state, incremental remapping transport,

From 16b78da0b21d5440063eb7c3e53d78efe4764136 Mon Sep 17 00:00:00 2001
From: Philippe Blain
Date: Thu, 20 Oct 2022 18:23:47 -0400
Subject: [PATCH 29/57] ice_dyn_vp: allow for bit-for-bit reproducibility under
 `bfbflag` (#774)

* doc: fix typo in index (bfbflag)

* doc: correct default value of 'maxits_nonlin'

The "Table of namelist options" in the user guide lists 'maxits_nonlin'
as having a default value of 1000, whereas its actual default is 4, both
in the namelist and in 'ice_init.F90'. This has been the case since the
original implementation of the implicit solver in f7fd063 (dynamics:
add implicit VP solver (#491), 2020-09-22).

Fix the documentation.

* doc: VP solver is validated with OpenMP

When the implicit VP solver was added in f7fd063 (dynamics: add implicit
VP solver (#491), 2020-09-22), it had not yet been tested with OpenMP
enabled.

The OpenMP implementation was carefully reviewed and then fixed in
d1e972a (Update OMP (#680), 2022-02-18), which led to all runs of the
'decomp' suite completing and all restart tests passing. The 'bfbcomp'
tests are still failing, but this is due to the code not using the CICE
global sum implementation correctly, which will be fixed in the next
commits.

Update the documentation accordingly.

* ice_dyn_vp: activate OpenMP in 'dyn_prep2' loop

When the OpenMP implementation was reviewed and fixed in d1e972a (Update
OMP (#680), 2022-02-18), the 'PRIVATE' clause of the OpenMP directive
for the loop where 'dyn_prep2' is called in 'implicit_solver' was
corrected in line with what was done in 'ice_dyn_evp', but OpenMP was
left unactivated for this loop (the 'TCXOMP' was not changed to a real
'OMP' directive).

Activate OpenMP for this loop. All runs and restart tests of the
'decomp_suite' still pass with this change.

* machines: eccc: add ICE_MACHINE_MAXRUNLENGTH to ppp[56]

* machines: eccc: use PBS-enabled OpenMPI for 'ppp6_gnu'

The system installation of OpenMPI at /usr/mpi/gcc/openmpi-4.1.2a1/ is
not compiled with support for PBS. This leads to failures as the MPI
runtime does not have the same view of the number of available
processors as the job scheduler.

Use our own build of OpenMPI, compiled with PBS support, for the
'ppp6_gnu' environment, which uses OpenMPI.

* machines: eccc: set I_MPI_FABRICS=ofi

Intel MPI 2021.5.1, which comes with oneAPI 2022.1.2, seems to have an
intermittent bug where a call to 'MPI_Waitall' fails with:

    Abort(17) on node 0 (rank 0 in comm 0): Fatal error in PMPI_Waitall: See the MPI_ERROR field in MPI_Status for the error code

and no core dump is produced.
This affects at least these cases of the 'decomp' suite:

- *_*_restart_gx3_16x2x1x1x800_droundrobin
- *_*_restart_gx3_16x2x2x2x200_droundrobin

This was reported to Intel and they suggested setting the variable
'I_MPI_FABRICS' to 'ofi' (the default being 'shm:ofi' [1]). This
disables shared memory transport and indeed fixes the failures.

Set this variable for all ECCC machine files using Intel MPI.

[1] https://www.intel.com/content/www/us/en/develop/documentation/mpi-developer-reference-linux/top/environment-variable-reference/environment-variables-for-fabrics-control/communication-fabrics-control.html

* machines: eccc: set I_MPI_CBWR for BASEGEN/BASECOM runs

Intel MPI, in contrast to OpenMPI (as far as I was able to test, and
see [1], [2]), does not (by default) guarantee that repeated runs of the
same code on the same machine with the same number of MPI ranks yield
the same results when collective operations (e.g. 'MPI_ALLREDUCE') are
used.

Since the VP solver uses MPI_ALLREDUCE in its algorithm, this leads to
repeated runs of the code giving different answers, and to
baseline-comparing runs with code built from the same commit failing.

When generating a baseline or comparing against an existing baseline,
set the environment variable 'I_MPI_CBWR' to 1 for ECCC machine files
using Intel MPI [3], so that (processor) topology-aware collective
algorithms are not used and results are reproducible.

Note that we do not need to set this variable on robert or underhill, on
which jobs have exclusive node access and thus job placement (on
processors) is guaranteed to be reproducible.

[1] https://stackoverflow.com/a/45916859/
[2] https://scicomp.stackexchange.com/a/2386/
[3] https://www.intel.com/content/www/us/en/develop/documentation/mpi-developer-reference-linux/top/environment-variable-reference/i-mpi-adjust-family-environment-variables.html#i-mpi-adjust-family-environment-variables_GUID-A5119508-5588-4CF5-9979-8D60831D1411

* ice_dyn_vp: fgmres: exit early if right-hand-side vector is zero

If starting a run with "ice_ic='none'" (no ice), the linearized
problem for the ice velocity A x = b will have b = 0, since all terms in
the right hand side vector will be zero:

- strint[xy] is zero because the velocity is zero
- tau[xy] is zero because the ocean velocity is also zero
- [uv]vel_init is zero
- strair[xy] is zero because the concentration is zero
- strtlt[xy] is zero because the ocean velocity is zero

We thus have a linear system A x = b with b=0, so we must have x=0.

In the FGMRES linear solver, this special case is not taken into
account, and so we end up with an all-zero initial residual since
workspace_[xy] is also zero because of the all-zero initial guess
'sol[xy]', which corresponds to the initial ice velocity. This then
leads to a division by zero when normalizing the first Arnoldi vector.

Fix this special case by computing the norm of the right-hand-side
vector before starting the iterations, and exiting early if it is zero.
This is in line with the GMRES implementation in SciPy [1].

[1] https://github.com/scipy/scipy/blob/651a9b717deb68adde9416072c1e1d5aa14a58a1/scipy/sparse/linalg/_isolve/iterative.py#L620-L628

Close: https://github.com/phil-blain/CICE/issues/42

* ice_dyn_vp: add global_norm, global_dot_product functions

The VP solver uses a linear solver, FGMRES, as part of the non-linear
iteration. The FGMRES algorithm involves computing the norm of a
distributed vector field, thus performing global sums.
These norms are computed by first summing the squared X and Y components
of a vector field in subroutine 'calc_L2norm_squared', summing these
over the local blocks, and then doing a global (MPI) sum using
'global_sum'.

This approach does not lead to reproducible results when the MPI
distribution, or the number of local blocks, is changed, for reasons
explained in the "Reproducible sums" section of the Developer Guide
(mostly, floating point addition is not associative). This was partly
pointed out in [1] but I failed to realize it at the time.

Introduce a new function, 'global_dot_product', to encapsulate the
computation of the dot product of two grid vectors, each split into two
arrays (for the X and Y components).

Compute the reduction locally as is done in 'calc_L2norm_squared', but
throw away the result and use the existing 'global_sum' function when
'bfbflag' is active, passing it the temporary array used to compute the
element-by-element product. This approach avoids a performance
regression from the added work done in 'global_sum', such that
non-bfbflag runs are as fast as before.

Note that since 'global_sum' loops on the whole array (and not just ice
points as 'global_dot_product' does), make sure to zero-initialize the
'prod' local array.

Also add a 'global_norm' function implemented using
'global_dot_product'. Both functions will be used in subsequent commits
to ensure bit-for-bit reproducibility.

* ice_dyn_vp: use global_{norm,dot_product} for bit-for-bit output reproducibility

Make the results of the VP solver reproducible if desired by
refactoring the code to use the subroutines 'global_norm' and
'global_dot_product' added in the previous commit.

The same pattern appears in the FGMRES solver (subroutine 'fgmres'), the
preconditioner 'pgmres' which uses the same algorithm, and the Classical
and Modified Gram-Schmidt algorithms in 'orthogonalize'.

These modifications do not change the number of global sums in the
fgmres, pgmres and the MGS algorithms. For the CGS algorithm, there is
(in theory) a slight performance impact as 'global_dot_product' is
called inside the loop, whereas previously we called
'global_allreduce_sum' after the loop to compute all 'initer' sums at
the same time.

To keep that optimization, we would have to implement a new interface
'global_allreduce_sum' which would take an array of shape
(nx_block,ny_block,max_blocks,k) and sum over the first three
dimensions before performing the global reduction over the k dimension.

We choose not to go that route for now, mostly because the CGS algorithm
is in any case (by default) only used for the PGMRES preconditioner, so
the cost should be relatively low: 'initer' corresponds to 'dim_pgmres'
in the namelist, which should be kept low for efficiency (default 5).

These changes lead to bit-for-bit reproducibility (the decomp_suite
passes) when using 'precond=ident' and 'precond=diag' along with
'bfbflag=reprosum'. 'precond=pgmres' is still not bit-for-bit because
some halo updates are skipped for efficiency. This will be addressed in
a following commit.

[1] https://github.com/CICE-Consortium/CICE/pull/491#discussion_r460147629

* ice_dyn_vp: do not skip halo updates in 'pgmres' under 'bfbflag'

The 'pgmres' subroutine implements a separate GMRES solver and is used
as a preconditioner for the FGMRES linear solver. Since it is only a
preconditioner, it was decided to skip the halo updates after computing
the matrix-vector product (in 'matvec'), for efficiency.
This leads to non-reproducibility since the content of the non-updated
halos depends on the block / MPI distribution.

Add the required halo updates, but only perform them when we are
explicitly asking for bit-for-bit global sums, i.e. when 'bfbflag' is
set to something other than 'off'.

Adjust the interfaces of 'pgmres' and 'precondition' (from which
'pgmres' is called) to accept 'halo_info_mask', since it is needed for
masked updates.

Closes https://github.com/CICE-Consortium/CICE/issues/518

* ice_dyn_vp: use global_{norm,dot_product} for bit-for-bit log reproducibility

In the previous commits we ensured bit-for-bit reproducibility of the
outputs when using the VP solver.

Some global norms computed during the nonlinear iteration still use the
same non-reproducible pattern of summing over blocks locally before
performing the reduction. However, these norms are used only to monitor
the convergence in the log file, as well as to exit the iteration when
the required convergence level is reached ('nlres_norm'). Only
'nlres_norm' could (in theory) influence the output, but it is unlikely
that a difference due to floating point errors would influence the
'if (nlres_norm < tol_nl)' condition used to exit the nonlinear
iteration.

Change these remaining cases to also use 'global_norm', leading to
bit-for-bit log reproducibility.

* ice_dyn_vp: remove unused subroutine and clean up interfaces

The previous commit removed the last caller of 'calc_L2norm_squared'.
Remove the subroutine.

Also, do not compute 'sum_squared' in 'residual_vec', since the variable
'L2norm' which receives this value has also been unused in
'anderson_solver' since the previous commit. Remove that variable, and
adjust the interface of 'residual_vec' accordingly.

* ice_global_reductions: remove 'global_allreduce_sum'

In a previous commit, we removed the sole caller of
'global_allreduce_sum' (in ice_dyn_vp::orthogonalize). We do not expect
that function to be used elsewhere in the code, so remove it from
ice_global_reductions. Update the 'sumchk' unit test accordingly.

* doc: mention VP solver is only reproducible using 'bfbflag'

The previous commits made sure that the model outputs as well as the log
file output are bit-for-bit reproducible when using the VP solver by
refactoring the code to use the existing 'global_sum' subroutine.

Add a note in the documentation mentioning that 'bfbflag' is required to
get bit-for-bit reproducible results under different decompositions /
MPI counts when using the VP solver.

Also, adjust the doc about 'bfbflag=lsum8' being the same as
'bfbflag=off', since this is not the case for the VP solver: with
'bfbflag=off' we use the scalar version of 'global_sum', whereas with
'bfbflag=lsum8' we use the array version.

* ice_dyn_vp: improve default parameters for VP solver

During QC testing of the previous commit, the 5-year QC test with the
updated VP solver failed twice with "bad departure points" after a few
years of simulation. Simply bumping the number of nonlinear iterations
(maxits_nonlin) from 4 to 5 makes these failures disappear and allows
the simulations to run to completion, suggesting the solution is not
converged enough with 4 iterations.

We also noticed that in these failing cases, the relative tolerance for
the linear solver (reltol_fgmres = 1E-2) is too small to be reached in
less than 50 iterations (maxits_fgmres), and that's the case at each
nonlinear iteration.
Other papers mention a relative tolerance of 1E-1 for the linear solver, and using this value also allows both cases to run to completion (even without changing maxits_nonlin). Let's set the default tolerance for the linear solver to 1E-1, and let's be conservative and bump the number of nonlinear iterations to 10. This should give us a more converged solution and add robustness to the default settings. --- cicecore/cicedynB/dynamics/ice_dyn_vp.F90 | 351 ++++++++++-------- cicecore/cicedynB/general/ice_init.F90 | 4 +- .../comm/mpi/ice_global_reductions.F90 | 69 ---- .../comm/serial/ice_global_reductions.F90 | 69 ---- cicecore/drivers/unittest/sumchk/sumchk.F90 | 64 ---- configuration/scripts/ice_in | 4 +- configuration/scripts/machines/env.ppp5_intel | 7 + configuration/scripts/machines/env.ppp6_gnu | 3 +- .../scripts/machines/env.ppp6_gnu-impi | 7 + configuration/scripts/machines/env.ppp6_intel | 7 + .../scripts/machines/env.ppp6_intel19 | 7 + .../scripts/machines/env.robert_intel | 2 + .../scripts/machines/env.underhill_intel | 2 + doc/source/cice_index.rst | 2 +- doc/source/developer_guide/dg_other.rst | 8 +- doc/source/science_guide/sg_dynamics.rst | 3 +- doc/source/user_guide/ug_case_settings.rst | 4 +- 17 files changed, 240 insertions(+), 373 deletions(-) diff --git a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 index caedecc1e..6534e7568 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 @@ -51,7 +51,7 @@ module ice_dyn_vp seabed_stress, Ktens, stack_fields, unstack_fields use ice_fileunits, only: nu_diag use ice_flux, only: fmU - use ice_global_reductions, only: global_sum, global_allreduce_sum + use ice_global_reductions, only: global_sum use ice_grid, only: dxT, dyT, dxhy, dyhx, cxp, cyp, cxm, cym, uarear use ice_exit, only: abort_ice use icepack_intfc, only: icepack_warnings_flush, icepack_warnings_aborted @@ -341,9 +341,7 @@ subroutine implicit_solver (dt) call grid_average_X2Y('F',strairyT,'T',strairyU,'U') endif -! tcraig, tcx, threading here leads to some non-reproducbile results and failures in icepack_ice_strength -! need to do more debugging - !$TCXOMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,ij,i,j) + !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,ij,i,j) do iblk = 1, nblocks !----------------------------------------------------------------- @@ -411,7 +409,7 @@ subroutine implicit_solver (dt) enddo ! ij enddo ! iblk - !$TCXOMP END PARALLEL DO + !$OMP END PARALLEL DO call icepack_warnings_flush(nu_diag) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & @@ -771,9 +769,6 @@ subroutine anderson_solver (icellT , icellU , & stress_Pr, & ! x,y-derivatives of the replacement pressure diag_rheo ! contributions of the rhelogy term to the diagonal - real (kind=dbl_kind), dimension (max_blocks) :: & - L2norm ! array used to compute l^2 norm of grid function - real (kind=dbl_kind), dimension (ntot) :: & res , & ! current residual res_old , & ! previous residual @@ -809,7 +804,6 @@ subroutine anderson_solver (icellT , icellU , & ! 
Initialization res_num = 0 - L2norm = c0 !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks @@ -887,11 +881,13 @@ subroutine anderson_solver (icellT , icellU , & indxUi (:,iblk), indxUj (:,iblk), & bx (:,:,iblk), by (:,:,iblk), & Au (:,:,iblk), Av (:,:,iblk), & - Fx (:,:,iblk), Fy (:,:,iblk), & - L2norm (iblk)) + Fx (:,:,iblk), Fy (:,:,iblk)) enddo !$OMP END PARALLEL DO - nlres_norm = sqrt(global_sum(sum(L2norm), distrb_info)) + nlres_norm = global_norm(nx_block, ny_block, & + icellU , & + indxUi , indxUj , & + Fx , Fy ) if (my_task == master_task .and. monitor_nonlin) then write(nu_diag, '(a,i4,a,d26.16)') "monitor_nonlin: iter_nonlin= ", it_nl, & " nonlin_res_L2norm= ", nlres_norm @@ -981,16 +977,10 @@ subroutine anderson_solver (icellT , icellU , & indxUi (:,:), indxUj(:,:) , & res (:), & fpresx (:,:,:), fpresy (:,:,:)) - !$OMP PARALLEL DO PRIVATE(iblk) - do iblk = 1, nblocks - call calc_L2norm_squared (nx_block , ny_block , & - icellU (iblk), & - indxUi (:,iblk), indxUj (:,iblk), & - fpresx(:,:,iblk), fpresy(:,:,iblk), & - L2norm (iblk)) - enddo - !$OMP END PARALLEL DO - fpres_norm = sqrt(global_sum(sum(L2norm), distrb_info)) + fpres_norm = global_norm(nx_block, ny_block, & + icellU , & + indxUi , indxUj , & + fpresx , fpresy ) #endif if (my_task == master_task .and. monitor_nonlin) then write(nu_diag, '(a,i4,a,d26.16)') "monitor_nonlin: iter_nonlin= ", it_nl, & @@ -1119,14 +1109,12 @@ subroutine anderson_solver (icellT , icellU , & do iblk = 1, nblocks fpresx(:,:,iblk) = uvel(:,:,iblk) - uprev_k(:,:,iblk) fpresy(:,:,iblk) = vvel(:,:,iblk) - vprev_k(:,:,iblk) - call calc_L2norm_squared (nx_block , ny_block , & - icellU (iblk), & - indxUi (:,iblk), indxUj (:,iblk), & - fpresx(:,:,iblk), fpresy(:,:,iblk), & - L2norm (iblk)) enddo !$OMP END PARALLEL DO - prog_norm = sqrt(global_sum(sum(L2norm), distrb_info)) + prog_norm = global_norm(nx_block, ny_block, & + icellU , & + indxUi , indxUj , & + fpresx , fpresy ) if (my_task == master_task .and. monitor_nonlin) then write(nu_diag, '(a,i4,a,d26.16)') "monitor_nonlin: iter_nonlin= ", it_nl, & " progress_res_L2norm= ", prog_norm @@ -1961,8 +1949,7 @@ subroutine residual_vec (nx_block , ny_block, & indxUi , indxUj , & bx , by , & Au , Av , & - Fx , Fy , & - sum_squared) + Fx , Fy ) integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions @@ -1982,9 +1969,6 @@ subroutine residual_vec (nx_block , ny_block, & Fx , & ! x residual vector, Fx = bx - Au (N/m^2) Fy ! y residual vector, Fy = by - Av (N/m^2) - real (kind=dbl_kind), intent(out), optional :: & - sum_squared ! sum of squared residual vector components - ! local variables integer (kind=int_kind) :: & @@ -1993,22 +1977,15 @@ subroutine residual_vec (nx_block , ny_block, & character(len=*), parameter :: subname = '(residual_vec)' !----------------------------------------------------------------- - ! compute residual and sum its squared components + ! compute residual !----------------------------------------------------------------- - if (present(sum_squared)) then - sum_squared = c0 - endif - do ij = 1, icellU i = indxUi(ij) j = indxUj(ij) Fx(i,j) = bx(i,j) - Au(i,j) Fy(i,j) = by(i,j) - Av(i,j) - if (present(sum_squared)) then - sum_squared = sum_squared + Fx(i,j)**2 + Fy(i,j)**2 - endif enddo ! ij end subroutine residual_vec @@ -2472,50 +2449,121 @@ end subroutine formDiag_step2 !======================================================================= -! Compute squared l^2 norm of a grid function (tpu,tpv) +! 
Compute global l^2 norm of a vector field (field_x, field_y) - subroutine calc_L2norm_squared (nx_block, ny_block, & - icellU , & - indxUi , indxUj , & - tpu , tpv , & - L2norm) + function global_norm (nx_block, ny_block, & + icellU , & + indxUi , indxUj , & + field_x , field_y ) & + result(norm) + + use ice_domain, only: distrb_info + use ice_domain_size, only: max_blocks integer (kind=int_kind), intent(in) :: & - nx_block, ny_block, & ! block dimensions - icellU ! total count when iceUmask = .true. + nx_block, ny_block ! block dimensions - integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & + integer (kind=int_kind), dimension (max_blocks), intent(in) :: & + icellU ! total count when iceumask is true + + integer (kind=int_kind), dimension (nx_block*ny_block,max_blocks), intent(in) :: & indxUi , & ! compressed index in i-direction indxUj ! compressed index in j-direction - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - tpu , & ! x-component of vector grid function - tpv ! y-component of vector grid function + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), intent(in) :: & + field_x , & ! x-component of vector field + field_y ! y-component of vector field - real (kind=dbl_kind), intent(out) :: & - L2norm ! squared l^2 norm of vector grid function (tpu,tpv) + real (kind=dbl_kind) :: & + norm ! l^2 norm of vector field ! local variables integer (kind=int_kind) :: & - i, j, ij + i, j, ij, iblk - character(len=*), parameter :: subname = '(calc_L2norm_squared)' + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + squared ! temporary array for summed squared components - !----------------------------------------------------------------- - ! compute squared l^2 norm of vector grid function (tpu,tpv) - !----------------------------------------------------------------- + character(len=*), parameter :: subname = '(global_norm)' - L2norm = c0 + norm = sqrt(global_dot_product (nx_block , ny_block , & + icellU , & + indxUi , indxUj , & + field_x , field_y , & + field_x , field_y )) - do ij = 1, icellU - i = indxUi(ij) - j = indxUj(ij) + end function global_norm - L2norm = L2norm + tpu(i,j)**2 + tpv(i,j)**2 - enddo ! ij +!======================================================================= + +! Compute global dot product of two grid vectors, each split into X and Y components + + function global_dot_product (nx_block , ny_block , & + icellU , & + indxUi , indxUj , & + vector1_x , vector1_y, & + vector2_x , vector2_y) & + result(dot_product) + + use ice_domain, only: distrb_info + use ice_domain_size, only: max_blocks + use ice_fileunits, only: bfbflag + + integer (kind=int_kind), intent(in) :: & + nx_block, ny_block ! block dimensions + + integer (kind=int_kind), dimension (max_blocks), intent(in) :: & + icellU ! total count when iceumask is true + + integer (kind=int_kind), dimension (nx_block*ny_block,max_blocks), intent(in) :: & + indxUi , & ! compressed index in i-direction + indxUj ! compressed index in j-direction + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), intent(in) :: & + vector1_x , & ! x-component of first vector + vector1_y , & ! y-component of first vector + vector2_x , & ! x-component of second vector + vector2_y ! y-component of second vector + + real (kind=dbl_kind) :: & + dot_product ! l^2 norm of vector field + + ! local variables + + integer (kind=int_kind) :: & + i, j, ij, iblk + + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & + prod ! 
temporary array + + real (kind=dbl_kind), dimension(max_blocks) :: & + dot ! temporary scalar for accumulating the result + + character(len=*), parameter :: subname = '(global_dot_product)' + + prod = c0 + dot = c0 + + !$OMP PARALLEL DO PRIVATE(i, j, ij, iblk) + do iblk = 1, nblocks + do ij = 1, icellU(iblk) + i = indxUi(ij, iblk) + j = indxUj(ij, iblk) + prod(i,j,iblk) = vector1_x(i,j,iblk)*vector2_x(i,j,iblk) + vector1_y(i,j,iblk)*vector2_y(i,j,iblk) + dot(iblk) = dot(iblk) + prod(i,j,iblk) + enddo ! ij + enddo + !$OMP END PARALLEL DO - end subroutine calc_L2norm_squared + ! Use local summation result unless bfbflag is active + if (bfbflag == 'off') then + dot_product = global_sum(sum(dot), distrb_info) + else + dot_product = global_sum(prod, distrb_info, field_loc_NEcorner) + endif + + end function global_dot_product !======================================================================= @@ -2768,6 +2816,7 @@ subroutine fgmres (zetax2 , etax2 , & real (kind=dbl_kind) :: & norm_residual , & ! current L^2 norm of residual vector inverse_norm , & ! inverse of the norm of a vector + norm_rhs , & ! L^2 norm of right-hand-side vector nu, t ! local temporary values integer (kind=int_kind) :: & @@ -2807,6 +2856,17 @@ subroutine fgmres (zetax2 , etax2 , & arnoldi_basis_x = c0 arnoldi_basis_y = c0 + ! solution is zero if RHS is zero + norm_rhs = global_norm(nx_block, ny_block, & + icellU , & + indxUi , indxUj , & + bx , by ) + if (norm_rhs == c0) then + solx = bx + soly = by + return + endif + ! Residual of the initial iterate !$OMP PARALLEL DO PRIVATE(iblk) @@ -2838,18 +2898,11 @@ subroutine fgmres (zetax2 , etax2 , & ! Start outer (restarts) loop do ! Compute norm of initial residual - !$OMP PARALLEL DO PRIVATE(iblk) - do iblk = 1, nblocks - call calc_L2norm_squared(nx_block , ny_block , & - icellU (iblk), & - indxUi (:,iblk), indxUj(:,iblk), & - arnoldi_basis_x(:,:,iblk, 1) , & - arnoldi_basis_y(:,:,iblk, 1) , & - norm_squared(iblk)) - - enddo - !$OMP END PARALLEL DO - norm_residual = sqrt(global_sum(sum(norm_squared), distrb_info)) + norm_residual = global_norm(nx_block, ny_block, & + icellU , & + indxUi , indxUj , & + arnoldi_basis_x(:,:,:, 1), & + arnoldi_basis_y(:,:,:, 1)) if (my_task == master_task .and. monitor_fgmres) then write(nu_diag, '(a,i4,a,d26.16)') "monitor_fgmres: iter_fgmres= ", nbiter, & @@ -2895,6 +2948,7 @@ subroutine fgmres (zetax2 , etax2 , & call precondition(zetax2 , etax2 , & Cb , vrel , & umassdti , & + halo_info_mask, & arnoldi_basis_x(:,:,:,initer), & arnoldi_basis_y(:,:,:,initer), & diagx , diagy , & @@ -2943,17 +2997,11 @@ subroutine fgmres (zetax2 , etax2 , & hessenberg) ! Compute norm of new Arnoldi vector and update Hessenberg matrix - !$OMP PARALLEL DO PRIVATE(iblk) - do iblk = 1, nblocks - call calc_L2norm_squared(nx_block , ny_block , & - icellU (iblk), & - indxUi (:,iblk), indxUj(:, iblk) , & - arnoldi_basis_x(:,:,iblk, nextit), & - arnoldi_basis_y(:,:,iblk, nextit), & - norm_squared(iblk)) - enddo - !$OMP END PARALLEL DO - hessenberg(nextit,initer) = sqrt(global_sum(sum(norm_squared), distrb_info)) + hessenberg(nextit,initer) = global_norm(nx_block, ny_block, & + icellU , & + indxUi , indxUj , & + arnoldi_basis_x(:,:,:, nextit), & + arnoldi_basis_y(:,:,:, nextit)) ! Watch out for happy breakdown if (.not. 
almost_zero( hessenberg(nextit,initer) ) ) then @@ -3097,6 +3145,7 @@ end subroutine fgmres subroutine pgmres (zetax2 , etax2 , & Cb , vrel , & umassdti , & + halo_info_mask, & bx , by , & diagx , diagy , & tolerance, maxinner, & @@ -3104,6 +3153,11 @@ subroutine pgmres (zetax2 , etax2 , & solx , soly , & nbiter) + use ice_boundary, only: ice_HaloUpdate + use ice_domain, only: maskhalo_dyn, halo_info + use ice_fileunits, only: bfbflag + use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound + real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks,4), intent(in) :: & zetax2 , & ! zetax2 = 2*zeta (bulk viscosity) etax2 ! etax2 = 2*eta (shear viscosity) @@ -3113,6 +3167,9 @@ subroutine pgmres (zetax2 , etax2 , & Cb , & ! seabed stress coefficient umassdti ! mass of U-cell/dte (kg/m^2 s) + type (ice_halo), intent(in) :: & + halo_info_mask ! ghost cell update info for masked halo + real (kind=dbl_kind), dimension(nx_block, ny_block, max_blocks), intent(in) :: & bx , & ! Right hand side of the linear system (x components) by ! Right hand side of the linear system (y components) @@ -3231,18 +3288,11 @@ subroutine pgmres (zetax2 , etax2 , & ! Start outer (restarts) loop do ! Compute norm of initial residual - !$OMP PARALLEL DO PRIVATE(iblk) - do iblk = 1, nblocks - call calc_L2norm_squared(nx_block , ny_block , & - icellU (iblk), & - indxUi (:,iblk), indxUj(:, iblk), & - arnoldi_basis_x(:,:,iblk, 1), & - arnoldi_basis_y(:,:,iblk, 1), & - norm_squared(iblk)) - - enddo - !$OMP END PARALLEL DO - norm_residual = sqrt(global_sum(sum(norm_squared), distrb_info)) + norm_residual = global_norm(nx_block, ny_block, & + icellU , & + indxUi , indxUj , & + arnoldi_basis_x(:,:,:, 1), & + arnoldi_basis_y(:,:,:, 1)) if (my_task == master_task .and. monitor_pgmres) then write(nu_diag, '(a,i4,a,d26.16)') "monitor_pgmres: iter_pgmres= ", nbiter, & @@ -3289,14 +3339,29 @@ subroutine pgmres (zetax2 , etax2 , & call precondition(zetax2 , etax2 , & Cb , vrel , & umassdti , & + halo_info_mask, & arnoldi_basis_x(:,:,:,initer), & arnoldi_basis_y(:,:,:,initer), & diagx , diagy , & precond_type, & workspace_x , workspace_y) - ! NOTE: halo updates for (workspace_x, workspace_y) - ! are skipped here for efficiency since this is just a preconditioner + ! Update workspace with boundary values + ! NOTE: skipped for efficiency since this is just a preconditioner + ! unless bfbflag is active + if (bfbflag /= 'off') then + call stack_fields(workspace_x, workspace_y, fld2) + call ice_timer_start(timer_bound) + if (maskhalo_dyn) then + call ice_HaloUpdate (fld2, halo_info_mask, & + field_loc_NEcorner, field_type_vector) + else + call ice_HaloUpdate (fld2, halo_info, & + field_loc_NEcorner, field_type_vector) + endif + call ice_timer_stop(timer_bound) + call unstack_fields(fld2, workspace_x, workspace_y) + endif !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks @@ -3325,17 +3390,11 @@ subroutine pgmres (zetax2 , etax2 , & hessenberg) ! 
Compute norm of new Arnoldi vector and update Hessenberg matrix - !$OMP PARALLEL DO PRIVATE(iblk) - do iblk = 1, nblocks - call calc_L2norm_squared(nx_block , ny_block , & - icellU (iblk), & - indxUi (:,iblk), indxUj(:, iblk) , & - arnoldi_basis_x(:,:,iblk, nextit), & - arnoldi_basis_y(:,:,iblk, nextit), & - norm_squared(iblk)) - enddo - !$OMP END PARALLEL DO - hessenberg(nextit,initer) = sqrt(global_sum(sum(norm_squared), distrb_info)) + hessenberg(nextit,initer) = global_norm(nx_block, ny_block, & + icellU , & + indxUi , indxUj , & + arnoldi_basis_x(:,:,:, nextit), & + arnoldi_basis_y(:,:,:, nextit)) ! Watch out for happy breakdown if (.not. almost_zero( hessenberg(nextit,initer) ) ) then @@ -3426,6 +3485,7 @@ subroutine pgmres (zetax2 , etax2 , & call precondition(zetax2 , etax2 , & Cb , vrel , & umassdti , & + halo_info_mask, & workspace_x , workspace_y, & diagx , diagy , & precond_type, & @@ -3492,6 +3552,7 @@ end subroutine pgmres subroutine precondition(zetax2 , etax2, & Cb , vrel , & umassdti , & + halo_info_mask, & vx , vy , & diagx , diagy, & precond_type, & @@ -3506,6 +3567,9 @@ subroutine precondition(zetax2 , etax2, & Cb , & ! seabed stress coefficient umassdti ! mass of U-cell/dte (kg/m^2 s) + type (ice_halo), intent(in) :: & + halo_info_mask ! ghost cell update info for masked halo + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), intent(in) :: & vx , & ! input vector (x components) vy ! input vector (y components) @@ -3567,6 +3631,7 @@ subroutine precondition(zetax2 , etax2, & call pgmres (zetax2, etax2 , & Cb , vrel , & umassdti , & + halo_info_mask , & vx , vy , & diagx , diagy , & tolerance, maxinner, & @@ -3614,39 +3679,20 @@ subroutine orthogonalize(ortho_type , initer , & ij , & ! compressed index i, j ! grid indices - real (kind=dbl_kind), dimension (max_blocks) :: & - local_dot ! local array value to accumulate dot product of grid function over blocks - - real (kind=dbl_kind), dimension(maxinner) :: & - dotprod_local ! local array to accumulate several dot product computations - character(len=*), parameter :: subname = '(orthogonalize)' if (trim(ortho_type) == 'cgs') then ! Classical Gram-Schmidt ! Classical Gram-Schmidt orthogonalisation process ! First loop of Gram-Schmidt (compute coefficients) - dotprod_local = c0 do it = 1, initer - local_dot = c0 - - !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) - do iblk = 1, nblocks - do ij = 1, icellU(iblk) - i = indxUi(ij, iblk) - j = indxUj(ij, iblk) - - local_dot(iblk) = local_dot(iblk) + & - (arnoldi_basis_x(i, j, iblk, it) * arnoldi_basis_x(i, j, iblk, nextit)) + & - (arnoldi_basis_y(i, j, iblk, it) * arnoldi_basis_y(i, j, iblk, nextit)) - enddo ! ij - enddo - !$OMP END PARALLEL DO - - dotprod_local(it) = sum(local_dot) + hessenberg(it, initer) = global_dot_product(nx_block, ny_block, & + icellU , & + indxUi , indxUj , & + arnoldi_basis_x(:,:,:, it) , & + arnoldi_basis_y(:,:,:, it) , & + arnoldi_basis_x(:,:,:, nextit), & + arnoldi_basis_y(:,:,:, nextit)) end do - - hessenberg(1:initer, initer) = global_allreduce_sum(dotprod_local(1:initer), distrb_info) - ! Second loop of Gram-Schmidt (orthonormalize) do it = 1, initer !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) @@ -3666,22 +3712,13 @@ subroutine orthogonalize(ortho_type , initer , & elseif (trim(ortho_type) == 'mgs') then ! Modified Gram-Schmidt ! 
Modified Gram-Schmidt orthogonalisation process do it = 1, initer - local_dot = c0 - - !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) - do iblk = 1, nblocks - do ij = 1, icellU(iblk) - i = indxUi(ij, iblk) - j = indxUj(ij, iblk) - - local_dot(iblk) = local_dot(iblk) + & - (arnoldi_basis_x(i, j, iblk, it) * arnoldi_basis_x(i, j, iblk, nextit)) + & - (arnoldi_basis_y(i, j, iblk, it) * arnoldi_basis_y(i, j, iblk, nextit)) - enddo ! ij - enddo - !$OMP END PARALLEL DO - - hessenberg(it,initer) = global_sum(sum(local_dot), distrb_info) + hessenberg(it, initer) = global_dot_product(nx_block, ny_block, & + icellU , & + indxUi , indxUj , & + arnoldi_basis_x(:,:,:, it) , & + arnoldi_basis_y(:,:,:, it) , & + arnoldi_basis_x(:,:,:, nextit), & + arnoldi_basis_y(:,:,:, nextit)) !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) do iblk = 1, nblocks diff --git a/cicecore/cicedynB/general/ice_init.F90 b/cicecore/cicedynB/general/ice_init.F90 index 893c3da9a..0b9a62542 100644 --- a/cicecore/cicedynB/general/ice_init.F90 +++ b/cicecore/cicedynB/general/ice_init.F90 @@ -419,7 +419,7 @@ subroutine input_data deltaminEVP = 1e-11_dbl_kind ! minimum delta for viscosities (EVP, Hunke 2001) deltaminVP = 2e-9_dbl_kind ! minimum delta for viscosities (VP, Hibler 1979) capping_method = 'max' ! method for capping of viscosities (max=Hibler 1979,sum=Kreyscher2000) - maxits_nonlin = 4 ! max nb of iteration for nonlinear solver + maxits_nonlin = 10 ! max nb of iteration for nonlinear solver precond = 'pgmres' ! preconditioner for fgmres: 'ident' (identity), 'diag' (diagonal), ! 'pgmres' (Jacobi-preconditioned GMRES) dim_fgmres = 50 ! size of fgmres Krylov subspace @@ -431,7 +431,7 @@ subroutine input_data monitor_pgmres = .false. ! print pgmres residual norm ortho_type = 'mgs' ! orthogonalization procedure 'cgs' or 'mgs' reltol_nonlin = 1e-8_dbl_kind ! nonlinear stopping criterion: reltol_nonlin*res(k=0) - reltol_fgmres = 1e-2_dbl_kind ! fgmres stopping criterion: reltol_fgmres*res(k) + reltol_fgmres = 1e-1_dbl_kind ! fgmres stopping criterion: reltol_fgmres*res(k) reltol_pgmres = 1e-6_dbl_kind ! pgmres stopping criterion: reltol_pgmres*res(k) algo_nonlin = 'picard' ! nonlinear algorithm: 'picard' (Picard iteration), 'anderson' (Anderson acceleration) fpfunc_andacc = 1 ! fixed point function for Anderson acceleration: diff --git a/cicecore/cicedynB/infrastructure/comm/mpi/ice_global_reductions.F90 b/cicecore/cicedynB/infrastructure/comm/mpi/ice_global_reductions.F90 index a5fed760b..4b94389f7 100644 --- a/cicecore/cicedynB/infrastructure/comm/mpi/ice_global_reductions.F90 +++ b/cicecore/cicedynB/infrastructure/comm/mpi/ice_global_reductions.F90 @@ -36,7 +36,6 @@ module ice_global_reductions private public :: global_sum, & - global_allreduce_sum, & global_sum_prod, & global_maxval, & global_minval @@ -56,12 +55,6 @@ module ice_global_reductions global_sum_scalar_int end interface - interface global_allreduce_sum - module procedure global_allreduce_sum_vector_dbl!, & - ! module procedure global_allreduce_sum_vector_real, & ! not yet implemented - ! module procedure global_allreduce_sum_vector_int ! not yet implemented - end interface - interface global_sum_prod module procedure global_sum_prod_dbl, & global_sum_prod_real, & @@ -707,68 +700,6 @@ function global_sum_scalar_int(scalar, dist) & end function global_sum_scalar_int -!*********************************************************************** - - function global_allreduce_sum_vector_dbl(vector, dist) & - result(globalSums) - -! 
Computes the global sums of sets of scalars (elements of 'vector') -! distributed across a parallel machine. -! -! This is actually the specific interface for the generic global_allreduce_sum -! function corresponding to double precision vectors. The generic -! interface is identical but will handle real and integer vectors. - - real (dbl_kind), dimension(:), intent(in) :: & - vector ! vector whose components are to be summed - - type (distrb), intent(in) :: & - dist ! block distribution - - real (dbl_kind), dimension(size(vector)) :: & - globalSums ! resulting array of global sums - -!----------------------------------------------------------------------- -! -! local variables -! -!----------------------------------------------------------------------- - - integer (int_kind) :: & - numProcs, &! number of processor participating - numBlocks, &! number of local blocks - communicator, &! communicator for this distribution - numElem ! number of elements in vector - - real (dbl_kind), dimension(:,:), allocatable :: & - work ! temporary local array - - character(len=*), parameter :: subname = '(global_allreduce_sum_vector_dbl)' - -!----------------------------------------------------------------------- -! -! get communicator for MPI calls -! -!----------------------------------------------------------------------- - - call ice_distributionGet(dist, & - numLocalBlocks = numBlocks, & - nprocs = numProcs, & - communicator = communicator) - - numElem = size(vector) - allocate(work(1,numElem)) - work(1,:) = vector - globalSums = c0 - - call compute_sums_dbl(work,globalSums,communicator,numProcs) - - deallocate(work) - -!----------------------------------------------------------------------- - - end function global_allreduce_sum_vector_dbl - !*********************************************************************** function global_sum_prod_dbl (array1, array2, dist, field_loc, & diff --git a/cicecore/cicedynB/infrastructure/comm/serial/ice_global_reductions.F90 b/cicecore/cicedynB/infrastructure/comm/serial/ice_global_reductions.F90 index e859ea2bd..5fcd45876 100644 --- a/cicecore/cicedynB/infrastructure/comm/serial/ice_global_reductions.F90 +++ b/cicecore/cicedynB/infrastructure/comm/serial/ice_global_reductions.F90 @@ -37,7 +37,6 @@ module ice_global_reductions private public :: global_sum, & - global_allreduce_sum, & global_sum_prod, & global_maxval, & global_minval @@ -57,12 +56,6 @@ module ice_global_reductions global_sum_scalar_int end interface - interface global_allreduce_sum - module procedure global_allreduce_sum_vector_dbl!, & - ! module procedure global_allreduce_sum_vector_real, & ! not yet implemented - ! module procedure global_allreduce_sum_vector_int ! not yet implemented - end interface - interface global_sum_prod module procedure global_sum_prod_dbl, & global_sum_prod_real, & @@ -708,68 +701,6 @@ function global_sum_scalar_int(scalar, dist) & end function global_sum_scalar_int -!*********************************************************************** - - function global_allreduce_sum_vector_dbl(vector, dist) & - result(globalSums) - -! Computes the global sums of sets of scalars (elements of 'vector') -! distributed across a parallel machine. -! -! This is actually the specific interface for the generic global_allreduce_sum -! function corresponding to double precision vectors. The generic -! interface is identical but will handle real and integer vectors. - - real (dbl_kind), dimension(:), intent(in) :: & - vector ! 
vector whose components are to be summed - - type (distrb), intent(in) :: & - dist ! block distribution - - real (dbl_kind), dimension(size(vector)) :: & - globalSums ! resulting array of global sums - -!----------------------------------------------------------------------- -! -! local variables -! -!----------------------------------------------------------------------- - - integer (int_kind) :: & - numProcs, &! number of processor participating - numBlocks, &! number of local blocks - communicator, &! communicator for this distribution - numElem ! number of elements in vector - - real (dbl_kind), dimension(:,:), allocatable :: & - work ! temporary local array - - character(len=*), parameter :: subname = '(global_allreduce_sum_vector_dbl)' - -!----------------------------------------------------------------------- -! -! get communicator for MPI calls -! -!----------------------------------------------------------------------- - - call ice_distributionGet(dist, & - numLocalBlocks = numBlocks, & - nprocs = numProcs, & - communicator = communicator) - - numElem = size(vector) - allocate(work(1,numElem)) - work(1,:) = vector - globalSums = c0 - - call compute_sums_dbl(work,globalSums,communicator,numProcs) - - deallocate(work) - -!----------------------------------------------------------------------- - - end function global_allreduce_sum_vector_dbl - !*********************************************************************** function global_sum_prod_dbl (array1, array2, dist, field_loc, & diff --git a/cicecore/drivers/unittest/sumchk/sumchk.F90 b/cicecore/drivers/unittest/sumchk/sumchk.F90 index aba435b0e..d9ea72d8c 100644 --- a/cicecore/drivers/unittest/sumchk/sumchk.F90 +++ b/cicecore/drivers/unittest/sumchk/sumchk.F90 @@ -58,9 +58,6 @@ program sumchk integer(int_kind),parameter :: ntests3 = 3 character(len=8) :: errorflag3(ntests3) character(len=32) :: stringflag3(ntests3) - integer(int_kind),parameter :: ntests4 = 1 - character(len=8) :: errorflag4(ntests4) - character(len=32) :: stringflag4(ntests4) integer(int_kind) :: npes, ierr, ntask @@ -100,7 +97,6 @@ program sumchk errorflag1 = passflag errorflag2 = passflag errorflag3 = passflag - errorflag4 = passflag npes = get_num_procs() if (my_task == master_task) then @@ -600,63 +596,6 @@ program sumchk endif enddo - ! --------------------------- - ! Test Vector Reductions - ! --------------------------- - - if (my_task == master_task) write(6,*) ' ' - - n = 1 ; stringflag4(n) = 'dble sum vector' - allocate(vec8(3)) - allocate(sum8(3)) - - minval = -5. - maxval = 8. - - vec8(1) = 1. - - ! fill one gridcell with a min and max value - ntask = max(npes-1,1)-1 - if (my_task == ntask) then - vec8(1) = minval - endif - ntask = min(npes,2)-1 - if (my_task == ntask) then - vec8(1) = maxval - endif - vec8(2) = 2. * vec8(1) - vec8(3) = 3. * vec8(1) - - ! compute correct results - if (npes == 1) then - minval = maxval - corval = maxval - else - corval = (npes - 2) * 1.0 + minval + maxval - endif - - do k = 1,ntests4 - string = stringflag4(k) - sum8 = -888e12 - if (k == 1) then - sum8 = global_allreduce_sum(vec8, distrb_info) - else - call abort_ice(subname//' illegal k vector',file=__FILE__,line=__LINE__) - endif - - if (my_task == master_task) then - write(6,'(1x,a,3g16.8)') string, sum8(1),sum8(2),sum8(3) - endif - - if (sum8(1) /= corval .or. sum8(2) /= 2.*corval .or. 
sum8(3) /= 3.*corval) then - errorflag4(k) = failflag - errorflag0 = failflag - if (my_task == master_task) then - write(6,*) '**** ERROR ', sum8(1),sum8(2),sum8(3),corval - endif - endif - enddo - ! --------------------------- if (my_task == master_task) then @@ -670,9 +609,6 @@ program sumchk do k = 1,ntests3 write(6,*) errorflag3(k),stringflag3(k) enddo - do k = 1,ntests4 - write(6,*) errorflag4(k),stringflag4(k) - enddo write(6,*) ' ' write(6,*) 'SUMCHK COMPLETED SUCCESSFULLY' if (errorflag0 == passflag) then diff --git a/configuration/scripts/ice_in b/configuration/scripts/ice_in index e5cbd6fcc..8262f34ec 100644 --- a/configuration/scripts/ice_in +++ b/configuration/scripts/ice_in @@ -167,7 +167,7 @@ kridge = 1 ktransport = 1 ssh_stress = 'geostrophic' - maxits_nonlin = 4 + maxits_nonlin = 10 precond = 'pgmres' dim_fgmres = 50 dim_pgmres = 5 @@ -178,7 +178,7 @@ monitor_pgmres = .false. ortho_type = 'mgs' reltol_nonlin = 1e-8 - reltol_fgmres = 1e-2 + reltol_fgmres = 1e-1 reltol_pgmres = 1e-6 algo_nonlin = 'picard' use_mean_vrel = .true. diff --git a/configuration/scripts/machines/env.ppp5_intel b/configuration/scripts/machines/env.ppp5_intel index c4987124a..c6e272692 100644 --- a/configuration/scripts/machines/env.ppp5_intel +++ b/configuration/scripts/machines/env.ppp5_intel @@ -18,6 +18,12 @@ source $ssmuse -d /fs/ssm/main/opt/intelcomp/inteloneapi-2022.1.2/intelcomp+mpi+ # module load -s icc mpi setenv FOR_DUMP_CORE_FILE 1 setenv I_MPI_DEBUG_COREDUMP 1 +# Reproducible collectives +if (${ICE_BASEGEN} != ${ICE_SPVAL} || ${ICE_BASECOM} != ${ICE_SPVAL}) then + setenv I_MPI_CBWR 1 +endif +# Stop being buggy +setenv I_MPI_FABRICS ofi # NetCDF source $ssmuse -d main/opt/hdf5-netcdf4/serial/shared/inteloneapi-2022.1.2/01 @@ -32,6 +38,7 @@ setenv ICE_MACHINE_MAKE make setenv ICE_MACHINE_WKDIR ~/data/ppp5/cice/runs/ setenv ICE_MACHINE_INPUTDATA /space/hall5/sitestore/eccc/cmd/e/sice500/ setenv ICE_MACHINE_BASELINE ~/data/ppp5/cice/baselines/ +setenv ICE_MACHINE_MAXRUNLENGTH 6 setenv ICE_MACHINE_SUBMIT qsub setenv ICE_MACHINE_TPNODE 80 setenv ICE_MACHINE_ACCT unused diff --git a/configuration/scripts/machines/env.ppp6_gnu b/configuration/scripts/machines/env.ppp6_gnu index 69ed6ff8b..94f478774 100644 --- a/configuration/scripts/machines/env.ppp6_gnu +++ b/configuration/scripts/machines/env.ppp6_gnu @@ -8,7 +8,7 @@ endif if ("$inp" != "-nomodules") then # OpenMPI -source /usr/mpi/gcc/openmpi-4.1.2a1/bin/mpivars.csh +setenv PATH "/home/phb001/.local_rhel-8-icelake-64_gcc/bin:$PATH" # OpenMP setenv OMP_STACKSIZE 64M @@ -21,6 +21,7 @@ setenv ICE_MACHINE_MAKE make setenv ICE_MACHINE_WKDIR ~/data/site6/cice/runs/ setenv ICE_MACHINE_INPUTDATA /space/hall6/sitestore/eccc/cmd/e/sice500/ setenv ICE_MACHINE_BASELINE ~/data/site6/cice/baselines/ +setenv ICE_MACHINE_MAXRUNLENGTH 6 setenv ICE_MACHINE_SUBMIT qsub setenv ICE_MACHINE_TPNODE 80 setenv ICE_MACHINE_ACCT unused diff --git a/configuration/scripts/machines/env.ppp6_gnu-impi b/configuration/scripts/machines/env.ppp6_gnu-impi index 461e09a43..61cd46ed9 100644 --- a/configuration/scripts/machines/env.ppp6_gnu-impi +++ b/configuration/scripts/machines/env.ppp6_gnu-impi @@ -18,6 +18,12 @@ setenv I_MPI_F90 gfortran setenv I_MPI_FC gfortran setenv I_MPI_CC gcc setenv I_MPI_CXX g++ +# Reproducible collectives +if (${ICE_BASEGEN} != ${ICE_SPVAL} || ${ICE_BASECOM} != ${ICE_SPVAL}) then + setenv I_MPI_CBWR 1 +endif +# Stop being buggy +setenv I_MPI_FABRICS ofi # OpenMP setenv OMP_STACKSIZE 64M @@ -30,6 +36,7 @@ setenv ICE_MACHINE_MAKE make setenv 
ICE_MACHINE_WKDIR ~/data/site6/cice/runs/ setenv ICE_MACHINE_INPUTDATA /space/hall6/sitestore/eccc/cmd/e/sice500/ setenv ICE_MACHINE_BASELINE ~/data/site6/cice/baselines/ +setenv ICE_MACHINE_MAXRUNLENGTH 6 setenv ICE_MACHINE_SUBMIT qsub setenv ICE_MACHINE_TPNODE 80 setenv ICE_MACHINE_ACCT unused diff --git a/configuration/scripts/machines/env.ppp6_intel b/configuration/scripts/machines/env.ppp6_intel index ef9396575..faecb9b5a 100644 --- a/configuration/scripts/machines/env.ppp6_intel +++ b/configuration/scripts/machines/env.ppp6_intel @@ -18,6 +18,12 @@ source $ssmuse -d /fs/ssm/main/opt/intelcomp/inteloneapi-2022.1.2/intelcomp+mpi+ # module load -s icc mpi setenv FOR_DUMP_CORE_FILE 1 setenv I_MPI_DEBUG_COREDUMP 1 +# Reproducible collectives +if (${ICE_BASEGEN} != ${ICE_SPVAL} || ${ICE_BASECOM} != ${ICE_SPVAL}) then + setenv I_MPI_CBWR 1 +endif +# Stop being buggy +setenv I_MPI_FABRICS ofi # NetCDF source $ssmuse -d main/opt/hdf5-netcdf4/serial/shared/inteloneapi-2022.1.2/01 @@ -32,6 +38,7 @@ setenv ICE_MACHINE_MAKE make setenv ICE_MACHINE_WKDIR ~/data/ppp6/cice/runs/ setenv ICE_MACHINE_INPUTDATA /space/hall6/sitestore/eccc/cmd/e/sice500/ setenv ICE_MACHINE_BASELINE ~/data/ppp6/cice/baselines/ +setenv ICE_MACHINE_MAXRUNLENGTH 6 setenv ICE_MACHINE_SUBMIT qsub setenv ICE_MACHINE_TPNODE 80 setenv ICE_MACHINE_ACCT unused diff --git a/configuration/scripts/machines/env.ppp6_intel19 b/configuration/scripts/machines/env.ppp6_intel19 index 6cdd9a036..554b31e42 100644 --- a/configuration/scripts/machines/env.ppp6_intel19 +++ b/configuration/scripts/machines/env.ppp6_intel19 @@ -17,6 +17,12 @@ setenv FOR_DUMP_CORE_FILE 1 source $ssmuse -d /fs/ssm/hpco/exp/intelpsxe-impi-19.0.3.199 setenv FI_PROVIDER verbs setenv I_MPI_DEBUG_COREDUMP 1 +# Reproducible collectives +if (${ICE_BASEGEN} != ${ICE_SPVAL} || ${ICE_BASECOM} != ${ICE_SPVAL}) then + setenv I_MPI_CBWR 1 +endif +# Stop being buggy +setenv I_MPI_FABRICS ofi # NetCDF source $ssmuse -d hpco/exp/hdf5-netcdf4/serial/static/intel-19.0.3.199/02 @@ -31,6 +37,7 @@ setenv ICE_MACHINE_MAKE make setenv ICE_MACHINE_WKDIR ~/data/site6/cice/runs/ setenv ICE_MACHINE_INPUTDATA /space/hall6/sitestore/eccc/cmd/e/sice500/ setenv ICE_MACHINE_BASELINE ~/data/site6/cice/baselines/ +setenv ICE_MACHINE_MAXRUNLENGTH 6 setenv ICE_MACHINE_SUBMIT "qsub" setenv ICE_MACHINE_TPNODE 80 setenv ICE_MACHINE_ACCT P0000000 diff --git a/configuration/scripts/machines/env.robert_intel b/configuration/scripts/machines/env.robert_intel index d3d9c1eae..592d765fe 100644 --- a/configuration/scripts/machines/env.robert_intel +++ b/configuration/scripts/machines/env.robert_intel @@ -18,6 +18,8 @@ source $ssmuse -d /fs/ssm/main/opt/intelcomp/inteloneapi-2022.1.2/intelcomp+mpi+ # module load -s icc mpi setenv FOR_DUMP_CORE_FILE 1 setenv I_MPI_DEBUG_COREDUMP 1 +# Stop being buggy +setenv I_MPI_FABRICS ofi # NetCDF source $ssmuse -d main/opt/hdf5-netcdf4/serial/shared/inteloneapi-2022.1.2/01 diff --git a/configuration/scripts/machines/env.underhill_intel b/configuration/scripts/machines/env.underhill_intel index bc3eec857..dee0ab92b 100644 --- a/configuration/scripts/machines/env.underhill_intel +++ b/configuration/scripts/machines/env.underhill_intel @@ -18,6 +18,8 @@ source $ssmuse -d /fs/ssm/main/opt/intelcomp/inteloneapi-2022.1.2/intelcomp+mpi+ # module load -s icc mpi setenv FOR_DUMP_CORE_FILE 1 setenv I_MPI_DEBUG_COREDUMP 1 +# Stop being buggy +setenv I_MPI_FABRICS ofi # NetCDF source $ssmuse -d main/opt/hdf5-netcdf4/serial/shared/inteloneapi-2022.1.2/01 diff --git 
a/doc/source/cice_index.rst b/doc/source/cice_index.rst index 9dc2a06c4..763475992 100644 --- a/doc/source/cice_index.rst +++ b/doc/source/cice_index.rst @@ -72,7 +72,7 @@ either Celsius or Kelvin units). "awtvdf", "weighting factor for visible, diffuse albedo", "0.63282" "awtvdr", "weighting factor for visible, direct albedo", "0.00318" "**B**", "", "" - "bfb_flag", "for bit-for-bit reproducible diagnostics", "" + "bfbflag", "for bit-for-bit reproducible diagnostics, and reproducible outputs when using the VP solver", "" "bgc_data_dir", "data directory for bgc", "" "bgc_data_type", "source of silicate, nitrate data", "" "bgc_flux_type", "ice–ocean flux velocity", "" diff --git a/doc/source/developer_guide/dg_other.rst b/doc/source/developer_guide/dg_other.rst index 8d4f9716e..308c2629c 100644 --- a/doc/source/developer_guide/dg_other.rst +++ b/doc/source/developer_guide/dg_other.rst @@ -30,11 +30,11 @@ To run with an interactive debugger, the following general steps should be taken Reproducible Sums ---------------------- -Reproducible sums in the CICE diagnostics are set with the namelist `bfbflag`. -CICE prognostics results do NOT depend on the global sum implementation. The +Reproducible sums in CICE are set with the namelist `bfbflag`. +CICE prognostic results do NOT depend on the global sum implementation when using the default dynamics solver (EVP) or the EAP solver. With these solvers, the results are bit-for-bit identical with any `bfbflag`. The `bfbflag` only impacts the results and performance of the global diagnostics written to the CICE -log file. For best performance, the off (or lsum8 which is equivalent) setting is recommended. +log file (for all dynamics solvers), as well as the model results when using the VP solver. For best performance, the off setting is recommended. This will probably not produce bit-for-bit results with different decompositions. For bit-for-bit results, the reprosum setting is recommended. This should be only slightly slower than the lsum8 implementation. @@ -50,7 +50,7 @@ The `bfbflag` namelist is a character string with several valid settings. The tradeoff in these settings is the likelihood for bit-for-bit results versus their cost. The `bfbflag` settings are implemented as follows, -off is the default and equivalent to lsum8. +off is the default and mostly equivalent to lsum8 (some computations in the VP solver use a different code path when lsum8 is chosen). lsum4 is a local sum computed with single precision (4 byte) data and a scalar mpi allreduce. This is extremely unlikely to be bit-for-bit for different decompositions. diff --git a/doc/source/science_guide/sg_dynamics.rst b/doc/source/science_guide/sg_dynamics.rst index 241fa05fe..e6b918538 100644 --- a/doc/source/science_guide/sg_dynamics.rst +++ b/doc/source/science_guide/sg_dynamics.rst @@ -92,8 +92,7 @@ dynamics into CICE is described in detail in The VP solver implementation mostly follows :cite:`Lemieux08`, with FGMRES :cite:`Saad93` as the linear solver and GMRES as the preconditioner. -Note that the VP solver has not yet been tested on the ``tx1`` grid or with -threading enabled. +Note that the VP solver has not yet been tested on the ``tx1`` grid. The EVP, rEVP, EAP and VP approaches are all available with the B grid. However, at the moment, only the EVP and rEVP schemes are possible with the C grid.
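The reproducible-sums discussion in the dg_other.rst hunk above is easier to follow with a concrete picture of why a global sum can change with the block decomposition. The following sketch is illustrative only (it is not CICE code, and the program name and data are invented): it adds the same double precision values in two different orders, as two different decompositions would, and the results generally differ in the low-order bits.

   program sum_order_demo
      ! Standalone demonstration that floating point addition is not
      ! associative: the same data summed in two orders gives two answers.
      implicit none
      integer, parameter :: dbl_kind = selected_real_kind(13)
      integer, parameter :: n = 100000
      real (kind=dbl_kind) :: a(n), s_fwd, s_rev
      integer :: i

      do i = 1, n                   ! values spanning many magnitudes
         a(i) = 1.0_dbl_kind / real(i,kind=dbl_kind)**2
      enddo

      s_fwd = 0.0_dbl_kind          ! one summation order ...
      do i = 1, n
         s_fwd = s_fwd + a(i)
      enddo

      s_rev = 0.0_dbl_kind          ! ... and the reverse order, standing in
      do i = n, 1, -1               ! for a different block decomposition
         s_rev = s_rev + a(i)
      enddo

      write(6,*) 's_fwd - s_rev = ', s_fwd - s_rev   ! generally nonzero
   end program sum_order_demo

The lsum4 and lsum8 settings compute such local partial sums (in 4 byte and 8 byte precision respectively) followed by a scalar mpi allreduce, so they remain order-dependent across decompositions; reprosum computes the sum in a way that does not depend on the order of the additions, which is why it can reproduce.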
diff --git a/doc/source/user_guide/ug_case_settings.rst b/doc/source/user_guide/ug_case_settings.rst index 0078dd102..b2dd54c33 100644 --- a/doc/source/user_guide/ug_case_settings.rst +++ b/doc/source/user_guide/ug_case_settings.rst @@ -469,7 +469,7 @@ dynamics_nml "``k1``", "real", "1st free parameter for landfast parameterization", "7.5" "``k2``", "real", "2nd free parameter (N/m\ :math:`^3`) for landfast parameterization", "15.0" "``maxits_fgmres``", "integer", "maximum number of restarts for FGMRES solver", "1" - "``maxits_nonlin``", "integer", "maximum number of nonlinear iterations for VP solver", "1000" + "``maxits_nonlin``", "integer", "maximum number of nonlinear iterations for VP solver", "10" "``maxits_pgmres``", "integer", "maximum number of restarts for PGMRES preconditioner", "1" "``monitor_fgmres``", "logical", "write velocity norm at each FGMRES iteration", "``.false.``" "``monitor_nonlin``", "logical", "write velocity norm at each nonlinear iteration", "``.false.``" @@ -482,7 +482,7 @@ dynamics_nml "", "``ident``", "Don't use a preconditioner for the FGMRES solver", "" "", "``pgmres``", "Use GMRES as preconditioner for FGMRES solver", "" "``Pstar``", "real", "constant in Hibler strength formula (N/m\ :math:`^2`)", "2.75e4" - "``reltol_fgmres``", "real", "relative tolerance for FGMRES solver", "1e-2" + "``reltol_fgmres``", "real", "relative tolerance for FGMRES solver", "1e-1" "``reltol_nonlin``", "real", "relative tolerance for nonlinear solver", "1e-8" "``reltol_pgmres``", "real", "relative tolerance for PGMRES preconditioner", "1e-6" "``revised_evp``", "logical", "use revised EVP formulation", "``.false.``" From db2d7a9f92ba8e7d53c3e85d67623b787fba09c5 Mon Sep 17 00:00:00 2001 From: Tony Craig Date: Thu, 20 Oct 2022 15:25:41 -0700 Subject: [PATCH 30/57] Update Narwhal Port (#776) * Update narwhal port, add narwhal intelhpcx * Update comparelog scripts and narwhal aprun launch, add -q --- .../cicedynB/infrastructure/ice_domain.F90 | 1 + configuration/scripts/cice.launch.csh | 58 ++++++++++--------- .../scripts/machines/Macros.narwhal_aocc | 2 +- .../scripts/machines/Macros.narwhal_intelhpcx | 57 ++++++++++++++++++ .../scripts/machines/env.narwhal_aocc | 19 +++--- .../scripts/machines/env.narwhal_cray | 20 +++---- .../scripts/machines/env.narwhal_gnu | 22 +++---- .../scripts/machines/env.narwhal_intel | 20 +++---- .../scripts/machines/env.narwhal_intelhpcx | 52 +++++++++++++++++ configuration/scripts/tests/baseline.script | 6 +- 10 files changed, 179 insertions(+), 78 deletions(-) create mode 100644 configuration/scripts/machines/Macros.narwhal_intelhpcx create mode 100755 configuration/scripts/machines/env.narwhal_intelhpcx diff --git a/cicecore/cicedynB/infrastructure/ice_domain.F90 b/cicecore/cicedynB/infrastructure/ice_domain.F90 index ac56356e5..10254aa93 100644 --- a/cicecore/cicedynB/infrastructure/ice_domain.F90 +++ b/cicecore/cicedynB/infrastructure/ice_domain.F90 @@ -234,6 +234,7 @@ subroutine init_domain_blocks #if (defined CESMCOUPLED) nprocs = get_num_procs() #else + write(nu_diag,*) subname,'ERROR: nprocs, get_num_procs = ',nprocs,get_num_procs() call abort_ice(subname//'ERROR: Input nprocs not same as system request') #endif else if (nghost < 1) then diff --git a/configuration/scripts/cice.launch.csh b/configuration/scripts/cice.launch.csh index bc9ff2b99..28e020efc 100755 --- a/configuration/scripts/cice.launch.csh +++ b/configuration/scripts/cice.launch.csh @@ -11,7 +11,7 @@ set jobfile = $1 source ${ICE_SCRIPTS}/setup_machparams.csh 
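# Note on the branches that follow: ICE_MACHCOMP is the combined
# machine_compiler string (for example narwhal_intelhpcx), while the old
# ICE_MACHINE test used the machine name alone.  Matching on ICE_MACHCOMP
# lets one machine pick a different launcher per compiler/MPI stack, as in
# the narwhal_*hpcx* branch below (mpirun) versus the other narwhal and
# onyx builds (aprun).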
#========================================== -if (${ICE_MACHINE} =~ cheyenne*) then +if (${ICE_MACHCOMP} =~ cheyenne*) then if (${ICE_COMMDIR} =~ serial*) then cat >> ${jobfile} << EOFR ./cice >&! \$ICE_RUNLOG_FILE @@ -23,7 +23,7 @@ EOFR endif #======= -else if (${ICE_MACHINE} =~ hobart* || ${ICE_MACHINE} =~ izumi*) then +else if (${ICE_MACHCOMP} =~ hobart* || ${ICE_MACHCOMP} =~ izumi*) then if (${ICE_COMMDIR} =~ serial*) then cat >> ${jobfile} << EOFR ./cice >&! \$ICE_RUNLOG_FILE @@ -35,7 +35,7 @@ EOFR endif #======= -else if (${ICE_MACHINE} =~ gaffney* || ${ICE_MACHINE} =~ koehr* || ${ICE_MACHINE} =~ mustang*) then +else if (${ICE_MACHCOMP} =~ gaffney* || ${ICE_MACHCOMP} =~ koehr* || ${ICE_MACHCOMP} =~ mustang*) then if (${ICE_COMMDIR} =~ serial*) then cat >> ${jobfile} << EOFR ./cice >&! \$ICE_RUNLOG_FILE @@ -47,7 +47,7 @@ EOFR endif #======= -else if (${ICE_MACHINE} =~ nrlssc*) then +else if (${ICE_MACHCOMP} =~ nrlssc*) then if (${ICE_COMMDIR} =~ serial*) then cat >> ${jobfile} << EOFR ./cice >&! \$ICE_RUNLOG_FILE @@ -59,13 +59,19 @@ EOFR endif #======= -else if (${ICE_MACHINE} =~ onyx* || ${ICE_MACHINE} =~ narwhal) then +else if (${ICE_MACHCOMP} =~ narwhal_*hpcx*) then cat >> ${jobfile} << EOFR -aprun -n ${ntasks} -N ${taskpernodelimit} -d ${nthrds} ./cice >&! \$ICE_RUNLOG_FILE +mpirun -np ${ntasks} -hostfile \$PBS_NODEFILE \${EXTRA_OMPI_SETTINGS} ./cice >&! \$ICE_RUNLOG_FILE +EOFR + +#======= +else if (${ICE_MACHCOMP} =~ onyx* || ${ICE_MACHCOMP} =~ narwhal*) then +cat >> ${jobfile} << EOFR +aprun -q -n ${ntasks} -N ${taskpernodelimit} -d ${nthrds} ./cice >&! \$ICE_RUNLOG_FILE EOFR #======= -else if (${ICE_MACHINE} =~ cori*) then +else if (${ICE_MACHCOMP} =~ cori*) then if (${ICE_COMMDIR} =~ serial*) then cat >> ${jobfile} << EOFR ./cice >&! \$ICE_RUNLOG_FILE @@ -77,7 +83,7 @@ EOFR endif #======= -else if (${ICE_MACHINE} =~ compy*) then +else if (${ICE_MACHCOMP} =~ compy*) then if (${ICE_COMMDIR} =~ serial*) then cat >> ${jobfile} << EOFR ./cice >&! \$ICE_RUNLOG_FILE @@ -89,7 +95,7 @@ EOFR endif #======= -else if (${ICE_MACHINE} =~ badger*) then +else if (${ICE_MACHCOMP} =~ badger*) then if (${ICE_COMMDIR} =~ serial*) then cat >> ${jobfile} << EOFR ./cice >&! \$ICE_RUNLOG_FILE @@ -101,7 +107,7 @@ EOFR endif #======= -else if (${ICE_MACHINE} =~ fram*) then +else if (${ICE_MACHCOMP} =~ fram*) then if (${ICE_COMMDIR} =~ serial*) then cat >> ${jobfile} << EOFR ./cice >&! \$ICE_RUNLOG_FILE @@ -113,7 +119,7 @@ EOFR endif #======= -else if (${ICE_MACHINE} =~ cesium*) then +else if (${ICE_MACHCOMP} =~ cesium*) then if (${ICE_COMMDIR} =~ serial*) then cat >> ${jobfile} << EOFR ./cice >&! \$ICE_RUNLOG_FILE @@ -125,7 +131,7 @@ EOFR endif #======= -else if (${ICE_MACHINE} =~ millikan*) then +else if (${ICE_MACHCOMP} =~ millikan*) then if (${ICE_COMMDIR} =~ serial*) then cat >> ${jobfile} << EOFR ./cice >&! \$ICE_RUNLOG_FILE @@ -137,7 +143,7 @@ EOFR endif #======= -else if (${ICE_MACHINE} =~ daley* || ${ICE_MACHINE} =~ banting*) then +else if (${ICE_MACHCOMP} =~ daley* || ${ICE_MACHCOMP} =~ banting*) then if (${ICE_COMMDIR} =~ serial*) then cat >> ${jobfile} << EOFR ./cice >&! \$ICE_RUNLOG_FILE @@ -149,7 +155,7 @@ EOFR endif #======= -else if (${ICE_MACHINE} =~ ppp5* || ${ICE_MACHINE} =~ ppp6* || ${ICE_MACHINE} =~ robert* || ${ICE_MACHINE} =~ underhill*) then +else if (${ICE_MACHCOMP} =~ ppp5* || ${ICE_MACHCOMP} =~ ppp6* || ${ICE_MACHCOMP} =~ robert* || ${ICE_MACHCOMP} =~ underhill*) then if (${ICE_COMMDIR} =~ serial*) then cat >> ${jobfile} << EOFR ./cice >&! 
\$ICE_RUNLOG_FILE @@ -161,7 +167,7 @@ EOFR endif #======= -else if (${ICE_MACHINE} =~ ppp3*) then +else if (${ICE_MACHCOMP} =~ ppp3*) then if (${ICE_COMMDIR} =~ serial*) then cat >> ${jobfile} << EOFR ./cice >&! \$ICE_RUNLOG_FILE @@ -173,7 +179,7 @@ EOFR endif #======= -else if (${ICE_MACHINE} =~ gpsc3*) then +else if (${ICE_MACHCOMP} =~ gpsc3*) then if (${ICE_COMMDIR} =~ serial*) then cat >> ${jobfile} << EOFR ./cice >&! \$ICE_RUNLOG_FILE @@ -186,7 +192,7 @@ endif #======= -else if (${ICE_MACHINE} =~ freya*) then +else if (${ICE_MACHCOMP} =~ freya*) then if (${ICE_COMMDIR} =~ serial*) then cat >> ${jobfile} << EOFR aprun -n 1 -N 1 -d 1 ./cice >&! \$ICE_RUNLOG_FILE @@ -198,45 +204,45 @@ EOFR endif #======= -else if (${ICE_MACHINE} =~ gaea*) then +else if (${ICE_MACHCOMP} =~ gaea*) then cat >> ${jobfile} << EOFR srun -n ${ntasks} -c ${nthrds} ./cice >&! \$ICE_RUNLOG_FILE EOFR #======= -else if (${ICE_MACHINE} =~ hera*) then +else if (${ICE_MACHCOMP} =~ hera*) then cat >> ${jobfile} << EOFR srun -n ${ntasks} -c ${nthrds} ./cice >&! \$ICE_RUNLOG_FILE EOFR #======= -else if (${ICE_MACHINE} =~ orion*) then +else if (${ICE_MACHCOMP} =~ orion*) then cat >> ${jobfile} << EOFR srun -n ${ntasks} -c ${nthrds} ./cice >&! \$ICE_RUNLOG_FILE EOFR #======= -else if (${ICE_MACHINE} =~ high_Sierra*) then +else if (${ICE_MACHCOMP} =~ high_Sierra*) then cat >> ${jobfile} << EOFR mpirun -np ${ntasks} ./cice >&! \$ICE_RUNLOG_FILE #./cice >&! \$ICE_RUNLOG_FILE EOFR #======= -else if (${ICE_MACHINE} =~ phase3*) then +else if (${ICE_MACHCOMP} =~ phase3*) then cat >> ${jobfile} << EOFR mpirun -np ${ntasks} ./cice >&! \$ICE_RUNLOG_FILE #./cice >&! \$ICE_RUNLOG_FILE EOFR #======= -else if (${ICE_MACHINE} =~ testmachine*) then +else if (${ICE_MACHCOMP} =~ testmachine*) then cat >> ${jobfile} << EOFR ./cice >&! \$ICE_RUNLOG_FILE EOFR #======= -else if (${ICE_MACHINE} =~ travisCI*) then +else if (${ICE_MACHCOMP} =~ travisCI*) then if (${ICE_COMMDIR} =~ serial*) then cat >> ${jobfile} << EOFR ./cice >&! \$ICE_RUNLOG_FILE @@ -251,7 +257,7 @@ endif #EOFR #======= -else if (${ICE_MACHINE} =~ conda*) then +else if (${ICE_MACHCOMP} =~ conda*) then if (${ICE_COMMDIR} =~ serial*) then cat >> ${jobfile} << EOFR ./cice >&! 
\$ICE_RUNLOG_FILE @@ -265,7 +271,7 @@ endif #======= else - echo "${0} ERROR ${ICE_MACHINE} unknown" + echo "${0} ERROR ${ICE_MACHCOMP} unknown" exit -1 endif #======= diff --git a/configuration/scripts/machines/Macros.narwhal_aocc b/configuration/scripts/machines/Macros.narwhal_aocc index 95f301e85..b22aeda64 100644 --- a/configuration/scripts/machines/Macros.narwhal_aocc +++ b/configuration/scripts/machines/Macros.narwhal_aocc @@ -49,7 +49,7 @@ INCLDIR := $(INCLDIR) ifeq ($(ICE_THREADED), true) LDFLAGS += -mp - CFLAGS += -mp +# CFLAGS += -mp FFLAGS += -mp else LDFLAGS += -nomp diff --git a/configuration/scripts/machines/Macros.narwhal_intelhpcx b/configuration/scripts/machines/Macros.narwhal_intelhpcx new file mode 100644 index 000000000..0e2a94b20 --- /dev/null +++ b/configuration/scripts/machines/Macros.narwhal_intelhpcx @@ -0,0 +1,57 @@ +#============================================================================== +# Macros file for NAVYDSRC narwhal, intel compiler with hpcx hcoll openmpi +#============================================================================== + +CPP := fpp +CPPDEFS := -DFORTRANUNDERSCORE ${ICE_CPPDEFS} +CFLAGS := -c -O2 -fp-model precise -fcommon + +FIXEDFLAGS := -132 +FREEFLAGS := -FR +FFLAGS := -fp-model precise -convert big_endian -assume byterecl -ftz -traceback +# -mcmodel medium -shared-intel +FFLAGS_NOOPT:= -O0 + +ifeq ($(ICE_BLDDEBUG), true) + FFLAGS += -O0 -g -check uninit -check bounds -check pointers -fpe0 -check noarg_temp_created +# FFLAGS += -O0 -g -check all -fpe0 -ftrapuv -fp-model except -check noarg_temp_created -init=snan,arrays +else + FFLAGS += -O2 +endif + +SCC := cc +SFC := ftn +MPICC := mpicc +MPIFC := mpif90 + +ifeq ($(ICE_COMMDIR), mpi) + FC := $(MPIFC) + CC := $(MPICC) +else + FC := $(SFC) + CC := $(SCC) +endif +LD:= $(FC) + +# defined by module +NETCDF_PATH := $(NETCDF_DIR) +#PNETCDF_PATH := $(PNETCDF) +#PNETCDF_PATH := /glade/apps/opt/pnetcdf/1.3.0/intel/default +#LAPACK_LIBDIR := /glade/apps/opt/lapack/3.4.2/intel/12.1.5/lib + +#PIO_CONFIG_OPTS:= --enable-filesystem-hints=gpfs + +INCLDIR := $(INCLDIR) +INCLDIR += -I$(NETCDF_PATH)/include + +LIB_NETCDF := $(NETCDF_PATH)/lib +#LIB_PNETCDF := $(PNETCDF_PATH)/lib +#LIB_MPI := $(IMPILIBDIR) +SLIBS := $(HCOLL_LIBS) -L$(NETCDF_PATH)/lib -lnetcdf -lnetcdff + +ifeq ($(ICE_THREADED), true) + LDFLAGS += -qopenmp + CFLAGS += -qopenmp + FFLAGS += -qopenmp +endif + diff --git a/configuration/scripts/machines/env.narwhal_aocc b/configuration/scripts/machines/env.narwhal_aocc index 4016a1d7d..875296520 100755 --- a/configuration/scripts/machines/env.narwhal_aocc +++ b/configuration/scripts/machines/env.narwhal_aocc @@ -14,21 +14,18 @@ module unload PrgEnv-cray module unload PrgEnv-gnu module unload PrgEnv-intel module unload PrgEnv-nvidia -module load PrgEnv-aocc/8.1.0 -module load cray-pals/1.0.17 -module load bct-env/0.1 -module unload aocc -module load aocc/2.2.0.1 -module unload cray-mpich -module load cray-mpich/8.1.5 - +module load PrgEnv-aocc +module load cpe/22.03 +module load aocc/3.0.0 +module unload cray-pals +module load cray-pals/1.2.2 module unload cray-hdf5 module unload cray-hdf5-parallel module unload cray-netcdf-hdf5parallel module unload cray-parallel-netcdf module unload netcdf -module load cray-netcdf/4.7.4.4 -module load cray-hdf5/1.12.0.4 +module load cray-netcdf/4.8.1.1 +module load cray-hdf5/1.12.1.1 setenv NETCDF_PATH ${NETCDF_DIR} limit coredumpsize unlimited @@ -41,7 +38,7 @@ endif setenv ICE_MACHINE_MACHNAME narwhal setenv ICE_MACHINE_MACHINFO "Cray EX AMD EPYC 
7H12 2.6GHz, Slingshot-10 Interconnect" setenv ICE_MACHINE_ENVNAME intel -setenv ICE_MACHINE_ENVINFO "aocc_3.0.0-Build#78 2020_12_10 clang/flang 12.0.0, cray-mpich/8.1.5, netcdf/4.7.4.4" +setenv ICE_MACHINE_ENVINFO "aocc_3.0.0-Build#78 2020_12_10 clang/flang 12.0.0, cray-mpich/8.1.14, netcdf/4.8.1.1" setenv ICE_MACHINE_MAKE gmake setenv ICE_MACHINE_WKDIR $WORKDIR/CICE_RUNS setenv ICE_MACHINE_INPUTDATA /p/work1/projects/RASM/cice_consortium diff --git a/configuration/scripts/machines/env.narwhal_cray b/configuration/scripts/machines/env.narwhal_cray index d0fcc9ba7..a9e5bd14a 100755 --- a/configuration/scripts/machines/env.narwhal_cray +++ b/configuration/scripts/machines/env.narwhal_cray @@ -14,21 +14,17 @@ module unload PrgEnv-cray module unload PrgEnv-gnu module unload PrgEnv-intel module unload PrgEnv-nvidia -module load PrgEnv-cray/8.1.0 -module load cray-pals/1.0.17 -module load bct-env/0.1 -module unload cce -module load cce/12.0.3 -module unload cray-mpich -module load cray-mpich/8.1.9 - +module load PrgEnv-cray +module load cpe/22.03 +module unload cray-pals +module load cray-pals/1.2.2 module unload cray-hdf5 module unload cray-hdf5-parallel module unload cray-netcdf-hdf5parallel module unload cray-parallel-netcdf module unload netcdf -module load cray-netcdf/4.7.4.4 -module load cray-hdf5/1.12.0.4 +module load cray-netcdf/4.8.1.1 +module load cray-hdf5/1.12.1.1 setenv NETCDF_PATH ${NETCDF_DIR} limit coredumpsize unlimited @@ -39,9 +35,9 @@ setenv OMP_WAIT_POLICY PASSIVE endif setenv ICE_MACHINE_MACHNAME narwhal -setenv ICE_MACHINE_MACHINFO "Cray EX AMD EPYC 7H12" +setenv ICE_MACHINE_MACHINFO "Cray EX AMD EPYC 7H12 2.6GHz, Slingshot-10 Interconnect" setenv ICE_MACHINE_ENVNAME cray -setenv ICE_MACHINE_ENVINFO "cce 12.0.3, cray-mpich/8.1.9, netcdf/4.7.4.4" +setenv ICE_MACHINE_ENVINFO "cce 13.0.2, cray-mpich/8.1.14, netcdf/4.8.1.1" setenv ICE_MACHINE_MAKE gmake setenv ICE_MACHINE_WKDIR $WORKDIR/CICE_RUNS setenv ICE_MACHINE_INPUTDATA /p/work1/projects/RASM/cice_consortium diff --git a/configuration/scripts/machines/env.narwhal_gnu b/configuration/scripts/machines/env.narwhal_gnu index 51a272f4e..701920161 100755 --- a/configuration/scripts/machines/env.narwhal_gnu +++ b/configuration/scripts/machines/env.narwhal_gnu @@ -14,21 +14,17 @@ module unload PrgEnv-cray module unload PrgEnv-gnu module unload PrgEnv-intel module unload PrgEnv-nvidia -module load PrgEnv-gnu/8.1.0 -module load cray-pals/1.0.17 -module load bct-env/0.1 -module unload gcc -module load gcc/11.2.0 -module unload cray-mpich -module load cray-mpich/8.1.9 - +module load PrgEnv-gnu +module load cpe/22.03 +module unload cray-pals +module load cray-pals/1.2.2 module unload cray-hdf5 module unload cray-hdf5-parallel module unload cray-netcdf-hdf5parallel module unload cray-parallel-netcdf module unload netcdf -module load cray-netcdf/4.7.4.4 -module load cray-hdf5/1.12.0.4 +module load cray-netcdf/4.8.1.1 +module load cray-hdf5/1.12.1.1 setenv NETCDF_PATH ${NETCDF_DIR} limit coredumpsize unlimited @@ -39,9 +35,9 @@ setenv OMP_WAIT_POLICY PASSIVE endif setenv ICE_MACHINE_MACHNAME narwhal -setenv ICE_MACHINE_MACHINFO "Cray EX AMD EPYC 7H12" -setenv ICE_MACHINE_ENVNAME intel -setenv ICE_MACHINE_ENVINFO "gnu fortran/c 11.2.0, cray-mpich/8.1.9, netcdf/4.7.4.4" +setenv ICE_MACHINE_MACHINFO "Cray EX AMD EPYC 7H12 2.6GHz, Slingshot-10 Interconnect" +setenv ICE_MACHINE_ENVNAME gnu +setenv ICE_MACHINE_ENVINFO "gnu c/fortran 11.2.0 20210728, cray-mpich/8.1.14, netcdf/4.8.1.1" setenv ICE_MACHINE_MAKE gmake setenv ICE_MACHINE_WKDIR 
$WORKDIR/CICE_RUNS setenv ICE_MACHINE_INPUTDATA /p/work1/projects/RASM/cice_consortium diff --git a/configuration/scripts/machines/env.narwhal_intel b/configuration/scripts/machines/env.narwhal_intel index f79d962ff..4cc60acac 100755 --- a/configuration/scripts/machines/env.narwhal_intel +++ b/configuration/scripts/machines/env.narwhal_intel @@ -14,21 +14,17 @@ module unload PrgEnv-cray module unload PrgEnv-gnu module unload PrgEnv-intel module unload PrgEnv-nvidia -module load PrgEnv-intel/8.0.0 -module load cray-pals/1.0.17 -module load bct-env/0.1 -module unload intel -module load intel/2021.1 -module unload cray-mpich -module load cray-mpich/8.1.9 - +module load PrgEnv-intel +module load cpe/22.03 +module unload cray-pals +module load cray-pals/1.2.2 module unload cray-hdf5 module unload cray-hdf5-parallel module unload cray-netcdf-hdf5parallel module unload cray-parallel-netcdf module unload netcdf -module load cray-netcdf/4.7.4.4 -module load cray-hdf5/1.12.0.4 +module load cray-netcdf/4.8.1.1 +module load cray-hdf5/1.12.1.1 setenv NETCDF_PATH ${NETCDF_DIR} limit coredumpsize unlimited @@ -39,9 +35,9 @@ setenv OMP_WAIT_POLICY PASSIVE endif setenv ICE_MACHINE_MACHNAME narwhal -setenv ICE_MACHINE_MACHINFO "Cray EX AMD EPYC 7H12" +setenv ICE_MACHINE_MACHINFO "Cray EX AMD EPYC 7H12 2.6GHz, Slingshot-10 Interconnect" setenv ICE_MACHINE_ENVNAME intel -setenv ICE_MACHINE_ENVINFO "ifort 2021.1 Beta 20201112, cray-mpich/8.1.9, netcdf/4.7.4.4" +setenv ICE_MACHINE_ENVINFO "ifort 2021.4.0 20210910, cray-mpich/8.1.14, netcdf/4.8.1.1" setenv ICE_MACHINE_MAKE gmake setenv ICE_MACHINE_WKDIR $WORKDIR/CICE_RUNS setenv ICE_MACHINE_INPUTDATA /p/work1/projects/RASM/cice_consortium diff --git a/configuration/scripts/machines/env.narwhal_intelhpcx b/configuration/scripts/machines/env.narwhal_intelhpcx new file mode 100755 index 000000000..5b7a33ece --- /dev/null +++ b/configuration/scripts/machines/env.narwhal_intelhpcx @@ -0,0 +1,52 @@ +#!/bin/csh -f + +set inp = "undefined" +if ($#argv == 1) then + set inp = $1 +endif + +if ("$inp" != "-nomodules") then + +source ${MODULESHOME}/init/csh + +module unload hpcx-ompi-intel +module unload libfabric +module unload PrgEnv-aocc +module unload PrgEnv-cray +module unload PrgEnv-gnu +module unload PrgEnv-intel +module unload PrgEnv-nvidia +module unload craype-network-ofi +module unload craype-network-ucx +module load hpcx-ompi-intel +module load libfabric + +module unload cray-hdf5 +module unload cray-hdf5-parallel +module unload cray-netcdf-hdf5parallel +module unload cray-parallel-netcdf +module unload netcdf +module load cray-netcdf/4.8.1.1 +module load cray-hdf5/1.12.1.1 + +limit coredumpsize unlimited +limit stacksize unlimited +setenv OMP_STACKSIZE 128M +setenv OMP_WAIT_POLICY PASSIVE + +endif + +setenv ICE_MACHINE_MACHNAME narwhal +setenv ICE_MACHINE_MACHINFO "Cray EX AMD EPYC 7H12 2.6GHz, Slingshot-10 Interconnect" +setenv ICE_MACHINE_ENVNAME intel +setenv ICE_MACHINE_ENVINFO "ifort 2021.4.0 20210910, hpcx2.9.0 openmpi, netcdf/4.8.1.1" +setenv ICE_MACHINE_MAKE gmake +setenv ICE_MACHINE_WKDIR $WORKDIR/CICE_RUNS +setenv ICE_MACHINE_INPUTDATA /p/work1/projects/RASM/cice_consortium +setenv ICE_MACHINE_BASELINE $WORKDIR/CICE_BASELINE +setenv ICE_MACHINE_SUBMIT "qsub " +setenv ICE_MACHINE_ACCT P00000000 +setenv ICE_MACHINE_QUEUE "debug" +setenv ICE_MACHINE_TPNODE 128 # tasks per node +setenv ICE_MACHINE_BLDTHRDS 12 +setenv ICE_MACHINE_QSTAT "qstat " diff --git a/configuration/scripts/tests/baseline.script b/configuration/scripts/tests/baseline.script index 
bb8f50a1f..2700fe71f 100644 --- a/configuration/scripts/tests/baseline.script +++ b/configuration/scripts/tests/baseline.script @@ -29,7 +29,7 @@ if (${ICE_BASECOM} != ${ICE_SPVAL}) then set base_file = `ls -1t ${ICE_BASELINE}/${ICE_BASECOM}/${ICE_TESTNAME}/cice.runlog* | head -1` echo "" - echo "bfb Log Compare Mode:" + echo "Regression Log Compare Mode:" echo "base_file: ${base_file}" echo "test_file: ${test_file}" @@ -58,7 +58,7 @@ if (${ICE_BASECOM} != ${ICE_SPVAL}) then endif echo "" - echo "Regression Compare Mode:" + echo "Regression Restart Compare Mode:" echo "base_dir: ${base_dir}" echo "test_dir: ${test_dir}" @@ -71,7 +71,7 @@ if (${ICE_BASECOM} != ${ICE_SPVAL}) then set base_file = `ls -1t ${ICE_BASELINE}/${ICE_BASECOM}/${ICE_TESTNAME}/cice.runlog* | head -1` echo "" - echo "bfb Log Compare Mode:" + echo "Regression Log Compare Mode:" echo "base_file: ${base_file}" echo "test_file: ${test_file}" From 670d2f26f76dac276ff3c88dc7f8c6193f336517 Mon Sep 17 00:00:00 2001 From: Lettie Roach Date: Tue, 1 Nov 2022 16:06:58 -0400 Subject: [PATCH 31/57] Add time-varying wave forcing, add warning for FSD without waves, make FSD tendencies per second (#775) * Correct units in history output * Add warning for no waves with FSD, make FSD tendencies per s * Add capability for time-varying wave forcing * Make FSD history output use aice, not aice_init --- cicecore/cicedynB/analysis/ice_history.F90 | 2 +- .../cicedynB/analysis/ice_history_fsd.F90 | 45 +++-- cicecore/cicedynB/general/ice_forcing.F90 | 191 +++++++++++++++++- cicecore/cicedynB/general/ice_init.F90 | 15 +- 4 files changed, 219 insertions(+), 34 deletions(-) diff --git a/cicecore/cicedynB/analysis/ice_history.F90 b/cicecore/cicedynB/analysis/ice_history.F90 index caaa56295..f5e7d0d16 100644 --- a/cicecore/cicedynB/analysis/ice_history.F90 +++ b/cicecore/cicedynB/analysis/ice_history.F90 @@ -3546,7 +3546,7 @@ subroutine accum_hist (dt) call accum_hist_drag (iblk) ! floe size distribution - call accum_hist_fsd (iblk) + call accum_hist_fsd (dt, iblk) ! 
advanced snow physics call accum_hist_snow (iblk) diff --git a/cicecore/cicedynB/analysis/ice_history_fsd.F90 b/cicecore/cicedynB/analysis/ice_history_fsd.F90 index 18e936e13..b52db4e05 100644 --- a/cicecore/cicedynB/analysis/ice_history_fsd.F90 +++ b/cicecore/cicedynB/analysis/ice_history_fsd.F90 @@ -220,23 +220,23 @@ subroutine init_hist_fsd_3Df "areal floe size distribution", & "per unit bin width ", c1, c0, ns, f_afsd) if (f_dafsd_newi(1:1) /= 'x') & - call define_hist_field(n_dafsd_newi,"dafsd_newi","1",tstr3Df, tcstr, & + call define_hist_field(n_dafsd_newi,"dafsd_newi","1/s",tstr3Df, tcstr, & "Change in fsd: new ice", & "Avg over freq period", c1, c0, ns, f_dafsd_newi) if (f_dafsd_latg(1:1) /= 'x') & - call define_hist_field(n_dafsd_latg,"dafsd_latg","1",tstr3Df, tcstr, & + call define_hist_field(n_dafsd_latg,"dafsd_latg","1/s",tstr3Df, tcstr, & "Change in fsd: lateral growth", & "Avg over freq period", c1, c0, ns, f_dafsd_latg) if (f_dafsd_latm(1:1) /= 'x') & - call define_hist_field(n_dafsd_latm,"dafsd_latm","1",tstr3Df, tcstr, & + call define_hist_field(n_dafsd_latm,"dafsd_latm","1/s",tstr3Df, tcstr, & "Change in fsd: lateral melt", & "Avg over freq period", c1, c0, ns, f_dafsd_latm) if (f_dafsd_wave(1:1) /= 'x') & - call define_hist_field(n_dafsd_wave,"dafsd_wave","1",tstr3Df, tcstr, & + call define_hist_field(n_dafsd_wave,"dafsd_wave","1/s",tstr3Df, tcstr, & "Change in fsd: waves", & "Avg over freq period", c1, c0, ns, f_dafsd_wave) if (f_dafsd_weld(1:1) /= 'x') & - call define_hist_field(n_dafsd_weld,"dafsd_weld","1",tstr3Df, tcstr, & + call define_hist_field(n_dafsd_weld,"dafsd_weld","1/s",tstr3Df, tcstr, & "Change in fsd: welding", & "Avg over freq period", c1, c0, ns, f_dafsd_weld) endif ! if (histfreq(ns) /= 'x') @@ -288,16 +288,19 @@ end subroutine init_hist_fsd_4Df ! accumulate average ice quantities or snapshots ! author: Elizabeth C. Hunke, LANL - subroutine accum_hist_fsd (iblk) + subroutine accum_hist_fsd (dt, iblk) use ice_blocks, only: nx_block, ny_block use ice_constants, only: c0, c1, c2, c4 use ice_history_shared, only: a2D, a3Df, a4Df, nfsd_hist, & ncat_hist, accum_hist_field, n3Dacum, n4Dscum - use ice_state, only: trcrn, aicen_init, vicen, aice_init + use ice_state, only: trcrn, aicen, vicen, aice use ice_arrays_column, only: wave_sig_ht, floe_rad_c, floe_binwidth, & d_afsd_newi, d_afsd_latg, d_afsd_latm, d_afsd_wave, d_afsd_weld + real (kind=dbl_kind), intent(in) :: & + dt ! time step + integer (kind=int_kind), intent(in) :: & iblk ! block index @@ -342,7 +345,7 @@ subroutine accum_hist_fsd (iblk) worka(i,j) = c0 do n = 1, ncat_hist do k = 1, nfsd_hist - worka(i,j) = worka(i,j) + aicen_init(i,j,n,iblk)*trcrn(i,j,nt_fsd+k-1,n,iblk) + worka(i,j) = worka(i,j) + aicen(i,j,n,iblk)*trcrn(i,j,nt_fsd+k-1,n,iblk) end do end do end do @@ -357,7 +360,7 @@ subroutine accum_hist_fsd (iblk) workb = c0 do n = 1, ncat_hist do k = 1, nfsd_hist - workc = aicen_init(i,j,n,iblk)*trcrn(i,j,nt_fsd+k-1,n,iblk) & + workc = aicen(i,j,n,iblk)*trcrn(i,j,nt_fsd+k-1,n,iblk) & / (c4*floeshape*floe_rad_c(k)**2) ! 
number-mean radius worka(i,j) = worka(i,j) + workc * floe_rad_c(k) @@ -380,7 +383,7 @@ subroutine accum_hist_fsd (iblk) workb = c0 do n = 1, ncat_hist do k = 1, nfsd_hist - workb = workb + aicen_init(i,j,n,iblk)*trcrn(i,j,nt_fsd+k-1,n,iblk) + workb = workb + aicen(i,j,n,iblk)*trcrn(i,j,nt_fsd+k-1,n,iblk) worka(i,j) = worka(i,j) + vicen(i,j,n,iblk)*trcrn(i,j,nt_fsd+k-1,n,iblk) end do end do @@ -398,12 +401,12 @@ subroutine accum_hist_fsd (iblk) do j = 1, ny_block do i = 1, nx_block worka(i,j) = c0 - if (aice_init(i,j,iblk) > puny) then + if (aice(i,j,iblk) > puny) then do k = 1, nfsd_hist do n = 1, ncat_hist worka(i,j) = worka(i,j) & + (trcrn(i,j,nt_fsd+k-1,n,iblk) * floe_rad_c(k) & - * aicen_init(i,j,n,iblk)/aice_init(i,j,iblk)) + * aicen(i,j,n,iblk)/aice(i,j,iblk)) end do end do endif @@ -416,12 +419,12 @@ subroutine accum_hist_fsd (iblk) do j = 1, ny_block do i = 1, nx_block worka(i,j) = c0 - if (aice_init(i,j,iblk) > puny) then + if (aice(i,j,iblk) > puny) then do k = 1, nfsd_hist do n = 1, ncat_hist worka(i,j) = worka(i,j) & + (c8*floeshape*trcrn(i,j,nt_fsd+k-1,n,iblk)*floe_rad_c(k) & - *aicen_init(i,j,n,iblk)/(c4*floeshape*floe_rad_c(k)**2 *aice_init(i,j,iblk))) + *aicen(i,j,n,iblk)/(c4*floeshape*floe_rad_c(k)**2 *aice(i,j,iblk))) end do end do endif @@ -442,7 +445,7 @@ subroutine accum_hist_fsd (iblk) worke(i,j,k)=c0 do n = 1, ncat_hist worke(i,j,k) = worke(i,j,k) + (trcrn(i,j,nt_fsd+k-1,n,iblk) & - * aicen_init(i,j,n,iblk)/floe_binwidth(k)) + * aicen(i,j,n,iblk)/floe_binwidth(k)) end do end do end do @@ -452,19 +455,19 @@ subroutine accum_hist_fsd (iblk) if (f_dafsd_newi(1:1)/= 'x') & call accum_hist_field(n_dafsd_newi-n3Dacum, iblk, nfsd_hist, & - d_afsd_newi(:,:,1:nfsd_hist,iblk), a3Df) + d_afsd_newi(:,:,1:nfsd_hist,iblk)/dt, a3Df) if (f_dafsd_latg(1:1)/= 'x') & call accum_hist_field(n_dafsd_latg-n3Dacum, iblk, nfsd_hist, & - d_afsd_latg(:,:,1:nfsd_hist,iblk), a3Df) + d_afsd_latg(:,:,1:nfsd_hist,iblk)/dt, a3Df) if (f_dafsd_latm(1:1)/= 'x') & call accum_hist_field(n_dafsd_latm-n3Dacum, iblk, nfsd_hist, & - d_afsd_latm(:,:,1:nfsd_hist,iblk), a3Df) + d_afsd_latm(:,:,1:nfsd_hist,iblk)/dt, a3Df) if (f_dafsd_wave(1:1)/= 'x') & call accum_hist_field(n_dafsd_wave-n3Dacum, iblk, nfsd_hist, & - d_afsd_wave(:,:,1:nfsd_hist,iblk), a3Df) + d_afsd_wave(:,:,1:nfsd_hist,iblk)/dt, a3Df) if (f_dafsd_weld(1:1)/= 'x') & call accum_hist_field(n_dafsd_weld-n3Dacum, iblk, nfsd_hist, & - d_afsd_weld(:,:,1:nfsd_hist,iblk), a3Df) + d_afsd_weld(:,:,1:nfsd_hist,iblk)/dt, a3Df) endif ! a3Df allocated ! 
4D floe size, thickness category fields @@ -476,7 +479,7 @@ subroutine accum_hist_fsd (iblk) do j = 1, ny_block do i = 1, nx_block workd(i,j,k,n) = trcrn(i,j,nt_fsd+k-1,n,iblk) & - * aicen_init(i,j,n,iblk)/floe_binwidth(k) + * aicen(i,j,n,iblk)/floe_binwidth(k) end do end do end do diff --git a/cicecore/cicedynB/general/ice_forcing.F90 b/cicecore/cicedynB/general/ice_forcing.F90 index 381686c9b..ff79778c5 100755 --- a/cicecore/cicedynB/general/ice_forcing.F90 +++ b/cicecore/cicedynB/general/ice_forcing.F90 @@ -22,7 +22,7 @@ module ice_forcing use ice_boundary, only: ice_HaloUpdate use ice_blocks, only: nx_block, ny_block use ice_domain, only: halo_info - use ice_domain_size, only: ncat, max_blocks, nx_global, ny_global + use ice_domain_size, only: ncat, max_blocks, nx_global, ny_global, nfreq use ice_communicate, only: my_task, master_task use ice_calendar, only: istep, istep1, & msec, mday, mmonth, myear, yday, daycal, & @@ -116,6 +116,9 @@ module ice_forcing topmelt_data, & botmelt_data + real (kind=dbl_kind), dimension(:,:,:,:,:), allocatable :: & + wave_spectrum_data ! field values at 2 temporal data points + character(char_len), public :: & atm_data_format, & ! 'bin'=binary or 'nc'=netcdf ocn_data_format, & ! 'bin'=binary or 'nc'=netcdf @@ -229,6 +232,7 @@ subroutine alloc_forcing ocn_frc_m(nx_block,ny_block, max_blocks,nfld,12), & ! ocn data for 12 months topmelt_file(ncat), & botmelt_file(ncat), & + wave_spectrum_data(nx_block,ny_block,nfreq,2,max_blocks), & stat=ierr) if (ierr/=0) call abort_ice('(alloc_forcing): Out of Memory') @@ -1527,6 +1531,44 @@ subroutine interpolate_data (field_data, field) end subroutine interpolate_data +!======================================================================= + + subroutine interpolate_wavespec_data (field_data, field) + +! Linear interpolation + +! author: Elizabeth C. Hunke, LANL + + use ice_domain, only: nblocks + + real (kind=dbl_kind), dimension(nx_block,ny_block,nfreq,2,max_blocks), intent(in) :: & + field_data ! 2 values used for interpolation + + real (kind=dbl_kind), dimension(nx_block,ny_block,nfreq,max_blocks), intent(out) :: & + field ! interpolated field + + ! local variables + + integer (kind=int_kind) :: i,j, iblk, freq + + character(len=*), parameter :: subname = '(interpolate_wavespec_data)' + + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + do freq = 1, nfreq + field(i,j,freq,iblk) = c1intp * field_data(i,j,freq,1,iblk) & + + c2intp * field_data(i,j,freq,2,iblk) + enddo + enddo + enddo + enddo + !$OMP END PARALLEL DO + + end subroutine interpolate_wavespec_data + + !======================================================================= subroutine file_year (data_file, yr) @@ -5566,7 +5608,6 @@ subroutine get_wave_spec use ice_arrays_column, only: wave_spectrum, & dwavefreq, wavefreq use ice_constants, only: c0 - use ice_domain_size, only: nfreq use ice_timers, only: ice_timer_start, ice_timer_stop, timer_fsd ! local variables @@ -5592,7 +5633,6 @@ subroutine get_wave_spec ! if no wave data is provided, wave_spectrum is zero everywhere wave_spectrum(:,:,:,:) = c0 - wave_spec_dir = ocn_data_dir debug_forcing = .false. !
wave spectrum and frequencies @@ -5610,10 +5650,7 @@ subroutine get_wave_spec file=__FILE__, line=__LINE__) else #ifdef USE_NETCDF - call ice_open_nc(wave_spec_file,fid) - call ice_read_nc_xyf (fid, 1, 'efreq', wave_spectrum(:,:,:,:), debug_forcing, & - field_loc_center, field_type_scalar) - call ice_close_nc(fid) + call wave_spec_data #else write (nu_diag,*) "wave spectrum file not available, requires cpp USE_NETCDF" write (nu_diag,*) "wave spectrum file not available, using default profile" @@ -5628,6 +5665,146 @@ end subroutine get_wave_spec +!======================================================================= +! +! Read in wave spectrum forcing as a function of time (6-hourly records). +! Adapted by LR from the JRA55_data routine: the fields were changed, and +! the reading was changed from 3-hourly to 6-hourly. +! + subroutine wave_spec_data + + use ice_blocks, only: block, get_block + use ice_global_reductions, only: global_minval, global_maxval + use ice_domain, only: nblocks, distrb_info, blocks_ice + use ice_arrays_column, only: wave_spectrum, & + dwavefreq, wavefreq + use ice_read_write, only: ice_read_nc_xyf + use ice_grid, only: hm, tlon, tlat, tmask, umask + use ice_calendar, only: days_per_year, use_leap_years + + integer (kind=int_kind) :: & + ncid , & ! netcdf file id + i, j, freq , & + ixm,ixx,ixp , & ! record numbers for neighboring data records + recnum , & ! record number + maxrec , & ! maximum record number + recslot , & ! spline slot for current record + midmonth , & ! middle day of month + dataloc , & ! = 1 for data located in middle of time interval + ! = 2 for data located at end of time interval + iblk , & ! block index + ilo,ihi,jlo,jhi, & ! beginning and end of physical domain + yr ! current forcing year + + real (kind=dbl_kind) :: & + sec6hr , & ! number of seconds in 6 hours + secday , & ! number of seconds in day + vmin, vmax + + logical (kind=log_kind) :: readm, read6, debug_n_d + + type (block) :: & + this_block ! block information for current block + + real(kind=dbl_kind), dimension(nfreq) :: & + wave_spectrum_profile ! wave spectrum + + character(len=64) :: fieldname !netcdf field name + character(char_len_long) :: spec_file + character(char_len) :: wave_spec_type + logical (kind=log_kind) :: wave_spec + character(len=*), parameter :: subname = '(wave_spec_data)' + + + + debug_n_d = .false. !usually false + + call icepack_query_parameters(secday_out=secday) + call icepack_warnings_flush(nu_diag) + if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & + file=__FILE__, line=__LINE__) + + call icepack_init_wave(nfreq, & + wave_spectrum_profile, & + wavefreq, dwavefreq) + + + !spec_file = trim(ocn_data_dir)//'/'//trim(wave_spec_file) + spec_file = trim(wave_spec_file) + wave_spectrum_data = c0 + wave_spectrum = c0 + yr = fyear ! current year + !------------------------------------------------------------------- + ! 6-hourly data + ! + ! Assume that the 6-hourly value is located at the end of the + ! 6-hour period. This is the convention for NCEP reanalysis data. + ! E.g. record 1 gives conditions at 6 am GMT on 1 January. + !------------------------------------------------------------------- + + dataloc = 2 ! data located at end of interval + sec6hr = secday/c4 ! seconds in 6 hours + !maxrec = 1460 ! 365*4; for leap years = 366*4 + + if (use_leap_years) days_per_year = 366 !overrides setting of 365 in ice_calendar + maxrec = days_per_year*4 + + if(days_per_year == 365 .and.
(mod(yr, 4) == 0)) then + call abort_ice('days_per_year should be set to 366 for leap years') + end if + + ! current record number + recnum = 4*int(yday) - 3 + int(real(msec,kind=dbl_kind)/sec6hr) + + ! Compute record numbers for the surrounding data + ! (the current record and the one before it) + + ixm = mod(recnum+maxrec-2,maxrec) + 1 + ixx = mod(recnum-1, maxrec) + 1 + + ! Compute interpolation coefficients + ! If data is located at the end of the time interval, then the + ! data value for the current record goes in slot 2 + + recslot = 2 + ixp = -99 + call interp_coeff (recnum, recslot, sec6hr, dataloc) + + ! Read + read6 = .false. + if (istep==1 .or. oldrecnum .ne. recnum) read6 = .true. + !------------------------------------------------------------------- + ! File is NETCDF + ! file variable names are: + ! efreq (wave spectrum, energy as a function of wave frequency UNITS) + !------------------------------------------------------------------- + call ice_open_nc(spec_file,ncid) + + call ice_read_nc_xyf(ncid,recnum,'efreq',wave_spectrum_data(:,:,:,1,:),debug_n_d, & + field_loc=field_loc_center, & + field_type=field_type_scalar) + call ice_read_nc_xyf(ncid,recnum,'efreq',wave_spectrum_data(:,:,:,2,:),debug_n_d, & + field_loc=field_loc_center, & + field_type=field_type_scalar) + call ice_close_nc(ncid) + + + ! Interpolate + call interpolate_wavespec_data (wave_spectrum_data, wave_spectrum) + + ! Save record number + oldrecnum = recnum + + if (local_debug) then + if (my_task == master_task) write (nu_diag,*) & + 'wave_spec_data ',spec_file + if (my_task.eq.master_task) & + write (nu_diag,*) 'maxrec',maxrec + write (nu_diag,*) 'days_per_year', days_per_year + + endif ! local debug + + end subroutine wave_spec_data + !======================================================================= ! initial snow aging lookup table diff --git a/cicecore/cicedynB/general/ice_init.F90 b/cicecore/cicedynB/general/ice_init.F90 index 0b9a62542..a6c4d7941 100644 --- a/cicecore/cicedynB/general/ice_init.F90 +++ b/cicecore/cicedynB/general/ice_init.F90 @@ -1583,7 +1583,12 @@ subroutine input_data wave_spec = .false. if (tr_fsd .and. (trim(wave_spec_type) /= 'none')) wave_spec = .true. - + if (tr_fsd .and. (trim(wave_spec_type) == 'none')) then + if (my_task == master_task) then + write(nu_diag,*) subname//' WARNING: tr_fsd=T but wave_spec=F - not recommended' + endif + end if + !
compute grid locations for thermo, u and v fields grid_ice_thrm = 'T' @@ -2075,18 +2080,18 @@ subroutine input_data if (wave_spec) then tmpstr2 = ' : use wave spectrum for floe size distribution' else - tmpstr2 = ' : floe size distribution does not use wave spectrum' + tmpstr2 = 'WARNING : floe size distribution does not use wave spectrum' endif write(nu_diag,1010) ' wave_spec = ', wave_spec,trim(tmpstr2) if (wave_spec) then if (trim(wave_spec_type) == 'none') then tmpstr2 = ' : no wave data provided, no wave-ice interactions' elseif (trim(wave_spec_type) == 'profile') then - tmpstr2 = ' : use fixed dummy wave spectrum for testing' + tmpstr2 = ' : use fixed dummy wave spectrum for testing, sea surface height generated using constant phase (1 iteration of wave fracture)' elseif (trim(wave_spec_type) == 'constant') then - tmpstr2 = ' : constant wave spectrum data file provided for testing' + tmpstr2 = ' : wave spectrum data file provided, sea surface height generated using constant phase (1 iteration of wave fracture)' elseif (trim(wave_spec_type) == 'random') then - tmpstr2 = ' : wave data file provided, spectrum generated using random number' + tmpstr2 = ' : wave spectrum data file provided, sea surface height generated using random number (multiple iterations of wave fracture to convergence)' else tmpstr2 = ' : unknown value' endif From 3820cde643af1ec4354f2a6f727c98ff120559b2 Mon Sep 17 00:00:00 2001 From: Tony Craig Date: Tue, 1 Nov 2022 15:21:54 -0700 Subject: [PATCH 32/57] Port to gust intel and cray (#781) --- .../cicedynB/dynamics/ice_transport_remap.F90 | 1 + configuration/scripts/cice.batch.csh | 13 ++++ configuration/scripts/cice.launch.csh | 12 ++++ .../scripts/machines/Macros.gust_cray | 71 ++++++++++++++++++ .../scripts/machines/Macros.gust_intel | 68 ++++++++++++++++++ .../scripts/machines/env.badger_intel | 0 .../scripts/machines/env.banting_gnu | 0 .../scripts/machines/env.banting_intel | 0 .../scripts/machines/env.cheyenne_gnu | 0 .../scripts/machines/env.cheyenne_intel | 0 .../scripts/machines/env.cheyenne_pgi | 0 .../scripts/machines/env.compy_intel | 0 .../scripts/machines/env.conda_linux | 0 .../scripts/machines/env.conda_macos | 0 configuration/scripts/machines/env.cori_intel | 0 configuration/scripts/machines/env.daley_gnu | 0 .../scripts/machines/env.daley_intel | 0 configuration/scripts/machines/env.fram_intel | 0 configuration/scripts/machines/env.freya_gnu | 0 .../scripts/machines/env.freya_intel | 0 configuration/scripts/machines/env.gaea_intel | 0 .../scripts/machines/env.gaffney_gnu | 0 .../scripts/machines/env.gaffney_intel | 0 configuration/scripts/machines/env.gust_cray | 72 +++++++++++++++++++ configuration/scripts/machines/env.gust_intel | 72 +++++++++++++++++++ configuration/scripts/machines/env.hera_intel | 0 .../scripts/machines/env.high_Sierra_gnu | 0 .../scripts/machines/env.hobart_intel | 0 configuration/scripts/machines/env.hobart_nag | 0 configuration/scripts/machines/env.izumi_gnu | 0 .../scripts/machines/env.izumi_intel | 0 configuration/scripts/machines/env.izumi_nag | 0 configuration/scripts/machines/env.izumi_pgi | 0 .../scripts/machines/env.koehr_intel | 0 .../scripts/machines/env.mustang_intel18 | 0 .../scripts/machines/env.mustang_intel19 | 0 .../scripts/machines/env.mustang_intel20 | 0 .../scripts/machines/env.narwhal_aocc | 0 .../scripts/machines/env.narwhal_cray | 0 .../scripts/machines/env.narwhal_gnu | 0 .../scripts/machines/env.narwhal_intel | 0 .../scripts/machines/env.narwhal_intelhpcx | 0 
configuration/scripts/machines/env.nrlssc_gnu | 0 configuration/scripts/machines/env.onyx_cray | 0 configuration/scripts/machines/env.onyx_gnu | 0 configuration/scripts/machines/env.onyx_intel | 0 .../scripts/machines/env.orion_intel | 0 .../scripts/machines/env.phase3_intel | 0 .../scripts/machines/env.testmachine_intel | 0 .../scripts/machines/env.travisCI_gnu | 0 50 files changed, 309 insertions(+) create mode 100644 configuration/scripts/machines/Macros.gust_cray create mode 100644 configuration/scripts/machines/Macros.gust_intel mode change 100755 => 100644 configuration/scripts/machines/env.badger_intel mode change 100755 => 100644 configuration/scripts/machines/env.banting_gnu mode change 100755 => 100644 configuration/scripts/machines/env.banting_intel mode change 100755 => 100644 configuration/scripts/machines/env.cheyenne_gnu mode change 100755 => 100644 configuration/scripts/machines/env.cheyenne_intel mode change 100755 => 100644 configuration/scripts/machines/env.cheyenne_pgi mode change 100755 => 100644 configuration/scripts/machines/env.compy_intel mode change 100755 => 100644 configuration/scripts/machines/env.conda_linux mode change 100755 => 100644 configuration/scripts/machines/env.conda_macos mode change 100755 => 100644 configuration/scripts/machines/env.cori_intel mode change 100755 => 100644 configuration/scripts/machines/env.daley_gnu mode change 100755 => 100644 configuration/scripts/machines/env.daley_intel mode change 100755 => 100644 configuration/scripts/machines/env.fram_intel mode change 100755 => 100644 configuration/scripts/machines/env.freya_gnu mode change 100755 => 100644 configuration/scripts/machines/env.freya_intel mode change 100755 => 100644 configuration/scripts/machines/env.gaea_intel mode change 100755 => 100644 configuration/scripts/machines/env.gaffney_gnu mode change 100755 => 100644 configuration/scripts/machines/env.gaffney_intel create mode 100644 configuration/scripts/machines/env.gust_cray create mode 100644 configuration/scripts/machines/env.gust_intel mode change 100755 => 100644 configuration/scripts/machines/env.hera_intel mode change 100755 => 100644 configuration/scripts/machines/env.high_Sierra_gnu mode change 100755 => 100644 configuration/scripts/machines/env.hobart_intel mode change 100755 => 100644 configuration/scripts/machines/env.hobart_nag mode change 100755 => 100644 configuration/scripts/machines/env.izumi_gnu mode change 100755 => 100644 configuration/scripts/machines/env.izumi_intel mode change 100755 => 100644 configuration/scripts/machines/env.izumi_nag mode change 100755 => 100644 configuration/scripts/machines/env.izumi_pgi mode change 100755 => 100644 configuration/scripts/machines/env.koehr_intel mode change 100755 => 100644 configuration/scripts/machines/env.mustang_intel18 mode change 100755 => 100644 configuration/scripts/machines/env.mustang_intel19 mode change 100755 => 100644 configuration/scripts/machines/env.mustang_intel20 mode change 100755 => 100644 configuration/scripts/machines/env.narwhal_aocc mode change 100755 => 100644 configuration/scripts/machines/env.narwhal_cray mode change 100755 => 100644 configuration/scripts/machines/env.narwhal_gnu mode change 100755 => 100644 configuration/scripts/machines/env.narwhal_intel mode change 100755 => 100644 configuration/scripts/machines/env.narwhal_intelhpcx mode change 100755 => 100644 configuration/scripts/machines/env.nrlssc_gnu mode change 100755 => 100644 configuration/scripts/machines/env.onyx_cray mode change 100755 => 100644 
configuration/scripts/machines/env.onyx_gnu mode change 100755 => 100644 configuration/scripts/machines/env.onyx_intel mode change 100755 => 100644 configuration/scripts/machines/env.orion_intel mode change 100755 => 100644 configuration/scripts/machines/env.phase3_intel mode change 100755 => 100644 configuration/scripts/machines/env.testmachine_intel mode change 100755 => 100644 configuration/scripts/machines/env.travisCI_gnu diff --git a/cicecore/cicedynB/dynamics/ice_transport_remap.F90 b/cicecore/cicedynB/dynamics/ice_transport_remap.F90 index 6fd037b7b..286a51711 100644 --- a/cicecore/cicedynB/dynamics/ice_transport_remap.F90 +++ b/cicecore/cicedynB/dynamics/ice_transport_remap.F90 @@ -633,6 +633,7 @@ subroutine horizontal_remap (dt, ntrace, & endif ! nghost + ! tcraig, this OMP loop sometimes fails with cce/14.0.3, compiler bug?? !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,n, & !$OMP edgearea_e,edgearea_n,edge,iflux,jflux, & !$OMP xp,yp,indxing,indxjng,mflxe,mflxn, & diff --git a/configuration/scripts/cice.batch.csh b/configuration/scripts/cice.batch.csh index d737f78ba..1cf23da45 100755 --- a/configuration/scripts/cice.batch.csh +++ b/configuration/scripts/cice.batch.csh @@ -33,6 +33,19 @@ cat >> ${jobfile} << EOFB #PBS -l walltime=${batchtime} EOFB +else if (${ICE_MACHINE} =~ gust*) then +cat >> ${jobfile} << EOFB +#PBS -q ${queue} +#PBS -l job_priority=regular +#PBS -N ${ICE_CASENAME} +#PBS -A ${acct} +#PBS -l select=${nnodes}:ncpus=${corespernode}:mpiprocs=${taskpernodelimit}:ompthreads=${nthrds} +#PBS -l walltime=${batchtime} +#PBS -j oe +#PBS -W umask=022 +#PBS -o ${ICE_CASEDIR} +EOFB + else if (${ICE_MACHINE} =~ hobart*) then cat >> ${jobfile} << EOFB #PBS -j oe diff --git a/configuration/scripts/cice.launch.csh b/configuration/scripts/cice.launch.csh index 28e020efc..b13da1813 100755 --- a/configuration/scripts/cice.launch.csh +++ b/configuration/scripts/cice.launch.csh @@ -22,6 +22,18 @@ mpiexec_mpt -np ${ntasks} omplace ./cice >&! \$ICE_RUNLOG_FILE EOFR endif +#======= +else if (${ICE_MACHCOMP} =~ gust*) then +if (${ICE_COMMDIR} =~ serial*) then +cat >> ${jobfile} << EOFR +./cice >&! \$ICE_RUNLOG_FILE +EOFR +else +cat >> ${jobfile} << EOFR +mpiexec --cpu-bind depth -n ${ntasks} -ppn ${taskpernodelimit} -d ${nthrds} ./cice >&! 
\$ICE_RUNLOG_FILE +EOFR +endif + #======= else if (${ICE_MACHCOMP} =~ hobart* || ${ICE_MACHCOMP} =~ izumi*) then if (${ICE_COMMDIR} =~ serial*) then cat >> ${jobfile} << EOFR ./cice >&! \$ICE_RUNLOG_FILE diff --git a/configuration/scripts/machines/Macros.gust_cray b/configuration/scripts/machines/Macros.gust_cray new file mode 100644 index 000000000..eadc14884 --- /dev/null +++ b/configuration/scripts/machines/Macros.gust_cray @@ -0,0 +1,71 @@ +#============================================================================== +# Makefile macros for NCAR gust, cray compiler +#============================================================================== + +CPP := ftn -e P +CPPDEFS := -DFORTRANUNDERSCORE ${ICE_CPPDEFS} +CFLAGS := -c -O2 + +FIXEDFLAGS := -132 +FREEFLAGS := +FFLAGS := -hbyteswapio +FFLAGS_NOOPT:= -O0 +LDFLAGS := -hbyteswapio + +ifeq ($(ICE_BLDDEBUG), true) + FFLAGS += -O0 -hfp0 -g -Rbcdps -Ktrap=fp +else + FFLAGS += -O2 -hfp0 +endif + +SCC := cc +SFC := ftn +MPICC := cc +MPIFC := ftn + +ifeq ($(ICE_COMMDIR), mpi) + FC := $(MPIFC) + CC := $(MPICC) +else + FC := $(SFC) + CC := $(SCC) +endif +LD:= $(FC) + +NETCDF_PATH := $(NETCDF) + +PIO_CONFIG_OPTS:= --enable-filesystem-hints=gpfs + +PNETCDF_PATH := $(PNETCDF) +#PNETCDF_PATH := /glade/u/apps/ch/opt/pio/2.2/mpt/2.15f/intel/17.0.1/lib + +INCLDIR := $(INCLDIR) + +LIB_NETCDF := $(NETCDF)/lib +#LIB_PNETCDF := $(PNETCDF_PATH)/lib +#LIB_MPI := $(IMPILIBDIR) + +#SLIBS := -L$(LIB_NETCDF) -lnetcdf -lnetcdff -L$(LIB_PNETCDF) -lpnetcdf -lgptl +SLIBS := -L$(LIB_NETCDF) -lnetcdf -lnetcdff + +ifeq ($(ICE_THREADED), true) + LDFLAGS += -fopenmp + CFLAGS += -fopenmp + FFLAGS += -fopenmp +else + LDFLAGS += -hnoomp +# CFLAGS += -hnoomp + FFLAGS += -hnoomp +endif + +#ifeq ($(ICE_IOTYPE), pio1) +# LIB_PIO := $(PIO_LIBDIR) +# SLIBS := $(SLIBS) -L$(LIB_PIO) -lpio +#endif + +ifeq ($(ICE_IOTYPE), pio2) + CPPDEFS := $(CPPDEFS) -DGPTL +# LIB_PIO := $(PIO_LIBDIR) +# SLIBS := $(SLIBS) -L$(LIB_PIO) -lpiof -lpioc -lgptl +endif + diff --git a/configuration/scripts/machines/Macros.gust_intel b/configuration/scripts/machines/Macros.gust_intel new file mode 100644 index 000000000..18d5d10d0 --- /dev/null +++ b/configuration/scripts/machines/Macros.gust_intel @@ -0,0 +1,68 @@ +#============================================================================== +# Makefile macros for NCAR gust, intel compiler +#============================================================================== + +CPP := fpp +CPPDEFS := -DFORTRANUNDERSCORE ${ICE_CPPDEFS} +CFLAGS := -c -O2 -fp-model precise + +FIXEDFLAGS := -132 +FREEFLAGS := -FR +FFLAGS := -fp-model precise -convert big_endian -assume byterecl -ftz -traceback +FFLAGS_NOOPT:= -O0 + +ifeq ($(ICE_BLDDEBUG), true) + FFLAGS += -O0 -g -check uninit -check bounds -check pointers -fpe0 -check noarg_temp_created -link_mpi=dbg +# FFLAGS += -O0 -g -check all -fpe0 -ftrapuv -fp-model except -check noarg_temp_created -link_mpi=dbg -stand f08 +# FFLAGS += -O0 -g -check all -fpe0 -ftrapuv -fp-model except -check noarg_temp_created -init=snan,arrays -link_mpi=dbg +else + FFLAGS += -O2 +endif + +SCC := cc +SFC := ftn +MPICC := cc +MPIFC := ftn + +ifeq ($(ICE_COMMDIR), mpi) + FC := $(MPIFC) + CC := $(MPICC) +else + FC := $(SFC) + CC := $(SCC) +endif +LD:= $(FC) + +NETCDF_PATH := $(NETCDF) + +PIO_CONFIG_OPTS:= --enable-filesystem-hints=gpfs + +PNETCDF_PATH := $(PNETCDF) +#PNETCDF_PATH := /glade/u/apps/ch/opt/pio/2.2/mpt/2.15f/intel/17.0.1/lib + +INCLDIR := $(INCLDIR) + +LIB_NETCDF := $(NETCDF)/lib +#LIB_PNETCDF := $(PNETCDF_PATH)/lib +#LIB_MPI := $(IMPILIBDIR) + +#SLIBS := -L$(LIB_NETCDF)
-lnetcdf -lnetcdff -L$(LIB_PNETCDF) -lpnetcdf -lgptl +SLIBS := -L$(LIB_NETCDF) -lnetcdf -lnetcdff + +ifeq ($(ICE_THREADED), true) + LDFLAGS += -qopenmp + CFLAGS += -qopenmp + FFLAGS += -qopenmp +endif + +#ifeq ($(ICE_IOTYPE), pio1) +# LIB_PIO := $(PIO_LIBDIR) +# SLIBS := $(SLIBS) -L$(LIB_PIO) -lpio +#endif + +ifeq ($(ICE_IOTYPE), pio2) + CPPDEFS := $(CPPDEFS) -DGPTL +# LIB_PIO := $(PIO_LIBDIR) +# SLIBS := $(SLIBS) -L$(LIB_PIO) -lpiof -lpioc -lgptl +endif + diff --git a/configuration/scripts/machines/env.badger_intel b/configuration/scripts/machines/env.badger_intel old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.banting_gnu b/configuration/scripts/machines/env.banting_gnu old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.banting_intel b/configuration/scripts/machines/env.banting_intel old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.cheyenne_gnu b/configuration/scripts/machines/env.cheyenne_gnu old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.cheyenne_intel b/configuration/scripts/machines/env.cheyenne_intel old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.cheyenne_pgi b/configuration/scripts/machines/env.cheyenne_pgi old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.compy_intel b/configuration/scripts/machines/env.compy_intel old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.conda_linux b/configuration/scripts/machines/env.conda_linux old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.conda_macos b/configuration/scripts/machines/env.conda_macos old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.cori_intel b/configuration/scripts/machines/env.cori_intel old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.daley_gnu b/configuration/scripts/machines/env.daley_gnu old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.daley_intel b/configuration/scripts/machines/env.daley_intel old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.fram_intel b/configuration/scripts/machines/env.fram_intel old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.freya_gnu b/configuration/scripts/machines/env.freya_gnu old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.freya_intel b/configuration/scripts/machines/env.freya_intel old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.gaea_intel b/configuration/scripts/machines/env.gaea_intel old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.gaffney_gnu b/configuration/scripts/machines/env.gaffney_gnu old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.gaffney_intel b/configuration/scripts/machines/env.gaffney_intel old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.gust_cray b/configuration/scripts/machines/env.gust_cray new file mode 100644 index 000000000..8246cd9b9 --- /dev/null +++ b/configuration/scripts/machines/env.gust_cray @@ -0,0 +1,72 @@ +#!/bin/csh -f + +set inp = "undefined" +if ($#argv == 1) then + set inp = $1 +endif + +if ("$inp" != "-nomodules") then + +source ${MODULESHOME}/init/csh + +module --force purge +module load ncarenv/22.10 +module load craype/2.7.17 +module load cce/14.0.3 +module load ncarcompilers/0.7.1 +module load 
cray-mpich/8.1.19 +module load hdf5/1.12.2 +module load netcdf/4.8.1 +module load cray-libsci/22.08.1.1 + +if ($?ICE_IOTYPE) then +if ($ICE_IOTYPE =~ pio*) then + module unload netcdf + module load netcdf-mpi/4.8.1 + module load pnetcdf/1.12.2 + if ($ICE_IOTYPE == "pio1") then + module load pio/1.10.1 + else + module load pio/2.5.9 + endif +endif +endif + +if ($?ICE_BFBTYPE) then +if ($ICE_BFBTYPE =~ qcchk*) then + module load conda +# conda env create -f ../../configuration/scripts/tests/qctest.yml + conda activate qctest +endif +endif + +# For perftools with mpiexec +# module load perftools-base +# module load perftools +#setenv PALS_TRANSFER FALSE + +endif + +limit coredumpsize unlimited +limit stacksize unlimited +setenv PALS_QUIET TRUE + +# May be needed for OpenMP memory +setenv OMP_STACKSIZE 64M +# OMP runtime diagnostics +#setenv OMP_DISPLAY_ENV TRUE + +setenv ICE_MACHINE_MACHNAME gust +setenv ICE_MACHINE_MACHINFO "Cray XE Milan Slingshot 11" +setenv ICE_MACHINE_ENVNAME cray +setenv ICE_MACHINE_ENVINFO "cce 14.0.3, cray-mpich 8.1.19, netcdf4.8.1, pnetcdf1.12.2, pio2.5.9" +setenv ICE_MACHINE_MAKE gmake +setenv ICE_MACHINE_WKDIR /glade/gust/scratch/$user/CICE_RUNS +setenv ICE_MACHINE_INPUTDATA /glade/work/tcraig/cice-consortium-data +setenv ICE_MACHINE_BASELINE /glade/gust/scratch/$user/CICE_BASELINE +setenv ICE_MACHINE_SUBMIT "qsub" +setenv ICE_MACHINE_ACCT P00000000 +setenv ICE_MACHINE_QUEUE "main" +setenv ICE_MACHINE_TPNODE 128 +setenv ICE_MACHINE_BLDTHRDS 8 +setenv ICE_MACHINE_QSTAT "qstat " diff --git a/configuration/scripts/machines/env.gust_intel b/configuration/scripts/machines/env.gust_intel new file mode 100644 index 000000000..08a99b940 --- /dev/null +++ b/configuration/scripts/machines/env.gust_intel @@ -0,0 +1,72 @@ +#!/bin/csh -f + +set inp = "undefined" +if ($#argv == 1) then + set inp = $1 +endif + +if ("$inp" != "-nomodules") then + +source ${MODULESHOME}/init/csh + +module --force purge +module load ncarenv/22.10 +module load craype/2.7.17 +module load intel/2021.6.0 +module load ncarcompilers/0.7.1 +module load cray-mpich/8.1.19 +module load hdf5/1.12.2 +module load netcdf/4.8.1 +module load cray-libsci/22.08.1.1 + +if ($?ICE_IOTYPE) then +if ($ICE_IOTYPE =~ pio*) then + module unload netcdf + module load netcdf-mpi/4.8.1 + module load pnetcdf/1.12.2 + if ($ICE_IOTYPE == "pio1") then + module load pio/1.10.1 + else + module load pio/2.5.9 + endif +endif +endif + +if ($?ICE_BFBTYPE) then +if ($ICE_BFBTYPE =~ qcchk*) then + module load conda +# conda env create -f ../../configuration/scripts/tests/qctest.yml + conda activate qctest +endif +endif + +# For perftools with mpiexec
# module load perftools-base +# module load perftools +#setenv PALS_TRANSFER FALSE + +endif + +limit coredumpsize unlimited +limit stacksize unlimited +setenv PALS_QUIET TRUE + +# May be needed for OpenMP memory +setenv OMP_STACKSIZE 64M +# OMP runtime diagnostics +#setenv OMP_DISPLAY_ENV TRUE + +setenv ICE_MACHINE_MACHNAME gust +setenv ICE_MACHINE_MACHINFO "Cray XE Milan Slingshot 11" +setenv ICE_MACHINE_ENVNAME intel +setenv ICE_MACHINE_ENVINFO "ifort 2021.6.0 20220226, cray-mpich 8.1.19, netcdf4.8.1, pnetcdf1.12.2, pio2.5.9" +setenv ICE_MACHINE_MAKE gmake +setenv ICE_MACHINE_WKDIR /glade/gust/scratch/$user/CICE_RUNS +setenv ICE_MACHINE_INPUTDATA /glade/work/tcraig/cice-consortium-data +setenv ICE_MACHINE_BASELINE /glade/gust/scratch/$user/CICE_BASELINE +setenv ICE_MACHINE_SUBMIT "qsub" +setenv ICE_MACHINE_ACCT P00000000 +setenv ICE_MACHINE_QUEUE "main" +setenv ICE_MACHINE_TPNODE 128 +setenv
ICE_MACHINE_BLDTHRDS 8 +setenv ICE_MACHINE_QSTAT "qstat " diff --git a/configuration/scripts/machines/env.hera_intel b/configuration/scripts/machines/env.hera_intel old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.high_Sierra_gnu b/configuration/scripts/machines/env.high_Sierra_gnu old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.hobart_intel b/configuration/scripts/machines/env.hobart_intel old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.hobart_nag b/configuration/scripts/machines/env.hobart_nag old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.izumi_gnu b/configuration/scripts/machines/env.izumi_gnu old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.izumi_intel b/configuration/scripts/machines/env.izumi_intel old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.izumi_nag b/configuration/scripts/machines/env.izumi_nag old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.izumi_pgi b/configuration/scripts/machines/env.izumi_pgi old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.koehr_intel b/configuration/scripts/machines/env.koehr_intel old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.mustang_intel18 b/configuration/scripts/machines/env.mustang_intel18 old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.mustang_intel19 b/configuration/scripts/machines/env.mustang_intel19 old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.mustang_intel20 b/configuration/scripts/machines/env.mustang_intel20 old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.narwhal_aocc b/configuration/scripts/machines/env.narwhal_aocc old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.narwhal_cray b/configuration/scripts/machines/env.narwhal_cray old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.narwhal_gnu b/configuration/scripts/machines/env.narwhal_gnu old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.narwhal_intel b/configuration/scripts/machines/env.narwhal_intel old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.narwhal_intelhpcx b/configuration/scripts/machines/env.narwhal_intelhpcx old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.nrlssc_gnu b/configuration/scripts/machines/env.nrlssc_gnu old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.onyx_cray b/configuration/scripts/machines/env.onyx_cray old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.onyx_gnu b/configuration/scripts/machines/env.onyx_gnu old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.onyx_intel b/configuration/scripts/machines/env.onyx_intel old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.orion_intel b/configuration/scripts/machines/env.orion_intel old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.phase3_intel b/configuration/scripts/machines/env.phase3_intel old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.testmachine_intel b/configuration/scripts/machines/env.testmachine_intel old mode 100755 new mode 100644 diff --git a/configuration/scripts/machines/env.travisCI_gnu 
b/configuration/scripts/machines/env.travisCI_gnu old mode 100755 new mode 100644 From b893ee9424dd1256cda22e3cffbb0e3cf0754bfb Mon Sep 17 00:00:00 2001 From: "denise.worthen" Date: Wed, 2 Nov 2022 15:45:10 -0600 Subject: [PATCH 33/57] add initializations in ice_state * initialize vsnon/vsnon_init and vicen/vicen_init --- cicecore/cicedynB/general/ice_state.F90 | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/cicecore/cicedynB/general/ice_state.F90 b/cicecore/cicedynB/general/ice_state.F90 index 47b360e99..a7842ed5e 100644 --- a/cicecore/cicedynB/general/ice_state.F90 +++ b/cicecore/cicedynB/general/ice_state.F90 @@ -194,6 +194,10 @@ subroutine alloc_state trcr_base = c0 aicen = c0 aicen_init = c0 + vicen = c0 + vicen_init = c0 + vsnon = c0 + vsnon_init = c0 end subroutine alloc_state From aa1e066ad182c92ad94c83065d7964ae5ce3657c Mon Sep 17 00:00:00 2001 From: Tony Craig Date: Mon, 7 Nov 2022 11:25:53 -0800 Subject: [PATCH 34/57] Update KE diagnostic (#784) * Update KE diagnostic - Average U and V fields to T grid for KE calculation for B, C, and CD grids; raw U and V were combined with T grid variables in the prior implementation. Also - Update max speed calculation for C grid, calc speed at both E and N points - Refactor highfreq option u,v passed into icepack for wind stress calculation - This is bit-for-bit - Create local variables uvelT_icep and vvelT_icep to store highfreq info - Compute [uvelT,vvelT]_icep at start of therm calc instead of end of dynamics calc - No longer need to recalc [uvelT,vvelT]_icep on restart - Update some indentation in ice_diagnostics.F90 * Add grid average NE2TA, NE2UA --- .../cicedynB/analysis/ice_diagnostics.F90 | 156 +++++++++--------- cicecore/cicedynB/general/ice_state.F90 | 4 - cicecore/cicedynB/general/ice_step_mod.F90 | 57 ++++--- cicecore/cicedynB/infrastructure/ice_grid.F90 | 134 ++++++++++----- .../infrastructure/ice_restart_driver.F90 | 5 +- 5 files changed, 207 insertions(+), 149 deletions(-) diff --git a/cicecore/cicedynB/analysis/ice_diagnostics.F90 b/cicecore/cicedynB/analysis/ice_diagnostics.F90 index 83eb840d6..8879d6632 100644 --- a/cicecore/cicedynB/analysis/ice_diagnostics.F90 +++ b/cicecore/cicedynB/analysis/ice_diagnostics.F90 @@ -127,7 +127,7 @@ subroutine runtime_diags (dt) alvdr_init, alvdf_init, alidr_init, alidf_init use ice_flux_bgc, only: faero_atm, faero_ocn, fiso_atm, fiso_ocn use ice_global_reductions, only: global_sum, global_sum_prod, global_maxval - use ice_grid, only: lmask_n, lmask_s, tarean, tareas, grid_ice + use ice_grid, only: lmask_n, lmask_s, tarean, tareas, grid_ice, grid_average_X2Y use ice_state ! everything ! tcraig, this is likely to cause circular dependency because ice_prescribed_mod is high level routine #ifdef CESMCOUPLED @@ -199,7 +199,8 @@ subroutine runtime_diags (dt) prsnwavg, prhosavg, psmicetot, psmliqtot, psmtot real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & - work1, work2 + uvelT, vvelT, & ! u,v on T points + work1, work2 ! temporary real (kind=dbl_kind), parameter :: & maxval_spval = -0.9_dbl_kind*HUGE(0.0_dbl_kind) ! spval to detect @@ -209,9 +210,6 @@ ! returns -HUGE which we want to avoid writing. The ! return value is checked against maxval_spval before writing. -! real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & -! 
uvelT, vvelT - character(len=*), parameter :: subname = '(runtime_diags)' call icepack_query_parameters(ktherm_out=ktherm, calc_Tsfc_out=calc_Tsfc) @@ -284,51 +282,45 @@ subroutine runtime_diags (dt) if (tr_pond_topo) then !$OMP PARALLEL DO PRIVATE(iblk,i,j,n) do iblk = 1, nblocks - do j = 1, ny_block - do i = 1, nx_block - work1(i,j,iblk) = c0 - do n = 1, ncat - work1(i,j,iblk) = work1(i,j,iblk) & - + aicen(i,j,n,iblk) & - * trcrn(i,j,nt_apnd,n,iblk) & - * trcrn(i,j,nt_hpnd,n,iblk) + do j = 1, ny_block + do i = 1, nx_block + work1(i,j,iblk) = c0 + do n = 1, ncat + work1(i,j,iblk) = work1(i,j,iblk) & + + aicen(i,j,n,iblk) & + * trcrn(i,j,nt_apnd,n,iblk) & + * trcrn(i,j,nt_hpnd,n,iblk) + enddo + enddo enddo - enddo - enddo enddo !$OMP END PARALLEL DO ptotn = global_sum(work1, distrb_info, field_loc_center, tarean) ptots = global_sum(work1, distrb_info, field_loc_center, tareas) endif - ! total ice-snow kinetic energy + ! total ice-snow kinetic energy, on T points. + if (grid_ice == 'B') then + call grid_average_X2Y('A',uvel ,'U',uvelT,'T') + call grid_average_X2Y('A',vvel ,'U',vvelT,'T') + elseif (grid_ice == 'C') then + call grid_average_X2Y('A',uvelE,'E',uvelT,'T') + call grid_average_X2Y('A',vvelN,'N',vvelT,'T') + elseif (grid_ice == 'CD') then + call grid_average_X2Y('A',uvelE,'E',uvelN,'N',uvelT,'T') + call grid_average_X2Y('A',vvelE,'E',vvelN,'N',vvelT,'T') + endif + !$OMP PARALLEL DO PRIVATE(iblk,i,j) do iblk = 1, nblocks do j = 1, ny_block do i = 1, nx_block - work1(i,j,iblk) = p5 & - * (rhos*vsno(i,j,iblk) + rhoi*vice(i,j,iblk)) & - * (uvel(i,j,iblk)**2 + vvel(i,j,iblk)**2) + work1(i,j,iblk) = p5 * (rhos*vsno(i,j,iblk) + rhoi*vice(i,j,iblk)) & + * (uvelT(i,j,iblk)**2 + vvelT(i,j,iblk)**2) enddo enddo enddo - ! Eventually do energy diagnostic on T points. -! if (grid_ice == 'CD') then -! !$OMP PARALLEL DO PRIVATE(iblk,i,j) -! do iblk = 1, nblocks -! do j = 1, ny_block -! do i = 1, nx_block -! call grid_average_X2Y('E2TS',uvelE,uvelT) -! call grid_average_X2Y('N2TS',vvelN,vvelT) -! work1(i,j,iblk) = p5 & -! * (rhos*vsno(i,j,iblk) + rhoi*vice(i,j,iblk)) & -! * (uvelT(i,j,iblk)*uvelT(i,j,iblk) & -! + vvelT(i,j,iblk)*vvelT(i,j,iblk)) -! enddo -! enddo -! enddo -! endif -! !$OMP END PARALLEL DO + !$OMP END PARALLEL DO ketotn = c0 ketots = c0 ketotn = global_sum(work1, distrb_info, field_loc_center, tarean) @@ -420,21 +412,22 @@ subroutine runtime_diags (dt) do iblk = 1, nblocks do j = 1, ny_block do i = 1, nx_block - work1(i,j,iblk) = max(sqrt(uvelE(i,j,iblk)**2 & - + vvelE(i,j,iblk)**2), & - sqrt(uvelN(i,j,iblk)**2 & - + vvelN(i,j,iblk)**2)) + work1(i,j,iblk) = max(sqrt(uvelE(i,j,iblk)**2 + vvelE(i,j,iblk)**2), & + sqrt(uvelN(i,j,iblk)**2 + vvelN(i,j,iblk)**2)) enddo enddo enddo !$OMP END PARALLEL DO elseif (grid_ice == 'C') then + ! map uvelE to N and vvelN to E then compute max on E and N + call grid_average_X2Y('A',uvelE,'E',work1,'N') ! work1 =~ uvelN + call grid_average_X2Y('A',vvelN,'N',work2,'E') ! 
work2 =~ vvelE !$OMP PARALLEL DO PRIVATE(iblk,i,j) do iblk = 1, nblocks do j = 1, ny_block do i = 1, nx_block - work1(i,j,iblk) = sqrt(uvelE(i,j,iblk)**2 & - + vvelN(i,j,iblk)**2) + work1(i,j,iblk) = max(sqrt(uvelE(i,j,iblk)**2 + work2(i,j,iblk)**2), & + sqrt(work1(i,j,iblk)**2 + vvelN(i,j,iblk)**2)) enddo enddo enddo @@ -444,8 +437,7 @@ subroutine runtime_diags (dt) do iblk = 1, nblocks do j = 1, ny_block do i = 1, nx_block - work1(i,j,iblk) = sqrt(uvel(i,j,iblk)**2 & - + vvel(i,j,iblk)**2) + work1(i,j,iblk) = sqrt(uvel(i,j,iblk)**2 + vvel(i,j,iblk)**2) enddo enddo enddo @@ -466,31 +458,31 @@ subroutine runtime_diags (dt) if (umaxn > umax_stab) then !$OMP PARALLEL DO PRIVATE(iblk,i,j) do iblk = 1, nblocks - do j = 1, ny_block - do i = 1, nx_block - if (abs(work1(i,j,iblk) - umaxn) < puny) then - write(nu_diag,*) ' ' - write(nu_diag,*) 'Warning, large ice speed' - write(nu_diag,*) 'my_task, iblk, i, j, umaxn:', & - my_task, iblk, i, j, umaxn - endif - enddo - enddo + do j = 1, ny_block + do i = 1, nx_block + if (abs(work1(i,j,iblk) - umaxn) < puny) then + write(nu_diag,*) ' ' + write(nu_diag,*) 'Warning, large ice speed' + write(nu_diag,*) 'my_task, iblk, i, j, umaxn:', & + my_task, iblk, i, j, umaxn + endif + enddo + enddo enddo !$OMP END PARALLEL DO elseif (umaxs > umax_stab) then !$OMP PARALLEL DO PRIVATE(iblk,i,j) do iblk = 1, nblocks - do j = 1, ny_block - do i = 1, nx_block - if (abs(work1(i,j,iblk) - umaxs) < puny) then - write(nu_diag,*) ' ' - write(nu_diag,*) 'Warning, large ice speed' - write(nu_diag,*) 'my_task, iblk, i, j, umaxs:', & - my_task, iblk, i, j, umaxs - endif - enddo - enddo + do j = 1, ny_block + do i = 1, nx_block + if (abs(work1(i,j,iblk) - umaxs) < puny) then + write(nu_diag,*) ' ' + write(nu_diag,*) 'Warning, large ice speed' + write(nu_diag,*) 'my_task, iblk, i, j, umaxs:', & + my_task, iblk, i, j, umaxs + endif + enddo + enddo enddo !$OMP END PARALLEL DO endif ! 
umax @@ -1357,14 +1349,14 @@ subroutine init_mass_diags do n=1,n_aero !$OMP PARALLEL DO PRIVATE(iblk,i,j) do iblk = 1, nblocks - do j = 1, ny_block - do i = 1, nx_block - work1(i,j,iblk) = trcr(i,j,nt_aero +4*(n-1),iblk)*vsno(i,j,iblk) & - + trcr(i,j,nt_aero+1+4*(n-1),iblk)*vsno(i,j,iblk) & - + trcr(i,j,nt_aero+2+4*(n-1),iblk)*vice(i,j,iblk) & - + trcr(i,j,nt_aero+3+4*(n-1),iblk)*vice(i,j,iblk) - enddo - enddo + do j = 1, ny_block + do i = 1, nx_block + work1(i,j,iblk) = trcr(i,j,nt_aero +4*(n-1),iblk)*vsno(i,j,iblk) & + + trcr(i,j,nt_aero+1+4*(n-1),iblk)*vsno(i,j,iblk) & + + trcr(i,j,nt_aero+2+4*(n-1),iblk)*vice(i,j,iblk) & + + trcr(i,j,nt_aero+3+4*(n-1),iblk)*vice(i,j,iblk) + enddo + enddo enddo !$OMP END PARALLEL DO totaeron(n)= global_sum(work1, distrb_info, field_loc_center, tarean) @@ -1377,17 +1369,17 @@ subroutine init_mass_diags totps = c0 !$OMP PARALLEL DO PRIVATE(iblk,i,j,n) do iblk = 1, nblocks - do j = 1, ny_block - do i = 1, nx_block - work1(i,j,iblk) = c0 - do n = 1, ncat - work1(i,j,iblk) = work1(i,j,iblk) & - + aicen(i,j,n,iblk) & - * trcrn(i,j,nt_apnd,n,iblk) & - * trcrn(i,j,nt_hpnd,n,iblk) + do j = 1, ny_block + do i = 1, nx_block + work1(i,j,iblk) = c0 + do n = 1, ncat + work1(i,j,iblk) = work1(i,j,iblk) & + + aicen(i,j,n,iblk) & + * trcrn(i,j,nt_apnd,n,iblk) & + * trcrn(i,j,nt_hpnd,n,iblk) + enddo + enddo enddo - enddo - enddo enddo !$OMP END PARALLEL DO totpn = global_sum(work1, distrb_info, field_loc_center, tarean) diff --git a/cicecore/cicedynB/general/ice_state.F90 b/cicecore/cicedynB/general/ice_state.F90 index 10e0aabf8..7b20879bc 100644 --- a/cicecore/cicedynB/general/ice_state.F90 +++ b/cicecore/cicedynB/general/ice_state.F90 @@ -110,8 +110,6 @@ module ice_state public :: & uvel , & ! x-component of velocity on U grid (m/s) vvel , & ! y-component of velocity on U grid (m/s) - uvelT , & ! x-component of velocity on T grid (m/s) - vvelT , & ! y-component of velocity on T grid (m/s) uvelE , & ! x-component of velocity on E grid (m/s) vvelE , & ! y-component of velocity on E grid (m/s) uvelN , & ! x-component of velocity on N grid (m/s) @@ -159,8 +157,6 @@ subroutine alloc_state aice0 (nx_block,ny_block,max_blocks) , & ! concentration of open water uvel (nx_block,ny_block,max_blocks) , & ! x-component of velocity on U grid (m/s) vvel (nx_block,ny_block,max_blocks) , & ! y-component of velocity on U grid (m/s) - uvelT (nx_block,ny_block,max_blocks) , & ! x-component of velocity on T grid (m/s) - vvelT (nx_block,ny_block,max_blocks) , & ! y-component of velocity on T grid (m/s) uvelE (nx_block,ny_block,max_blocks) , & ! x-component of velocity on E grid (m/s) vvelE (nx_block,ny_block,max_blocks) , & ! y-component of velocity on E grid (m/s) uvelN (nx_block,ny_block,max_blocks) , & ! x-component of velocity on N grid (m/s) diff --git a/cicecore/cicedynB/general/ice_step_mod.F90 b/cicecore/cicedynB/general/ice_step_mod.F90 index 39f10ffdf..5742bb2b9 100644 --- a/cicecore/cicedynB/general/ice_step_mod.F90 +++ b/cicecore/cicedynB/general/ice_step_mod.F90 @@ -46,6 +46,10 @@ module ice_step_mod step_snow, prep_radiation, step_radiation, ocean_mixed_layer, & update_state, biogeochemistry, step_dyn_wave, step_prep + real (kind=dbl_kind), dimension (:,:,:), allocatable :: & + uvelT_icep, & ! uvel for wind stress computation in icepack + vvelT_icep ! 
vvel for wind stress computation in icepack + !======================================================================= contains @@ -80,6 +84,13 @@ subroutine step_prep use ice_flux, only: uatm, vatm, uatmT, vatmT use ice_grid, only: grid_atm_dynu, grid_atm_dynv, grid_average_X2Y + use ice_state, only: uvel, vvel + + logical (kind=log_kind) :: & + highfreq ! highfreq flag + + logical (kind=log_kind), save :: & + first_call = .true. ! first call flag character(len=*), parameter :: subname = '(step_prep)' @@ -92,6 +103,26 @@ subroutine step_prep call grid_average_X2Y('S',uatm,grid_atm_dynu,uatmT,'T') call grid_average_X2Y('S',vatm,grid_atm_dynv,vatmT,'T') + !----------------------------------------------------------------- + ! Compute uvelT_icep, vvelT_icep + !----------------------------------------------------------------- + + if (first_call) then + allocate(uvelT_icep(nx_block,ny_block,max_blocks)) + allocate(vvelT_icep(nx_block,ny_block,max_blocks)) + uvelT_icep = c0 + vvelT_icep = c0 + endif + + call icepack_query_parameters(highfreq_out=highfreq) + + if (highfreq) then + call grid_average_X2Y('A', uvel, 'U', uvelT_icep, 'T') + call grid_average_X2Y('A', vvel, 'U', vvelT_icep, 'T') + endif + + first_call = .false. + end subroutine step_prep !======================================================================= @@ -209,7 +240,7 @@ subroutine step_therm1 (dt, iblk) Qa_iso, Qref_iso, fiso_evap, HDO_ocn, H2_16O_ocn, H2_18O_ocn use ice_grid, only: lmask_n, lmask_s, tmask use ice_state, only: aice, aicen, aicen_init, vicen_init, & - vice, vicen, vsno, vsnon, trcrn, uvelT, vvelT, vsnon_init + vice, vicen, vsno, vsnon, trcrn, vsnon_init #ifdef CICE_IN_NEMO use ice_state, only: aice_init #endif @@ -251,8 +282,6 @@ subroutine step_therm1 (dt, iblk) tr_pond_lvl, tr_pond_topo, calc_Tsfc, highfreq, tr_snow real (kind=dbl_kind) :: & - uvelTij, & ! cell-centered velocity, x component (m/s) - vvelTij, & ! cell-centered velocity, y component (m/s) puny ! a very small number real (kind=dbl_kind), dimension(n_aero,2,ncat) :: & @@ -336,14 +365,6 @@ subroutine step_therm1 (dt, iblk) do j = jlo, jhi do i = ilo, ihi - if (highfreq) then ! include ice velocity in calculation of wind stress - uvelTij = uvelT(i,j,iblk) - vvelTij = vvelT(i,j,iblk) - else - uvelTij = c0 - vvelTij = c0 - endif ! highfreq - if (tr_snow) then do n = 1, ncat do k = 1, nslyr @@ -389,8 +410,8 @@ subroutine step_therm1 (dt, iblk) vicen = vicen (i,j,:,iblk), & vsno = vsno (i,j, iblk), & vsnon = vsnon (i,j,:,iblk), & - uvel = uvelTij , & - vvel = vvelTij , & + uvel = uvelT_icep (i,j, iblk), & + vvel = vvelT_icep (i,j, iblk), & Tsfc = trcrn (i,j,nt_Tsfc,:,iblk), & zqsn = trcrn (i,j,nt_qsno:nt_qsno+nslyr-1,:,iblk), & zqin = trcrn (i,j,nt_qice:nt_qice+nilyr-1,:,iblk), & @@ -939,7 +960,7 @@ subroutine step_dyn_horiz (dt) use ice_flux, only: strocnxU, strocnyU, strocnxT_iavg, strocnyT_iavg use ice_flux, only: init_history_dyn use ice_grid, only: grid_average_X2Y - use ice_state, only: aiU, uvel, vvel, uvelT, vvelT + use ice_state, only: aiU use ice_transport_driver, only: advection, transport_upwind, transport_remap real (kind=dbl_kind), intent(in) :: & @@ -971,14 +992,6 @@ subroutine step_dyn_horiz (dt) if (kdyn == 2) call eap (dt) if (kdyn == 3) call implicit_solver (dt) - !----------------------------------------------------------------- - ! Compute uvelT, vvelT - ! 
only needed for highfreq, but compute anyway - !----------------------------------------------------------------- - - call grid_average_X2Y('A', uvel, 'U', uvelT, 'T') - call grid_average_X2Y('A', vvel, 'U', vvelT, 'T') - !----------------------------------------------------------------- ! Compute strocnxT_iavg, strocnyT_iavg for thermo and coupling !----------------------------------------------------------------- diff --git a/cicecore/cicedynB/infrastructure/ice_grid.F90 b/cicecore/cicedynB/infrastructure/ice_grid.F90 index b7082bf93..dfccdd413 100644 --- a/cicecore/cicedynB/infrastructure/ice_grid.F90 +++ b/cicecore/cicedynB/infrastructure/ice_grid.F90 @@ -2680,13 +2680,23 @@ subroutine grid_average_X2Y_NEversion(type,work1a,grid1a,work1b,grid1b,work2,gri ! state masked case('NE2US') - call grid_average_X2YS_2('NE2US',work1a,narea,npm,work1b,earea,epm,work2) + call grid_average_X2Y_2('NE2US',work1a,narea,npm,work1b,earea,epm,work2) case('EN2US') - call grid_average_X2YS_2('NE2US',work1b,narea,npm,work1a,earea,epm,work2) + call grid_average_X2Y_2('NE2US',work1b,narea,npm,work1a,earea,epm,work2) case('NE2TS') - call grid_average_X2YS_2('NE2TS',work1a,narea,npm,work1b,earea,epm,work2) + call grid_average_X2Y_2('NE2TS',work1a,narea,npm,work1b,earea,epm,work2) case('EN2TS') - call grid_average_X2YS_2('NE2TS',work1b,narea,npm,work1a,earea,epm,work2) + call grid_average_X2Y_2('NE2TS',work1b,narea,npm,work1a,earea,epm,work2) + + ! state unmasked + case('NE2UA') + call grid_average_X2Y_2('NE2UA',work1a,narea,npm,work1b,earea,epm,work2) + case('EN2UA') + call grid_average_X2Y_2('NE2UA',work1b,narea,npm,work1a,earea,epm,work2) + case('NE2TA') + call grid_average_X2Y_2('NE2TA',work1a,narea,npm,work1b,earea,epm,work2) + case('EN2TA') + call grid_average_X2Y_2('NE2TA',work1b,narea,npm,work1a,earea,epm,work2) case default call abort_ice(subname//'ERROR: unknown X2Y '//trim(X2Y)) @@ -3580,36 +3590,6 @@ subroutine grid_average_X2YF(dir,work1,wght1,work2,wght2) end subroutine grid_average_X2YF -!======================================================================= -! Compute the minimum of adjacent values of a field at specific indices, -! depending on the grid location (U, E, N) -! - real(kind=dbl_kind) function grid_neighbor_min(field, i, j, grid_location) result(mini) - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - field ! field defined at T point - - integer (kind=int_kind), intent(in) :: & - i, j - - character(len=*), intent(in) :: & - grid_location ! grid location at which to compute the minumum (U, E, N) - - character(len=*), parameter :: subname = '(grid_neighbor_min)' - - select case (trim(grid_location)) - case('U') - mini = min(field(i,j), field(i+1,j), field(i,j+1), field(i+1,j+1)) - case('E') - mini = min(field(i,j), field(i+1,j)) - case('N') - mini = min(field(i,j), field(i,j+1)) - case default - call abort_ice(subname // ' unknown grid_location: ' // grid_location) - end select - - end function grid_neighbor_min - !======================================================================= ! Shifts quantities from one grid to another ! State masked version, simple weighted averager @@ -3618,7 +3598,7 @@ end function grid_neighbor_min ! ! author: T. 
Craig - subroutine grid_average_X2YS_2(dir,work1a,wght1a,mask1a,work1b,wght1b,mask1b,work2) + subroutine grid_average_X2Y_2(dir,work1a,wght1a,mask1a,work1b,wght1b,mask1b,work2) use ice_constants, only: c0 @@ -3645,7 +3625,7 @@ subroutine grid_average_X2YS_2(dir,work1a,wght1a,mask1a,work1b,wght1b,mask1b,wor type (block) :: & this_block ! block information for current block - character(len=*), parameter :: subname = '(grid_average_X2YS_2)' + character(len=*), parameter :: subname = '(grid_average_X2Y_2)' work2(:,:,:) = c0 @@ -3701,11 +3681,91 @@ subroutine grid_average_X2YS_2(dir,work1a,wght1a,mask1a,work1b,wght1b,mask1b,wor enddo !$OMP END PARALLEL DO + case('NE2UA') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + wtmp = (wght1a(i ,j ,iblk) & + + wght1a(i+1,j ,iblk) & + + wght1b(i ,j ,iblk) & + + wght1b(i ,j+1,iblk)) + if (wtmp /= c0) & + work2(i,j,iblk) = (work1a(i ,j ,iblk)*wght1a(i ,j ,iblk) & + + work1a(i+1,j ,iblk)*wght1a(i+1,j ,iblk) & + + work1b(i ,j ,iblk)*wght1b(i ,j ,iblk) & + + work1b(i ,j+1,iblk)*wght1b(i ,j+1,iblk)) & + / wtmp + enddo + enddo + enddo + !$OMP END PARALLEL DO + + case('NE2TA') + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + wtmp = (wght1a(i ,j-1,iblk) & + + wght1a(i ,j ,iblk) & + + wght1b(i-1,j ,iblk) & + + wght1b(i ,j ,iblk)) + if (wtmp /= c0) & + work2(i,j,iblk) = (work1a(i ,j-1,iblk)*wght1a(i ,j-1,iblk) & + + work1a(i ,j ,iblk)*wght1a(i ,j ,iblk) & + + work1b(i-1,j ,iblk)*wght1b(i-1,j ,iblk) & + + work1b(i ,j ,iblk)*wght1b(i ,j ,iblk)) & + / wtmp + enddo + enddo + enddo + !$OMP END PARALLEL DO + case default call abort_ice(subname//'ERROR: unknown option '//trim(dir)) end select - end subroutine grid_average_X2YS_2 + end subroutine grid_average_X2Y_2 + +!======================================================================= +! Compute the minimum of adjacent values of a field at specific indices, +! depending on the grid location (U, E, N) +! + real(kind=dbl_kind) function grid_neighbor_min(field, i, j, grid_location) result(mini) + + real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & + field ! field defined at T point + + integer (kind=int_kind), intent(in) :: & + i, j + + character(len=*), intent(in) :: & + grid_location ! grid location at which to compute the minumum (U, E, N) + + character(len=*), parameter :: subname = '(grid_neighbor_min)' + + select case (trim(grid_location)) + case('U') + mini = min(field(i,j), field(i+1,j), field(i,j+1), field(i+1,j+1)) + case('E') + mini = min(field(i,j), field(i+1,j)) + case('N') + mini = min(field(i,j), field(i,j+1)) + case default + call abort_ice(subname // ' unknown grid_location: ' // grid_location) + end select + + end function grid_neighbor_min !======================================================================= ! 
Compute the maximum of adjacent values of a field at specific indices, diff --git a/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 b/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 index 2c7d3d63c..bd5a49eaf 100644 --- a/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 +++ b/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 @@ -289,7 +289,7 @@ subroutine restartfile (ice_ic) use ice_grid, only: tmask, grid_type, grid_ice, grid_average_X2Y use ice_state, only: trcr_depend, aice, vice, vsno, trcr, & aice0, aicen, vicen, vsnon, trcrn, aice_init, uvel, vvel, & - uvelE, vvelE, uvelN, vvelN, uvelT, vvelT, & + uvelE, vvelE, uvelN, vvelN, & trcr_base, nt_strata, n_trcr_strata character (*), optional :: ice_ic @@ -403,9 +403,6 @@ subroutine restartfile (ice_ic) 'vvelN',1,diag,field_loc_Nface, field_type_vector) endif - call grid_average_X2Y('A', uvel, 'U', uvelT, 'T') - call grid_average_X2Y('A', vvel, 'U', vvelT, 'T') - !----------------------------------------------------------------- ! radiation fields !----------------------------------------------------------------- From 251ca48aa8797ac75329ec569cec84e39a094b0a Mon Sep 17 00:00:00 2001 From: Denise Worthen Date: Mon, 7 Nov 2022 19:02:57 -0500 Subject: [PATCH 35/57] Add wave-ice coupling to nuopc/cmeps driver (#782) * merge latest master (#4) * Isotopes for CICE (#423) Co-authored-by: apcraig Co-authored-by: David Bailey Co-authored-by: Elizabeth Hunke * updated orbital calculations needed for cesm * fixed problems in updated orbital calculations needed for cesm * update CICE6 to support coupling with UFS * put in changes so that both ufsatm and cesm requirements for potential temperature and density are satisfied * Convergence on ustar for CICE. (#452) (#5) * Add atmiter_conv to CICE * Add documentation * trigger build the docs Co-authored-by: David A. Bailey * update icepack submodule * Revert "update icepack submodule" This reverts commit e70d1abcbeb4351195a2b81c6ce3f623c936426c. * update comp_ice.backend with temporary ice_timers fix * Fix threading problem in init_bgc * Fix additional OMP problems * changes for coldstart running * Move the forapps directory * remove cesmcoupled ifdefs * Fix logging issues for NUOPC * removal of many cpp-ifdefs * fix compile errors * fixes to get cesm working * fixed white space issue * Add restart_coszen namelist option * update icepack submodule * change Orion to orion in backend remove duplicate print lines from ice_transport_driver * add -link_mpi=dbg to debug flags (#8) * cice6 compile (#6) * enable debug build. fix to remove errors * fix an error in comp_ice.backend.libcice * change Orion to orion for machine identification * changes for consistency w/ current emc-cice5 (#13) Update to emc/develop fork to current CICE consortium Co-authored-by: David A. 
Bailey Co-authored-by: Tony Craig Co-authored-by: Elizabeth Hunke Co-authored-by: Mariana Vertenstein Co-authored-by: apcraig Co-authored-by: Philippe Blain * Fixcommit (#14) Align commit history between emc/develop and cice-consortium/master * Update CICE6 for integration to S2S * add wcoss_dell_p3 compiler macro * update to icepack w/ debug fix * replace SITE with MACHINE_ID * update compile scripts * Support TACC stampede (#19) * update icepack * add ice_dyn_vp module to CICE_InitMod * update gitmodules, update icepack * Update CICE to consortium master (#23) updates include: * deprecate upwind advection (CICE-Consortium#508) * add implicit VP solver (CICE-Consortium#491) * update icepack * switch icepack branches * update to icepack master but set abort flag in ITD routine to false * update icepack * Update CICE to latest Consortium master (#26) update CICE and Icepack * changes the criteria for aborting ice for thermo-conservation errors * updates the time manager * fixes two bugs in ice_therm_mushy * updates Icepack to Consortium master w/ flip of abort flag for troublesome IC cases * add cice changes for zlvs (#29) * update icepack and pointer * update icepack and revert gitmodules * Fix history features - Fix bug in history time axis when sec_init is not zero. - Fix issue with time_beg and time_end uninitialized values. - Add support for averaging with histfreq='1' by allowing histfreq_n to be any value in that case. Extend and clean up construct_filename for history files. More could be done, but wanted to preserve backwards compatibility. - Add new calendar_sec2hms that converts daily seconds to hh:mm:ss (a sketch of this conversion follows below). Update the calchk calendar unit tester to check this method - Remove abort test in bcstchk, this was just causing problems in regression testing - Remove known problems documentation about problems writing when istep=1. This issue does not exist anymore with the updated time manager. - Add new tests with hist_avg = false. Add set_nml.histinst.
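For reference, the daily-seconds to hh:mm:ss conversion described above reduces to integer division and remainders; a minimal sketch under that assumption (the routine name, argument list, and body here are illustrative only, not the actual calendar_sec2hms interface):

    subroutine sec2hms_sketch(seconds, hh, mm, ss)
      ! illustrative only: converts elapsed seconds of the day to h/m/s;
      ! the real calendar_sec2hms interface in ice_calendar may differ
      integer, intent(in)  :: seconds   ! seconds of the day, 0 <= seconds <= 86399
      integer, intent(out) :: hh, mm, ss
      hh = seconds/3600                 ! whole hours
      mm = mod(seconds,3600)/60         ! whole minutes past the hour
      ss = mod(seconds,60)              ! remaining seconds
    end subroutine sec2hms_sketch

For example, seconds = 45000 yields 12:30:00.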
* revert set_nml.histall * fix implementation error * update model log output in ice_init * Fix QC issues - Add netcdf status checks and aborts in ice_read_write.F90 - Check for end of file when reading records in ice_read_write.F90 for ice_read_nc methods - Update set_nml.qc to better specify the test, turn off leap years since we're cycling 2005 data - Add check in cice.t-test.py to make sure there are at least 1825 files, 5 years of data - Add QC run to base_suite.ts to verify qc runs to completion and possibility to use those results directly for QC validation - Clean up error messages and some indentation in ice_read_write.F90 * Update testing - Add prod suite including 10 year gx1prod and qc test - Update unit test compare scripts * update documentation * reset calchk to 100000 years * update evp1d test * update icepack * update icepack * add memory profiling (#36) * add profile_memory calls to CICE cap * update icepack * fix rhoa when lowest_temp is 0.0 * provide default value for rhoa when imported temp_height_lowest (Tair) is 0.0 * resolves seg fault when frac_grid=false and do_ca=true * update icepack submodule * Update CICE for latest Consortium master (#38) * Implement advanced snow physics in icepack and CICE * Fix time-stamping of CICE history files * Fix CICE history file precision * Use CICE-Consortium/Icepack master (#40) * switch to icepack master at consortium * recreate cap update branch (#42) * add debug_model feature * add required variables and calls for tr_snow * remove 2 extraneous lines * remove two log print lines that were removed prior to merge of driver updates to consortium * duplicate gitmodule style for icepack * Update CICE to latest Consortium/main (#45) * Update CICE to Consortium/main (#48) Update OpenMP directives as needed including validation via new omp_suite; the loop-level idiom exercised is sketched below. Fixed OpenMP in dynamics.
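The omp_suite validation mentioned above exercises the block-loop threading idiom that recurs throughout the diffs in this series; a minimal self-contained sketch of that idiom (the dimensions and the work1 name are placeholders standing in for the CICE block-decomposition variables):

    program omp_block_loop
      implicit none
      integer, parameter :: nblocks=4, nx_block=8, ny_block=8
      real(8) :: work1(nx_block,ny_block,nblocks)
      integer :: i, j, iblk
      !$OMP PARALLEL DO PRIVATE(iblk,i,j)
      do iblk = 1, nblocks             ! threads split the work over blocks
         do j = 1, ny_block
            do i = 1, nx_block
               work1(i,j,iblk) = 0.0d0 ! each (i,j,iblk) element is independent
            enddo
         enddo
      enddo
      !$OMP END PARALLEL DO
      print *, 'sum =', sum(work1)
    end program omp_block_loop

Correctness of the directive rests on declaring every loop-local variable PRIVATE; several hunks later in this series extend such PRIVATE lists (for example adding the index k) for exactly that reason.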
Refactored eap puny/pi lookups to improve scalar performance Update Tsfc implementation to make sure land blocks don't set Tsfc to freezing temp Update for sea bed stress calculations * fix comment, fix env for orion and hera * replace save_init with step_prep in CICE_RunMod * fixes for cgrid repro * remove added haloupdates * baselines pass with these extra halo updates removed * change F->S for ocean velocities and tilts * fix debug failure when grid_ice=C * compiling in debug mode using -init=snan,arrays requires initialization of variables * respond to review comments * remove inserted whitespace for uvelE,N and vvelE,N * Add wave-cice coupling; update to Consortium main (#51) * add wave-ice fields * initialize aicen_init, which turns up as NaN in calc of floediam export * add call to icepack_init_wave to initialize wavefreq and dwavefreq * update to latest consortium main (PR 752) * add initializations in ice_state * initialize vsnon/vsnon_init and vicen/vicen_init Co-authored-by: apcraig Co-authored-by: David Bailey Co-authored-by: Elizabeth Hunke Co-authored-by: Mariana Vertenstein Co-authored-by: Minsuk Ji <57227195+MinsukJi-NOAA@users.noreply.github.com> Co-authored-by: Tony Craig Co-authored-by: Philippe Blain --- cicecore/cicedynB/general/ice_state.F90 | 6 + cicecore/drivers/nuopc/cmeps/CICE_InitMod.F90 | 9 +- .../drivers/nuopc/cmeps/ice_comp_nuopc.F90 | 22 ++-- .../drivers/nuopc/cmeps/ice_import_export.F90 | 112 ++++++++++++++++-- 4 files changed, 129 insertions(+), 20 deletions(-) diff --git a/cicecore/cicedynB/general/ice_state.F90 b/cicecore/cicedynB/general/ice_state.F90 index 7b20879bc..862f0a8bc 100644 --- a/cicecore/cicedynB/general/ice_state.F90 +++ b/cicecore/cicedynB/general/ice_state.F90 @@ -188,6 +188,12 @@ subroutine alloc_state n_trcr_strata = 0 nt_strata = 0 trcr_base = c0 + aicen = c0 + aicen_init = c0 + vicen = c0 + vicen_init = c0 + vsnon = c0 + vsnon_init = c0 end subroutine alloc_state diff --git a/cicecore/drivers/nuopc/cmeps/CICE_InitMod.F90 b/cicecore/drivers/nuopc/cmeps/CICE_InitMod.F90 index f9b5116d0..3d5e5cc2a 100644 --- a/cicecore/drivers/nuopc/cmeps/CICE_InitMod.F90 +++ b/cicecore/drivers/nuopc/cmeps/CICE_InitMod.F90 @@ -78,7 +78,7 @@ subroutine cice_init2() use ice_calendar , only: dt, dt_dyn, istep, istep1, write_ic, init_calendar, calendar use ice_communicate , only: my_task, master_task use ice_diagnostics , only: init_diags - use ice_domain_size , only: ncat, nfsd + use ice_domain_size , only: ncat, nfsd, nfreq use ice_dyn_eap , only: init_eap, alloc_dyn_eap use ice_dyn_shared , only: kdyn, init_dyn use ice_dyn_vp , only: init_vp @@ -94,10 +94,12 @@ subroutine cice_init2() use ice_restoring , only: ice_HaloRestore_init use ice_timers , only: timer_total, init_ice_timers, ice_timer_start use ice_transport_driver , only: init_transport + use ice_arrays_column , only: wavefreq, dwavefreq logical(kind=log_kind) :: tr_aero, tr_zaero, skl_bgc, z_tracers logical(kind=log_kind) :: tr_iso, tr_fsd, wave_spec, tr_snow character(len=char_len) :: snw_aging_table + real(kind=dbl_kind), dimension(25) :: wave_spectrum_profile ! hardwire for now character(len=*), parameter :: subname = '(cice_init2)' !---------------------------------------------------- @@ -177,6 +179,11 @@ subroutine cice_init2() endif endif + if (wave_spec) then + call icepack_init_wave(nfreq=nfreq, & + wave_spectrum_profile=wave_spectrum_profile, wavefreq=wavefreq, dwavefreq=dwavefreq) + end if + ! Initialize shortwave components using swdn from previous timestep ! if restarting. 
These components will be scaled to current forcing ! in prep_radiation. diff --git a/cicecore/drivers/nuopc/cmeps/ice_comp_nuopc.F90 b/cicecore/drivers/nuopc/cmeps/ice_comp_nuopc.F90 index 8920ea386..182308973 100644 --- a/cicecore/drivers/nuopc/cmeps/ice_comp_nuopc.F90 +++ b/cicecore/drivers/nuopc/cmeps/ice_comp_nuopc.F90 @@ -25,6 +25,7 @@ module ice_comp_nuopc use ice_calendar , only : force_restart_now, write_ic use ice_calendar , only : idate, mday, mmonth, myear, year_init use ice_calendar , only : msec, dt, calendar, calendar_type, nextsw_cday, istep + use ice_calendar , only : ice_calendar_noleap, ice_calendar_gregorian use ice_kinds_mod , only : dbl_kind, int_kind, char_len, char_len_long use ice_fileunits , only : nu_diag, nu_diag_set, inst_index, inst_name use ice_fileunits , only : inst_suffix, release_all_fileunits, flush_fileunit @@ -80,9 +81,6 @@ module ice_comp_nuopc character(len=*) , parameter :: orb_variable_year = 'variable_year' character(len=*) , parameter :: orb_fixed_parameters = 'fixed_parameters' - character(len=*),parameter :: shr_cal_noleap = 'NO_LEAP' - character(len=*),parameter :: shr_cal_gregorian = 'GREGORIAN' - type(ESMF_Mesh) :: ice_mesh integer :: nthrds ! Number of threads to use in this component @@ -216,7 +214,6 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) type(ESMF_Time) :: stopTime ! Stop time type(ESMF_Time) :: refTime ! Ref time type(ESMF_TimeInterval) :: timeStep ! Model timestep - type(ESMF_Calendar) :: esmf_calendar ! esmf calendar type(ESMF_CalKind_Flag) :: esmf_caltype ! esmf calendar type integer :: start_ymd ! Start date (YYYYMMDD) integer :: start_tod ! start time of day (s) @@ -339,7 +336,8 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) call get_component_instance(gcomp, inst_suffix, inst_index, rc) if (ChkErr(rc,__LINE__,u_FILE_u)) return - inst_name = "ICE"//trim(inst_suffix) +! inst_name = "ICE"//trim(inst_suffix) + inst_name = "ICE" !---------------------------------------------------------------------------- ! start cice timers @@ -470,9 +468,9 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) call ESMF_TimeGet( currTime, calkindflag=esmf_caltype, rc=rc ) if (ChkErr(rc,__LINE__,u_FILE_u)) return if (esmf_caltype == ESMF_CALKIND_NOLEAP) then - calendar_type = shr_cal_noleap + calendar_type = ice_calendar_noleap else if (esmf_caltype == ESMF_CALKIND_GREGORIAN) then - calendar_type = shr_cal_gregorian + calendar_type = ice_calendar_gregorian else call abort_ice( subname//'ERROR:: bad calendar for ESMF' ) end if @@ -581,9 +579,11 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) end if call icepack_query_parameters( tfrz_option_out=tfrz_option) if (tfrz_option_driver /= tfrz_option) then - write(errmsg,'(a)') trim(subname)//'error: tfrz_option from driver '//trim(tfrz_option_driver)//& - ' must be the same as tfrz_option from cice namelist '//trim(tfrz_option) - call abort_ice(trim(errmsg)) + write(errmsg,'(a)') trim(subname)//'WARNING: tfrz_option from driver '//trim(tfrz_option_driver)//& + ' is overwriting tfrz_option from cice namelist '//trim(tfrz_option) + write(nu_diag,*) trim(errmsg) + call icepack_warnings_flush(nu_diag) + call icepack_init_parameters(tfrz_option_in=tfrz_option_driver) endif ! 
Flux convergence tolerance - always use the driver attribute value @@ -594,7 +594,7 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) read(cvalue,*) atmiter_conv_driver call icepack_query_parameters( atmiter_conv_out=atmiter_conv) if (atmiter_conv_driver /= atmiter_conv) then - write(errmsg,'(a,d13.5,a,d13.5)') trim(subname)//'warning: atmiter_ from driver ',& + write(errmsg,'(a,d13.5,a,d13.5)') trim(subname)//'WARNING: atmiter_ from driver ',& atmiter_conv_driver,' is overwritting atmiter_conv from cice namelist ',atmiter_conv write(nu_diag,*) trim(errmsg) call icepack_warnings_flush(nu_diag) diff --git a/cicecore/drivers/nuopc/cmeps/ice_import_export.F90 b/cicecore/drivers/nuopc/cmeps/ice_import_export.F90 index 5a6ce7572..d95a4d9b2 100644 --- a/cicecore/drivers/nuopc/cmeps/ice_import_export.F90 +++ b/cicecore/drivers/nuopc/cmeps/ice_import_export.F90 @@ -3,12 +3,13 @@ module ice_import_export use ESMF use NUOPC use NUOPC_Model - use ice_kinds_mod , only : int_kind, dbl_kind, char_len, log_kind + use ice_kinds_mod , only : int_kind, dbl_kind, char_len, char_len_long, log_kind use ice_constants , only : c0, c1, spval_dbl, radius use ice_constants , only : field_loc_center, field_type_scalar, field_type_vector use ice_blocks , only : block, get_block, nx_block, ny_block use ice_domain , only : nblocks, blocks_ice, halo_info, distrb_info use ice_domain_size , only : nx_global, ny_global, block_size_x, block_size_y, max_blocks, ncat + use ice_domain_size , only : nfreq, nfsd use ice_exit , only : abort_ice use ice_flux , only : strairxT, strairyT, strocnxT_iavg, strocnyT_iavg use ice_flux , only : alvdr, alidr, alvdf, alidf, Tref, Qref, Uref @@ -23,9 +24,10 @@ module ice_import_export use ice_flux , only : fsnow, uocn, vocn, sst, ss_tltx, ss_tlty, frzmlt use ice_flux , only : send_i2x_per_cat use ice_flux , only : sss, Tf, wind, fsw - use ice_state , only : vice, vsno, aice, aicen_init, trcr + use ice_arrays_column , only : floe_rad_c, wave_spectrum + use ice_state , only : vice, vsno, aice, aicen_init, trcr, trcrn use ice_grid , only : tlon, tlat, tarea, tmask, anglet, hm - use ice_grid , only : grid_type, grid_average_X2Y + use ice_grid , only : grid_type use ice_mesh_mod , only : ocn_gridcell_frac use ice_boundary , only : ice_HaloUpdate use ice_fileunits , only : nu_diag, flush_fileunit @@ -34,8 +36,10 @@ module ice_import_export use ice_shr_methods , only : chkerr, state_reset use icepack_intfc , only : icepack_warnings_flush, icepack_warnings_aborted use icepack_intfc , only : icepack_query_parameters, icepack_query_tracer_flags + use icepack_intfc , only : icepack_query_tracer_indices use icepack_intfc , only : icepack_liquidus_temperature use icepack_intfc , only : icepack_sea_freezing_temperature + use icepack_parameters , only : puny, c2 use cice_wrapper_mod , only : t_startf, t_stopf, t_barrierf #ifdef CESMCOUPLED use shr_frz_mod , only : shr_frz_freezetemp @@ -112,6 +116,7 @@ subroutine ice_advertise_fields(gcomp, importState, exportState, flds_scalar_nam character(char_len) :: stdname character(char_len) :: cvalue logical :: flds_wiso ! use case + logical :: flds_wave ! use case logical :: isPresent, isSet character(len=*), parameter :: subname='(ice_import_export:ice_advertise_fields)' !------------------------------------------------------------------------------- @@ -148,6 +153,17 @@ subroutine ice_advertise_fields(gcomp, importState, exportState, flds_scalar_nam write(nu_diag,*)'flds_wiso = ',flds_wiso end if + flds_wave = .false. 
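! Illustrative aside (an editorial sketch, not part of the upstream hunk):
! the flds_wave default of .false. set above survives unless the driver
! attribute queried next is both present and set, mirroring the flds_wiso
! block earlier in this routine, i.e.
!   if (isPresent .and. isSet) read(cvalue,*) flds_wave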
+ call NUOPC_CompAttributeGet(gcomp, name='wav_coupling_to_cice', value=cvalue, & + isPresent=isPresent, isSet=isSet, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + if (isPresent .and. isSet) then + read(cvalue,*) flds_wave + end if + if (my_task == master_task) then + write(nu_diag,*)'flds_wave = ',flds_wave + end if + !----------------- ! advertise import fields !----------------- @@ -192,6 +208,14 @@ subroutine ice_advertise_fields(gcomp, importState, exportState, flds_scalar_nam ! from atm - dry dust deposition fluxes (4 sizes) call fldlist_add(fldsToIce_num, fldsToIce, 'Faxa_dstdry', ungridded_lbound=1, ungridded_ubound=4) + ! the following are advertised but might not be connected if they are not advertised in the + ! in the cmeps esmFldsExchange_xxx_mod.F90 that is model specific + ! from wave + if (flds_wave) then + call fldlist_add(fldsToIce_num, fldsToIce, 'Sw_elevation_spectrum', ungridded_lbound=1, & + ungridded_ubound=25) + end if + do n = 1,fldsToIce_num call NUOPC_Advertise(importState, standardName=fldsToIce(n)%stdname, & TransferOfferGeomObject='will provide', rc=rc) @@ -225,6 +249,10 @@ subroutine ice_advertise_fields(gcomp, importState, exportState, flds_scalar_nam call fldlist_add(fldsFrIce_num, fldsFrIce, 'ice_fraction_n', & ungridded_lbound=1, ungridded_ubound=ncat) end if + if (flds_wave) then + call fldlist_add(fldsFrIce_num, fldsFrIce, 'Si_thick' ) + call fldlist_add(fldsFrIce_num, fldsFrIce, 'Si_floediam' ) + end if ! ice/atm fluxes computed by ice call fldlist_add(fldsFrIce_num, fldsFrIce, 'stress_on_air_ice_zonal' ) @@ -292,7 +320,7 @@ subroutine ice_realize_fields(gcomp, mesh, flds_scalar_name, flds_scalar_num, rc type(ESMF_State) :: exportState type(ESMF_Field) :: lfield integer :: numOwnedElements - integer :: i, j, iblk, n + integer :: i, j, iblk, n, k integer :: ilo, ihi, jlo, jhi ! beginning and end of physical domain type(block) :: this_block ! block information for current block real(dbl_kind), allocatable :: mesh_areas(:) @@ -403,11 +431,10 @@ subroutine ice_import( importState, rc ) ! local variables integer,parameter :: nflds=16 integer,parameter :: nfldv=6 - integer :: i, j, iblk, n + integer :: i, j, iblk, n, k integer :: ilo, ihi, jlo, jhi !beginning and end of physical domain type(block) :: this_block ! block information for current block real (kind=dbl_kind),allocatable :: aflds(:,:,:,:) - real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks) :: work real (kind=dbl_kind) :: workx, worky real (kind=dbl_kind) :: MIN_RAIN_TEMP, MAX_SNOW_TEMP real (kind=dbl_kind) :: Tffresh @@ -559,6 +586,29 @@ subroutine ice_import( importState, rc ) end do !$OMP END PARALLEL DO + ! import wave elevation spectrum from wave (frequencies 1-25, assume that nfreq is 25) + if (State_FldChk(importState, 'Sw_elevation_spectrum')) then + if (nfreq /= 25) then + call abort_ice(trim(subname)//": ERROR nfreq not equal to 25 ") + end if + call state_getfldptr(importState, 'Sw_elevation_spectrum', fldptr=dataPtr2d, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + do k = 1,nfreq + n = 0 + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo; ihi = this_block%ihi + jlo = this_block%jlo; jhi = this_block%jhi + do j = jlo, jhi + do i = ilo, ihi + n = n+1 + wave_spectrum(i,j,k,iblk) = dataPtr2d(k,n) + end do + end do + end do + end do + end if + if ( State_fldChk(importState, 'Sa_ptem') .and. 
State_fldchk(importState,'air_density_height_lowest')) then !$OMP PARALLEL DO PRIVATE(iblk,i,j) do iblk = 1, nblocks @@ -845,7 +895,7 @@ subroutine ice_export( exportState, rc ) ! local variables type(block) :: this_block ! block information for current block - integer :: i, j, iblk, n ! incides + integer :: i, j, iblk, n, k ! indices integer :: n2 ! thickness category index integer :: ilo, ihi, jlo, jhi ! beginning and end of physical domain real (kind=dbl_kind) :: workx, worky ! tmps for converting grid @@ -859,7 +909,11 @@ subroutine ice_export( exportState, rc ) real (kind=dbl_kind) :: tauxo (nx_block,ny_block,max_blocks) ! ice/ocean stress real (kind=dbl_kind) :: tauyo (nx_block,ny_block,max_blocks) ! ice/ocean stress real (kind=dbl_kind) :: ailohi(nx_block,ny_block,max_blocks) ! fractional ice area + real (kind=dbl_kind) :: floediam(nx_block,ny_block,max_blocks) + real (kind=dbl_kind) :: floethick(nx_block,ny_block,max_blocks) ! ice thickness real (kind=dbl_kind) :: Tffresh + logical (kind=log_kind) :: tr_fsd + integer (kind=int_kind) :: nt_fsd real (kind=dbl_kind), allocatable :: tempfld(:,:,:) real (kind=dbl_kind), pointer :: dataptr_ifrac_n(:,:) real (kind=dbl_kind), pointer :: dataptr_swpen_n(:,:) @@ -877,6 +931,9 @@ subroutine ice_export( exportState, rc ) ! tr_FY_out=tr_FY, tr_pond_out=tr_pond, tr_lvl_out=tr_lvl, & ! tr_zaero_out=tr_zaero, tr_bgc_Nit_out=tr_bgc_Nit) + call icepack_query_tracer_indices(nt_fsd_out=nt_fsd) + call icepack_query_tracer_flags(tr_fsd_out=tr_fsd) + call icepack_warnings_flush(nu_diag) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=u_FILE_u, line=__LINE__) @@ -890,8 +947,10 @@ subroutine ice_export( exportState, rc ) tauya(:,:,:) = c0 tauxo(:,:,:) = c0 tauyo(:,:,:) = c0 + floediam(:,:,:) = c0 + floethick(:,:,:) = c0 - !$OMP PARALLEL DO PRIVATE(iblk,i,j,workx,worky, this_block, ilo, ihi, jlo, jhi) + !$OMP PARALLEL DO PRIVATE(iblk,i,j,k,workx,worky, this_block, ilo, ihi, jlo, jhi) do iblk = 1, nblocks this_block = get_block(blocks_ice(iblk),iblk) ilo = this_block%ilo @@ -904,6 +963,27 @@ subroutine ice_export( exportState, rc ) ! ice fraction ailohi(i,j,iblk) = min(aice(i,j,iblk), c1) + if (tr_fsd) then + ! floe thickness (m) + if (aice(i,j,iblk) > puny) then + floethick(i,j,iblk) = vice(i,j,iblk) / aice(i,j,iblk) + else + floethick(i,j,iblk) = c0 + end if + + ! floe diameter (m) + workx = c0 + worky = c0 + do n = 1, ncat + do k = 1, nfsd + workx = workx + floe_rad_c(k) * aicen_init(i,j,n,iblk) * trcrn(i,j,nt_fsd+k-1,n,iblk) + worky = worky + aicen_init(i,j,n,iblk) * trcrn(i,j,nt_fsd+k-1,n,iblk) + end do + end do + if (worky > c0) workx = c2*workx / worky + floediam(i,j,iblk) = MAX(c2*floe_rad_c(1),workx) + endif + ! surface temperature Tsrf(i,j,iblk) = Tffresh + trcr(i,j,1,iblk) !Kelvin (original ???) @@ -1054,6 +1134,22 @@ subroutine ice_export( exportState, rc ) call state_setexport(exportState, 'Si_snowh' , input=tempfld , lmask=tmask, ifrac=ailohi, rc=rc) if (ChkErr(rc,__LINE__,u_FILE_u)) return + ! ------ + ! optional floe diameter and ice thickness to wave + ! ------ + + ! Sea ice thickness (m) + if (State_FldChk(exportState, 'Si_thick')) then + call state_setexport(exportState, 'Si_thick' , input=floethick , lmask=tmask, ifrac=ailohi, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + end if + + ! 
Sea ice floe diameter (m) + if (State_FldChk(exportState, 'Si_floediam')) then + call state_setexport(exportState, 'Si_floediam' , input=floediam , lmask=tmask, ifrac=ailohi, rc=rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + end if + ! ------ ! ice/atm fluxes computed by ice ! ------ From 5a32f12814ff6d53ab962a975a4404973811f941 Mon Sep 17 00:00:00 2001 From: Elizabeth Hunke Date: Tue, 15 Nov 2022 17:53:47 -0700 Subject: [PATCH 36/57] finalize 0-layer thermo and cesm ponds deprecation in CICE (#787) * finalize 0-layer thermo and cesm ponds deprecation * set default hs0=0 and document * formatting table in doc * revert hs0=0.03 * update Icepack --- .../cicedynB/analysis/ice_history_pond.F90 | 30 +---- .../dynamics/ice_transport_driver.F90 | 15 --- cicecore/cicedynB/general/ice_init.F90 | 123 ------------------ cicecore/cicedynB/general/ice_step_mod.F90 | 8 -- .../io/io_binary/ice_restart.F90 | 71 ---------- .../io/io_netcdf/ice_restart.F90 | 15 --- .../infrastructure/io/io_pio2/ice_restart.F90 | 15 --- .../drivers/direct/hadgem3/CICE_InitMod.F90 | 30 ----- .../drivers/direct/hadgem3/CICE_RunMod.F90 | 15 --- .../direct/nemo_concepts/CICE_InitMod.F90 | 30 ----- .../direct/nemo_concepts/CICE_RunMod.F90 | 15 --- cicecore/drivers/mct/cesm1/CICE_InitMod.F90 | 30 ----- cicecore/drivers/mct/cesm1/CICE_RunMod.F90 | 15 --- cicecore/drivers/nuopc/cmeps/CICE_InitMod.F90 | 30 ----- cicecore/drivers/nuopc/cmeps/CICE_RunMod.F90 | 15 --- cicecore/drivers/nuopc/dmi/CICE_InitMod.F90 | 30 ----- cicecore/drivers/nuopc/dmi/CICE_RunMod.F90 | 15 --- .../drivers/standalone/cice/CICE_InitMod.F90 | 30 ----- .../drivers/standalone/cice/CICE_RunMod.F90 | 15 --- .../unittest/gridavgchk/CICE_InitMod.F90 | 30 ----- .../drivers/unittest/sumchk/CICE_InitMod.F90 | 30 ----- cicecore/shared/ice_init_column.F90 | 29 ----- cicecore/shared/ice_restart_column.F90 | 74 ----------- .../convert_restarts.f90 | 4 - doc/source/cice_index.rst | 13 +- doc/source/user_guide/ug_case_settings.rst | 5 +- icepack | 2 +- 27 files changed, 10 insertions(+), 724 deletions(-) diff --git a/cicecore/cicedynB/analysis/ice_history_pond.F90 b/cicecore/cicedynB/analysis/ice_history_pond.F90 index f6e4b8737..8818ff94e 100644 --- a/cicecore/cicedynB/analysis/ice_history_pond.F90 +++ b/cicecore/cicedynB/analysis/ice_history_pond.F90 @@ -268,13 +268,8 @@ subroutine accum_hist_pond (iblk) integer (kind=int_kind) :: & nt_apnd, nt_hpnd, nt_alvl, nt_ipnd -#ifdef UNDEPRECATE_CESMPONDS - logical (kind=log_kind) :: & - tr_pond_cesm, tr_pond_lvl, tr_pond_topo -#else logical (kind=log_kind) :: & tr_pond_lvl, tr_pond_topo -#endif real (kind=dbl_kind) :: & puny @@ -289,13 +284,8 @@ subroutine accum_hist_pond (iblk) !--------------------------------------------------------------- call icepack_query_parameters(puny_out=puny) -#ifdef UNDEPRECATE_CESMPONDS - call icepack_query_tracer_flags(tr_pond_cesm_out=tr_pond_cesm, & - tr_pond_lvl_out=tr_pond_lvl, tr_pond_topo_out=tr_pond_topo) -#else call icepack_query_tracer_flags(tr_pond_lvl_out=tr_pond_lvl, & tr_pond_topo_out=tr_pond_topo) -#endif call icepack_query_tracer_indices(nt_apnd_out=nt_apnd, nt_hpnd_out=nt_hpnd, & nt_alvl_out=nt_alvl, nt_ipnd_out=nt_ipnd) call icepack_warnings_flush(nu_diag) @@ -303,27 +293,9 @@ subroutine accum_hist_pond (iblk) file=__FILE__, line=__LINE__) if (allocated(a2D)) then -#ifdef UNDEPRECATE_CESMPONDS - if (tr_pond_cesm) then - if (f_apond(1:1)/= 'x') & - call accum_hist_field(n_apond, iblk, & - trcr(:,:,nt_apnd,iblk), a2D) - if (f_apond_ai(1:1)/= 'x') & - call accum_hist_field(n_apond_ai, 
iblk, & - aice(:,:,iblk) * trcr(:,:,nt_apnd,iblk), a2D) - if (f_hpond(1:1)/= 'x') & - call accum_hist_field(n_hpond, iblk, & - trcr(:,:,nt_apnd,iblk) & - * trcr(:,:,nt_hpnd,iblk), a2D) - if (f_hpond_ai(1:1)/= 'x') & - call accum_hist_field(n_hpond_ai, iblk, & - aice(:,:,iblk) * trcr(:,:,nt_apnd,iblk) & - * trcr(:,:,nt_hpnd,iblk), a2D) - elseif (tr_pond_lvl) then -#else if (tr_pond_lvl) then -#endif + if (f_apond(1:1)/= 'x') & call accum_hist_field(n_apond, iblk, & trcr(:,:,nt_alvl,iblk) * trcr(:,:,nt_apnd,iblk), a2D) diff --git a/cicecore/cicedynB/dynamics/ice_transport_driver.F90 b/cicecore/cicedynB/dynamics/ice_transport_driver.F90 index 43fe5af13..30fe546e0 100644 --- a/cicecore/cicedynB/dynamics/ice_transport_driver.F90 +++ b/cicecore/cicedynB/dynamics/ice_transport_driver.F90 @@ -1534,13 +1534,8 @@ subroutine state_to_work (nx_block, ny_block, & integer (kind=int_kind) :: & nt_alvl, nt_apnd, nt_fbri -#ifdef UNDEPRECATE_CESMPONDS - logical (kind=log_kind) :: & - tr_pond_cesm, tr_pond_lvl, tr_pond_topo -#else logical (kind=log_kind) :: & tr_pond_lvl, tr_pond_topo -#endif integer (kind=int_kind) :: & i, j, n, it, & ! counting indices @@ -1548,13 +1543,8 @@ subroutine state_to_work (nx_block, ny_block, & character(len=*), parameter :: subname = '(state_to_work)' -#ifdef UNDEPRECATE_CESMPONDS - call icepack_query_tracer_flags(tr_pond_cesm_out=tr_pond_cesm, & - tr_pond_lvl_out=tr_pond_lvl, tr_pond_topo_out=tr_pond_topo) -#else call icepack_query_tracer_flags(tr_pond_lvl_out=tr_pond_lvl, & tr_pond_topo_out=tr_pond_topo) -#endif call icepack_query_tracer_indices(nt_alvl_out=nt_alvl, nt_apnd_out=nt_apnd, & nt_fbri_out=nt_fbri) call icepack_warnings_flush(nu_diag) @@ -1613,13 +1603,8 @@ subroutine state_to_work (nx_block, ny_block, & * trcrn(i,j,it ,n) enddo enddo -#ifdef UNDEPRECATE_CESMPONDS - elseif (trcr_depend(it) == 2+nt_apnd .and. & - tr_pond_cesm .or. tr_pond_topo) then -#else elseif (trcr_depend(it) == 2+nt_apnd .and. & tr_pond_topo) then -#endif do j = 1, ny_block do i = 1, nx_block works(i,j,narrays+it) = aicen(i,j ,n) & diff --git a/cicecore/cicedynB/general/ice_init.F90 b/cicecore/cicedynB/general/ice_init.F90 index a6c4d7941..45ae58d8b 100644 --- a/cicecore/cicedynB/general/ice_init.F90 +++ b/cicecore/cicedynB/general/ice_init.F90 @@ -73,15 +73,9 @@ subroutine input_data npt, dt, ndtd, days_per_year, use_leap_years, & write_ic, dump_last, npt_unit use ice_arrays_column, only: oceanmixed_ice -#ifdef UNDEPRECATE_CESMPONDS - use ice_restart_column, only: restart_age, restart_FY, restart_lvl, & - restart_pond_cesm, restart_pond_lvl, restart_pond_topo, restart_aero, & - restart_fsd, restart_iso, restart_snow -#else use ice_restart_column, only: restart_age, restart_FY, restart_lvl, & restart_pond_lvl, restart_pond_topo, restart_aero, & restart_fsd, restart_iso, restart_snow -#endif use ice_restart_shared, only: & restart, restart_ext, restart_coszen, restart_dir, restart_file, pointer_file, & runid, runtype, use_restart_time, restart_format, lcdf64 @@ -165,23 +159,12 @@ subroutine input_data logical (kind=log_kind) :: tr_iage, tr_FY, tr_lvl, tr_pond logical (kind=log_kind) :: tr_iso, tr_aero, tr_fsd, tr_snow -#ifdef UNDEPRECATE_CESMPONDS - logical (kind=log_kind) :: tr_pond_cesm, tr_pond_lvl, tr_pond_topo -#else logical (kind=log_kind) :: tr_pond_lvl, tr_pond_topo -#endif integer (kind=int_kind) :: numin, numax ! 
unit number limits -#ifdef UNDEPRECATE_CESMPONDS - integer (kind=int_kind) :: rpcesm, rplvl, rptopo -#else integer (kind=int_kind) :: rplvl, rptopo -#endif real (kind=dbl_kind) :: Cf, ksno, puny character (len=char_len) :: abort_list -#ifdef CESMCOUPLED - character (len=64) :: tmpstr -#endif character (len=128) :: tmpstr2 character(len=*), parameter :: subname='(input_data)' @@ -221,9 +204,6 @@ subroutine input_data tr_iage, restart_age, & tr_FY, restart_FY, & tr_lvl, restart_lvl, & -#ifdef UNDEPRECATE_CESMPONDS - tr_pond_cesm, restart_pond_cesm, & -#endif tr_pond_lvl, restart_pond_lvl, & tr_pond_topo, restart_pond_topo, & tr_snow, restart_snow, & @@ -445,11 +425,7 @@ subroutine input_data conserv_check = .false. ! tracer conservation check shortwave = 'ccsm3' ! 'ccsm3' or 'dEdd' (delta-Eddington) albedo_type = 'ccsm3' ! 'ccsm3' or 'constant' -#ifdef UNDEPRECATE_0LAYER - ktherm = 1 ! -1 = OFF, 0 = 0-layer, 1 = BL99, 2 = mushy thermo -#else ktherm = 1 ! -1 = OFF, 1 = BL99, 2 = mushy thermo -#endif conduct = 'bubbly' ! 'MU71' or 'bubbly' (Pringle et al 2007) coriolis = 'latitude' ! latitude dependent, or 'constant' ssh_stress = 'geostrophic' ! 'geostrophic' or 'coupled' @@ -559,10 +535,6 @@ subroutine input_data restart_FY = .false. ! ice age restart tr_lvl = .false. ! level ice restart_lvl = .false. ! level ice restart -#ifdef UNDEPRECATE_CESMPONDS - tr_pond_cesm = .false. ! CESM melt ponds - restart_pond_cesm = .false. ! melt ponds restart -#endif tr_pond_lvl = .false. ! level-ice melt ponds restart_pond_lvl = .false. ! melt ponds restart tr_pond_topo = .false. ! explicit melt ponds (topographic) @@ -1034,10 +1006,6 @@ subroutine input_data call broadcast_scalar(restart_FY, master_task) call broadcast_scalar(tr_lvl, master_task) call broadcast_scalar(restart_lvl, master_task) -#ifdef UNDEPRECATE_CESMPONDS - call broadcast_scalar(tr_pond_cesm, master_task) - call broadcast_scalar(restart_pond_cesm, master_task) -#endif call broadcast_scalar(tr_pond_lvl, master_task) call broadcast_scalar(restart_pond_lvl, master_task) call broadcast_scalar(tr_pond_topo, master_task) @@ -1130,9 +1098,6 @@ subroutine input_data restart_age = .false. restart_fy = .false. restart_lvl = .false. -#ifdef UNDEPRECATE_CESMPONDS - restart_pond_cesm = .false. -#endif restart_pond_lvl = .false. restart_pond_topo = .false. restart_snow = .false. @@ -1249,29 +1214,15 @@ subroutine input_data endif endif -#ifdef UNDEPRECATE_CESMPONDS - rpcesm = 0 -#endif rplvl = 0 rptopo = 0 -#ifdef UNDEPRECATE_CESMPONDS - if (tr_pond_cesm) rpcesm = 1 -#endif if (tr_pond_lvl ) rplvl = 1 if (tr_pond_topo) rptopo = 1 tr_pond = .false. ! explicit melt ponds -#ifdef UNDEPRECATE_CESMPONDS - if (rpcesm + rplvl + rptopo > 0) tr_pond = .true. -#else if (rplvl + rptopo > 0) tr_pond = .true. -#endif -#ifdef UNDEPRECATE_CESMPONDS - if (rpcesm + rplvl + rptopo > 1) then -#else if (rplvl + rptopo > 1) then -#endif if (my_task == master_task) then write(nu_diag,*) subname//' ERROR: Must use only one melt pond scheme' endif @@ -1495,13 +1446,6 @@ subroutine input_data abort_list = trim(abort_list)//":16" endif -#ifdef UNDEPRECATE_CESMPONDS - if (tr_pond_cesm) then - if (my_task == master_task) write(nu_diag,*) subname//' ERROR: formdrag=T and frzpnd=cesm' - abort_list = trim(abort_list)//":17" - endif -#endif - if (.not. 
tr_lvl) then if (my_task == master_task) write(nu_diag,*) subname//' ERROR: formdrag=T and tr_lvl=F' abort_list = trim(abort_list)//":18" @@ -1925,10 +1869,6 @@ subroutine input_data tmpstr2 = ' : Bitz and Lipscomb 1999 thermo' elseif (ktherm == 2) then tmpstr2 = ' : mushy-layer thermo' -#ifdef UNDEPRECATE_0LAYER - elseif (ktherm == 0) then - tmpstr2 = ' : zero-layer thermo' -#endif elseif (ktherm < 0) then tmpstr2 = ' : Thermodynamics disabled' else @@ -2109,14 +2049,7 @@ subroutine input_data write(nu_diag,*) ' ' write(nu_diag,*) ' Melt ponds' write(nu_diag,*) '--------------------------------' -#ifdef UNDEPRECATE_CESMPONDS - if (tr_pond_cesm) then - write(nu_diag,1010) ' tr_pond_cesm = ', tr_pond_cesm,' : CESM pond formulation' - write(nu_diag,1002) ' pndaspect = ', pndaspect,' : ratio of pond depth to area fraction' - elseif (tr_pond_lvl) then -#else if (tr_pond_lvl) then -#endif write(nu_diag,1010) ' tr_pond_lvl = ', tr_pond_lvl,' : level-ice pond formulation' write(nu_diag,1002) ' pndaspect = ', pndaspect,' : ratio of pond depth to area fraction' write(nu_diag,1000) ' dpscale = ', dpscale,' : time scale for flushing in permeable ice' @@ -2229,9 +2162,6 @@ subroutine input_data if (tr_lvl) write(nu_diag,1010) ' tr_lvl = ', tr_lvl,' : ridging related tracers' if (tr_pond_lvl) write(nu_diag,1010) ' tr_pond_lvl = ', tr_pond_lvl,' : level-ice pond formulation' if (tr_pond_topo) write(nu_diag,1010) ' tr_pond_topo = ', tr_pond_topo,' : topo pond formulation' -#ifdef UNDEPRECATE_CESMPONDS - if (tr_pond_cesm) write(nu_diag,1010) ' tr_pond_cesm = ', tr_pond_cesm,' : CESM pond formulation' -#endif if (tr_snow) write(nu_diag,1010) ' tr_snow = ', tr_snow,' : advanced snow physics' if (tr_iage) write(nu_diag,1010) ' tr_iage = ', tr_iage,' : chronological ice age' if (tr_FY) write(nu_diag,1010) ' tr_FY = ', tr_FY,' : first-year ice area' @@ -2356,9 +2286,6 @@ subroutine input_data write(nu_diag,1011) ' restart_age = ', restart_age write(nu_diag,1011) ' restart_FY = ', restart_FY write(nu_diag,1011) ' restart_lvl = ', restart_lvl -#ifdef UNDEPRECATE_CESMPONDS - write(nu_diag,1011) ' restart_pond_cesm= ', restart_pond_cesm -#endif write(nu_diag,1011) ' restart_pond_lvl = ', restart_pond_lvl write(nu_diag,1011) ' restart_pond_topo= ', restart_pond_topo write(nu_diag,1011) ' restart_snow = ', restart_snow @@ -2457,11 +2384,7 @@ subroutine input_data call icepack_init_tracer_flags(tr_iage_in=tr_iage, tr_FY_in=tr_FY, & tr_lvl_in=tr_lvl, tr_iso_in=tr_iso, tr_aero_in=tr_aero, & tr_fsd_in=tr_fsd, tr_snow_in=tr_snow, tr_pond_in=tr_pond, & -#ifdef UNDEPRECATE_CESMPONDS - tr_pond_cesm_in=tr_pond_cesm, tr_pond_lvl_in=tr_pond_lvl, tr_pond_topo_in=tr_pond_topo) -#else tr_pond_lvl_in=tr_pond_lvl, tr_pond_topo_in=tr_pond_topo) -#endif call icepack_init_tracer_sizes(ncat_in=ncat, nilyr_in=nilyr, nslyr_in=nslyr, nblyr_in=nblyr, & nfsd_in=nfsd, n_algae_in=n_algae, n_iso_in=n_iso, n_aero_in=n_aero, & n_DOC_in=n_DOC, n_DON_in=n_DON, & @@ -2518,18 +2441,10 @@ subroutine init_state it , & ! tracer index iblk ! block index -#ifdef UNDEPRECATE_0LAYER - logical (kind=log_kind) :: & - heat_capacity ! 
from icepack -#endif integer (kind=int_kind) :: ntrcr logical (kind=log_kind) :: tr_iage, tr_FY, tr_lvl, tr_iso, tr_aero -#ifdef UNDEPRECATE_CESMPONDS - logical (kind=log_kind) :: tr_pond_cesm, tr_pond_lvl, tr_pond_topo -#else logical (kind=log_kind) :: tr_pond_lvl, tr_pond_topo -#endif logical (kind=log_kind) :: tr_snow, tr_fsd integer (kind=int_kind) :: nt_Tsfc, nt_sice, nt_qice, nt_qsno, nt_iage, nt_FY integer (kind=int_kind) :: nt_alvl, nt_vlvl, nt_apnd, nt_hpnd, nt_ipnd @@ -2543,17 +2458,10 @@ subroutine init_state !----------------------------------------------------------------- -#ifdef UNDEPRECATE_0LAYER - call icepack_query_parameters(heat_capacity_out=heat_capacity) -#endif call icepack_query_tracer_sizes(ntrcr_out=ntrcr) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & tr_lvl_out=tr_lvl, tr_iso_out=tr_iso, tr_aero_out=tr_aero, & -#ifdef UNDEPRECATE_CESMPONDS - tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, tr_pond_topo_out=tr_pond_topo, & -#else tr_pond_lvl_out=tr_pond_lvl, tr_pond_topo_out=tr_pond_topo, & -#endif tr_snow_out=tr_snow, tr_fsd_out=tr_fsd) call icepack_query_tracer_indices(nt_Tsfc_out=nt_Tsfc, nt_sice_out=nt_sice, & nt_qice_out=nt_qice, nt_qsno_out=nt_qsno, nt_iage_out=nt_iage, nt_fy_out=nt_fy, & @@ -2587,25 +2495,6 @@ subroutine init_state file=__FILE__, line=__LINE__) endif -#ifdef UNDEPRECATE_0LAYER - if (.not.heat_capacity) then - - if (nilyr > 1) then - write(nu_diag,*) subname//' ERROR: Must have nilyr = 1 if heat_capacity=F' - write(nu_diag,*) subname//' ERROR: nilyr =', nilyr - call abort_ice(error_message=subname//' Too many ice layers', & - file=__FILE__, line=__LINE__) - endif - - if (nslyr > 1) then - write(nu_diag,*) subname//' ERROR: Must have nslyr = 1 if heat_capacity=F' - write(nu_diag,*) subname//' ERROR: nslyr =', nslyr - call abort_ice(error_message=subname//' Too many snow layers', & - file=__FILE__, line=__LINE__) - endif - - endif ! heat_capacity = F -#endif endif ! my_task !----------------------------------------------------------------- @@ -2624,12 +2513,6 @@ subroutine init_state if (tr_FY) trcr_depend(nt_FY) = 0 ! area-weighted first-year ice area if (tr_lvl) trcr_depend(nt_alvl) = 0 ! level ice area if (tr_lvl) trcr_depend(nt_vlvl) = 1 ! level ice volume -#ifdef UNDEPRECATE_CESMPONDS - if (tr_pond_cesm) then - trcr_depend(nt_apnd) = 0 ! melt pond area - trcr_depend(nt_hpnd) = 2+nt_apnd ! melt pond depth - endif -#endif if (tr_pond_lvl) then trcr_depend(nt_apnd) = 2+nt_alvl ! melt pond area trcr_depend(nt_hpnd) = 2+nt_apnd ! melt pond depth @@ -2691,12 +2574,6 @@ subroutine init_state nt_strata (it,2) = 0 enddo -#ifdef UNDEPRECATE_CESMPONDS - if (tr_pond_cesm) then - n_trcr_strata(nt_hpnd) = 1 ! melt pond depth - nt_strata (nt_hpnd,1) = nt_apnd ! on melt pond area - endif -#endif if (tr_pond_lvl) then n_trcr_strata(nt_apnd) = 1 ! melt pond area nt_strata (nt_apnd,1) = nt_alvl ! 
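The trcr_depend and nt_strata assignments above encode how each tracer is weighted during transport and ridging. Following the CICE convention, 0 means carried on ice area (aicen), 1 on ice volume (vicen), 2 on snow volume (vsnon), and 2+nt_X means weighted by tracer X. A sketch with placeholder sizes and tracer indices:

   ! Sketch of the dependency tables set in init_state above; array sizes
   ! and index values here are placeholders for illustration.
   integer, parameter :: ntrcr = 8
   integer :: trcr_depend(ntrcr)    ! 0=aicen, 1=vicen, 2=vsnon, 2+nt_X=tracer X
   integer :: n_trcr_strata(ntrcr)  ! number of underlying tracer layers
   integer :: nt_strata(ntrcr,2)    ! indices of underlying tracers
   integer, parameter :: nt_alvl = 3, nt_apnd = 5, nt_hpnd = 6

   trcr_depend(nt_alvl)   = 0           ! level ice area rides on aicen
   trcr_depend(nt_apnd)   = 2 + nt_alvl ! pond area rides on level ice area
   trcr_depend(nt_hpnd)   = 2 + nt_apnd ! pond depth rides on pond area

   n_trcr_strata(nt_apnd) = 1           ! pond area has one underlying stratum,
   nt_strata(nt_apnd,1)   = nt_alvl     ! namely the level ice area tracer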
on level ice area diff --git a/cicecore/cicedynB/general/ice_step_mod.F90 b/cicecore/cicedynB/general/ice_step_mod.F90 index 5742bb2b9..8dd6fe49a 100644 --- a/cicecore/cicedynB/general/ice_step_mod.F90 +++ b/cicecore/cicedynB/general/ice_step_mod.F90 @@ -274,11 +274,7 @@ subroutine step_therm1 (dt, iblk) nt_isosno, nt_isoice, nt_rsnw, nt_smice, nt_smliq logical (kind=log_kind) :: & -#ifdef UNDEPRECATE_CESMPONDS - tr_iage, tr_FY, tr_iso, tr_aero, tr_pond, tr_pond_cesm, & -#else tr_iage, tr_FY, tr_iso, tr_aero, tr_pond, & -#endif tr_pond_lvl, tr_pond_topo, calc_Tsfc, highfreq, tr_snow real (kind=dbl_kind) :: & @@ -304,11 +300,7 @@ subroutine step_therm1 (dt, iblk) call icepack_query_tracer_sizes(ntrcr_out=ntrcr) call icepack_query_tracer_flags( & tr_iage_out=tr_iage, tr_FY_out=tr_FY, tr_iso_out=tr_iso, & -#ifdef UNDEPRECATE_CESMPONDS - tr_aero_out=tr_aero, tr_pond_out=tr_pond, tr_pond_cesm_out=tr_pond_cesm, & -#else tr_aero_out=tr_aero, tr_pond_out=tr_pond, & -#endif tr_pond_lvl_out=tr_pond_lvl, tr_pond_topo_out=tr_pond_topo, & tr_snow_out=tr_snow) call icepack_query_tracer_indices( & diff --git a/cicecore/cicedynB/infrastructure/io/io_binary/ice_restart.F90 b/cicecore/cicedynB/infrastructure/io/io_binary/ice_restart.F90 index b2b438ebe..3e7abe3a3 100644 --- a/cicecore/cicedynB/infrastructure/io/io_binary/ice_restart.F90 +++ b/cicecore/cicedynB/infrastructure/io/io_binary/ice_restart.F90 @@ -58,11 +58,7 @@ subroutine init_restart_read(ice_ic) logical (kind=log_kind) :: & solve_zsal, tr_fsd, & -#ifdef UNDEPRECATE_CESMPONDS - tr_iage, tr_FY, tr_lvl, tr_iso, tr_aero, tr_pond_cesm, & -#else tr_iage, tr_FY, tr_lvl, tr_iso, tr_aero, & -#endif tr_pond_topo, tr_pond_lvl, tr_brine, tr_snow character(len=char_len_long) :: & @@ -87,11 +83,7 @@ subroutine init_restart_read(ice_ic) nbtrcr_out=nbtrcr) call icepack_query_tracer_flags( & tr_iage_out=tr_iage, tr_FY_out=tr_FY, tr_lvl_out=tr_lvl, tr_fsd_out=tr_fsd, & -#ifdef UNDEPRECATE_CESMPONDS - tr_iso_out=tr_iso, tr_aero_out=tr_aero, tr_pond_cesm_out=tr_pond_cesm, & -#else tr_iso_out=tr_iso, tr_aero_out=tr_aero, & -#endif tr_pond_topo_out=tr_pond_topo, tr_pond_lvl_out=tr_pond_lvl, & tr_snow_out=tr_snow, tr_brine_out=tr_brine) call icepack_warnings_flush(nu_diag) @@ -236,28 +228,6 @@ subroutine init_restart_read(ice_ic) endif endif -#ifdef UNDEPRECATE_CESMPONDS - if (tr_pond_cesm) then - if (my_task == master_task) then - n = index(filename0,trim(restart_file)) - if (n == 0) call abort_ice(subname//'ERROR: pond_cesm restart: filename discrepancy') - string1 = trim(filename0(1:n-1)) - string2 = trim(filename0(n+lenstr(restart_file):lenstr(filename0))) - write(filename,'(a,a,a,a)') & - string1(1:lenstr(string1)), & - restart_file(1:lenstr(restart_file)),'.pond_cesm', & - string2(1:lenstr(string2)) - if (restart_ext) then - call ice_open_ext(nu_restart_pond,filename,0) - else - call ice_open(nu_restart_pond,filename,0) - endif - read (nu_restart_pond) iignore,rignore,rignore - write(nu_diag,*) 'Reading ',filename(1:lenstr(filename)) - endif - endif -#endif - if (tr_pond_lvl) then if (my_task == master_task) then n = index(filename0,trim(restart_file)) @@ -424,11 +394,7 @@ subroutine init_restart_write(filename_spec) logical (kind=log_kind) :: & solve_zsal, tr_fsd, & -#ifdef UNDEPRECATE_CESMPONDS - tr_iage, tr_FY, tr_lvl, tr_iso, tr_aero, tr_pond_cesm, & -#else tr_iage, tr_FY, tr_lvl, tr_iso, tr_aero, & -#endif tr_pond_topo, tr_pond_lvl, tr_brine, tr_snow integer (kind=int_kind) :: & @@ -444,11 +410,7 @@ subroutine init_restart_write(filename_spec) 
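Each per-tracer restart reader above splices a suffix such as '.pond_lvl' into the base restart filename by splitting on the restart_file substring. A runnable sketch of that string surgery (the path and names are hypothetical, and len_trim stands in for CICE's lenstr helper):

   program restart_name_demo
      implicit none
      character(len=256) :: filename0, filename, string1, string2
      character(len=64)  :: restart_file
      integer :: n

      filename0    = './restart/iced.2005-01-01-00000'   ! hypothetical dump
      restart_file = 'iced'
      n = index(filename0, trim(restart_file))
      string1 = filename0(1:n-1)                         ! leading directory part
      string2 = filename0(n+len_trim(restart_file):)     ! trailing date part
      write(filename,'(a,a,a,a)') trim(string1), trim(restart_file), &
           '.pond_lvl', trim(string2)
      print *, trim(filename)   ! ./restart/iced.pond_lvl.2005-01-01-00000
   end program restart_name_demo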
nbtrcr_out=nbtrcr) call icepack_query_tracer_flags( & tr_iage_out=tr_iage, tr_FY_out=tr_FY, tr_lvl_out=tr_lvl, tr_fsd_out=tr_fsd, & -#ifdef UNDEPRECATE_CESMPONDS - tr_iso_out=tr_iso, tr_aero_out=tr_aero, tr_pond_cesm_out=tr_pond_cesm, & -#else tr_iso_out=tr_iso, tr_aero_out=tr_aero, & -#endif tr_pond_topo_out=tr_pond_topo, tr_pond_lvl_out=tr_pond_lvl, & tr_snow_out=tr_snow, tr_brine_out=tr_brine) call icepack_warnings_flush(nu_diag) @@ -581,28 +543,6 @@ subroutine init_restart_write(filename_spec) endif -#ifdef UNDEPRECATE_CESMPONDS - if (tr_pond_cesm) then - - write(filename,'(a,a,a,i4.4,a,i2.2,a,i2.2,a,i5.5)') & - restart_dir(1:lenstr(restart_dir)), & - restart_file(1:lenstr(restart_file)),'.pond_cesm.', & - myear,'-',mmonth,'-',mday,'-',msec - - if (restart_ext) then - call ice_open_ext(nu_dump_pond,filename,0) - else - call ice_open(nu_dump_pond,filename,0) - endif - - if (my_task == master_task) then - write(nu_dump_pond) istep1,timesecs,time_forc - write(nu_diag,*) 'Writing ',filename(1:lenstr(filename)) - endif - - endif -#endif - if (tr_pond_lvl) then write(filename,'(a,a,a,i4.4,a,i2.2,a,i2.2,a,i5.5)') & @@ -871,11 +811,7 @@ subroutine final_restart() logical (kind=log_kind) :: & solve_zsal, & -#ifdef UNDEPRECATE_CESMPONDS - tr_iage, tr_FY, tr_lvl, tr_iso, tr_aero, tr_pond_cesm, & -#else tr_iage, tr_FY, tr_lvl, tr_iso, tr_aero, & -#endif tr_pond_topo, tr_pond_lvl, tr_brine, tr_snow integer (kind=int_kind) :: & @@ -889,11 +825,7 @@ subroutine final_restart() nbtrcr_out=nbtrcr) call icepack_query_tracer_flags( & tr_iage_out=tr_iage, tr_FY_out=tr_FY, tr_lvl_out=tr_lvl, & -#ifdef UNDEPRECATE_CESMPONDS - tr_iso_out=tr_iso, tr_aero_out=tr_aero, tr_pond_cesm_out=tr_pond_cesm, & -#else tr_iso_out=tr_iso, tr_aero_out=tr_aero, & -#endif tr_pond_topo_out=tr_pond_topo, tr_pond_lvl_out=tr_pond_lvl, & tr_snow_out=tr_snow, tr_brine_out=tr_brine) call icepack_warnings_flush(nu_diag) @@ -908,9 +840,6 @@ subroutine final_restart() if (tr_iage) close(nu_dump_age) if (tr_FY) close(nu_dump_FY) if (tr_lvl) close(nu_dump_lvl) -#ifdef UNDEPRECATE_CESMPONDS - if (tr_pond_cesm) close(nu_dump_pond) -#endif if (tr_pond_lvl) close(nu_dump_pond) if (tr_pond_topo) close(nu_dump_pond) if (tr_snow) close(nu_dump_snow) diff --git a/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_restart.F90 b/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_restart.F90 index f647bd96b..ed49a48f5 100644 --- a/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_restart.F90 +++ b/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_restart.F90 @@ -145,11 +145,7 @@ subroutine init_restart_write(filename_spec) logical (kind=log_kind) :: & solve_zsal, skl_bgc, z_tracers, tr_fsd, & -#ifdef UNDEPRECATE_CESMPONDS - tr_iage, tr_FY, tr_lvl, tr_iso, tr_aero, tr_pond_cesm, & -#else tr_iage, tr_FY, tr_lvl, tr_iso, tr_aero, & -#endif tr_pond_topo, tr_pond_lvl, tr_brine, tr_snow, & tr_bgc_N, tr_bgc_C, tr_bgc_Nit, & tr_bgc_Sil, tr_bgc_DMS, & @@ -185,11 +181,7 @@ subroutine init_restart_write(filename_spec) nbtrcr_out=nbtrcr) call icepack_query_tracer_flags( & tr_iage_out=tr_iage, tr_FY_out=tr_FY, tr_lvl_out=tr_lvl, tr_fsd_out=tr_fsd, & -#ifdef UNDEPRECATE_CESMPONDS - tr_iso_out=tr_iso, tr_aero_out=tr_aero, tr_pond_cesm_out=tr_pond_cesm, & -#else tr_iso_out=tr_iso, tr_aero_out=tr_aero, & -#endif tr_pond_topo_out=tr_pond_topo, tr_pond_lvl_out=tr_pond_lvl, & tr_snow_out=tr_snow, tr_brine_out=tr_brine, & tr_bgc_N_out=tr_bgc_N, tr_bgc_C_out=tr_bgc_C, tr_bgc_Nit_out=tr_bgc_Nit, & @@ -416,13 +408,6 @@ subroutine init_restart_write(filename_spec) call 
define_rest_field(ncid,'vlvl',dims) end if -#ifdef UNDEPRECATE_CESMPONDS - if (tr_pond_cesm) then - call define_rest_field(ncid,'apnd',dims) - call define_rest_field(ncid,'hpnd',dims) - end if -#endif - if (tr_pond_topo) then call define_rest_field(ncid,'apnd',dims) call define_rest_field(ncid,'hpnd',dims) diff --git a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 b/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 index 1124cc048..679a2b6e6 100644 --- a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 +++ b/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 @@ -151,11 +151,7 @@ subroutine init_restart_write(filename_spec) solve_zsal, skl_bgc, z_tracers logical (kind=log_kind) :: & -#ifdef UNDEPRECATE_CESMPONDS - tr_iage, tr_FY, tr_lvl, tr_iso, tr_aero, tr_pond_cesm, & -#else tr_iage, tr_FY, tr_lvl, tr_iso, tr_aero, & -#endif tr_pond_topo, tr_pond_lvl, tr_brine, tr_snow, & tr_bgc_N, tr_bgc_C, tr_bgc_Nit, & tr_bgc_Sil, tr_bgc_DMS, & @@ -191,11 +187,7 @@ subroutine init_restart_write(filename_spec) call icepack_query_tracer_sizes(nbtrcr_out=nbtrcr) call icepack_query_tracer_flags( & tr_iage_out=tr_iage, tr_FY_out=tr_FY, tr_lvl_out=tr_lvl, & -#ifdef UNDEPRECATE_CESMPONDS - tr_iso_out=tr_iso, tr_aero_out=tr_aero, tr_pond_cesm_out=tr_pond_cesm, & -#else tr_iso_out=tr_iso, tr_aero_out=tr_aero, & -#endif tr_pond_topo_out=tr_pond_topo, tr_pond_lvl_out=tr_pond_lvl, & tr_snow_out=tr_snow, tr_brine_out=tr_brine, & tr_bgc_N_out=tr_bgc_N, tr_bgc_C_out=tr_bgc_C, tr_bgc_Nit_out=tr_bgc_Nit, & @@ -420,13 +412,6 @@ subroutine init_restart_write(filename_spec) call define_rest_field(File,'vlvl',dims) end if -#ifdef UNDEPRECATE_CESMPONDS - if (tr_pond_cesm) then - call define_rest_field(File,'apnd',dims) - call define_rest_field(File,'hpnd',dims) - end if -#endif - if (tr_pond_topo) then call define_rest_field(File,'apnd',dims) call define_rest_field(File,'hpnd',dims) diff --git a/cicecore/drivers/direct/hadgem3/CICE_InitMod.F90 b/cicecore/drivers/direct/hadgem3/CICE_InitMod.F90 index 87dc8d9a1..0b8ed689e 100644 --- a/cicecore/drivers/direct/hadgem3/CICE_InitMod.F90 +++ b/cicecore/drivers/direct/hadgem3/CICE_InitMod.F90 @@ -235,17 +235,10 @@ subroutine init_restart use ice_grid, only: tmask use ice_init, only: ice_ic use ice_init_column, only: init_age, init_FY, init_lvl, & -#ifdef UNDEPRECATE_CESMPONDS - init_meltponds_cesm, init_meltponds_lvl, init_meltponds_topo, & -#else init_meltponds_lvl, init_meltponds_topo, & -#endif init_aerosol, init_hbrine, init_bgc, init_fsd use ice_restart_column, only: restart_age, read_restart_age, & restart_FY, read_restart_FY, restart_lvl, read_restart_lvl, & -#ifdef UNDEPRECATE_CESMPONDS - restart_pond_cesm, read_restart_pond_cesm, & -#endif restart_pond_lvl, read_restart_pond_lvl, & restart_pond_topo, read_restart_pond_topo, & restart_fsd, read_restart_fsd, & @@ -260,11 +253,7 @@ subroutine init_restart i, j , & ! horizontal indices iblk ! 
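All of the icepack_query_* calls in these hunks share one defensive idiom, worth seeing in isolation: query Icepack, flush its warning buffer to the diagnostic unit, and abort with source location if the query failed. Condensed sketch (declarations abbreviated; the calls follow the patch's use statements):

   logical (kind=log_kind) :: tr_pond_lvl, tr_pond_topo
   character(len=*), parameter :: subname = '(example_query)'

   call icepack_query_tracer_flags(tr_pond_lvl_out=tr_pond_lvl, &
        tr_pond_topo_out=tr_pond_topo)
   call icepack_warnings_flush(nu_diag)       ! surface any buffered warnings
   if (icepack_warnings_aborted()) call abort_ice(error_message=subname, &
        file=__FILE__, line=__LINE__)         ! abort with file/line context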
block index logical(kind=log_kind) :: & -#ifdef UNDEPRECATE_CESMPONDS - tr_iage, tr_FY, tr_lvl, tr_pond_cesm, tr_pond_lvl, & -#else tr_iage, tr_FY, tr_lvl, tr_pond_lvl, & -#endif tr_pond_topo, tr_fsd, tr_aero, tr_brine, & skl_bgc, z_tracers, solve_zsal integer(kind=int_kind) :: & @@ -283,11 +272,7 @@ subroutine init_restart call icepack_query_parameters(skl_bgc_out=skl_bgc, & z_tracers_out=z_tracers, solve_zsal_out=solve_zsal) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & -#ifdef UNDEPRECATE_CESMPONDS - tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & -#else tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & -#endif tr_pond_topo_out=tr_pond_topo, tr_aero_out=tr_aero, tr_brine_out=tr_brine, & tr_fsd_out=tr_fsd) call icepack_query_tracer_indices(nt_alvl_out=nt_alvl, nt_vlvl_out=nt_vlvl, & @@ -346,21 +331,6 @@ subroutine init_restart enddo ! iblk endif endif -#ifdef UNDEPRECATE_CESMPONDS - ! CESM melt ponds - if (tr_pond_cesm) then - if (trim(runtype) == 'continue') & - restart_pond_cesm = .true. - if (restart_pond_cesm) then - call read_restart_pond_cesm - else - do iblk = 1, nblocks - call init_meltponds_cesm(trcrn(:,:,nt_apnd,:,iblk), & - trcrn(:,:,nt_hpnd,:,iblk)) - enddo ! iblk - endif - endif -#endif ! level-ice melt ponds if (tr_pond_lvl) then if (trim(runtype) == 'continue') & diff --git a/cicecore/drivers/direct/hadgem3/CICE_RunMod.F90 b/cicecore/drivers/direct/hadgem3/CICE_RunMod.F90 index 91f7985bd..b67e1a223 100644 --- a/cicecore/drivers/direct/hadgem3/CICE_RunMod.F90 +++ b/cicecore/drivers/direct/hadgem3/CICE_RunMod.F90 @@ -145,11 +145,7 @@ subroutine ice_step use ice_history_bgc, only: init_history_bgc use ice_restart, only: final_restart use ice_restart_column, only: write_restart_age, write_restart_FY, & -#ifdef UNDEPRECATE_CESMPONDS - write_restart_lvl, write_restart_pond_cesm, write_restart_pond_lvl, & -#else write_restart_lvl, write_restart_pond_lvl, & -#endif write_restart_pond_topo, write_restart_aero, write_restart_fsd, & write_restart_bgc, write_restart_hbrine use ice_restart_driver, only: dumpfile @@ -172,11 +168,7 @@ subroutine ice_step logical (kind=log_kind) :: & tr_iage, tr_FY, tr_lvl, tr_fsd, & -#ifdef UNDEPRECATE_CESMPONDS - tr_pond_cesm, tr_pond_lvl, tr_pond_topo, tr_brine, tr_aero, & -#else tr_pond_lvl, tr_pond_topo, tr_brine, tr_aero, & -#endif calc_Tsfc, skl_bgc, solve_zsal, z_tracers, wave_spec character(len=*), parameter :: subname = '(ice_step)' @@ -185,11 +177,7 @@ subroutine ice_step solve_zsal_out=solve_zsal, z_tracers_out=z_tracers, ktherm_out=ktherm, & wave_spec_out=wave_spec) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & -#ifdef UNDEPRECATE_CESMPONDS - tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & -#else tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & -#endif tr_pond_topo_out=tr_pond_topo, tr_brine_out=tr_brine, tr_aero_out=tr_aero, & tr_fsd_out=tr_fsd) call icepack_warnings_flush(nu_diag) @@ -333,9 +321,6 @@ subroutine ice_step if (tr_iage) call write_restart_age if (tr_FY) call write_restart_FY if (tr_lvl) call write_restart_lvl -#ifdef UNDEPRECATE_CESMPONDS - if (tr_pond_cesm) call write_restart_pond_cesm -#endif if (tr_pond_lvl) call write_restart_pond_lvl if (tr_pond_topo) call write_restart_pond_topo if (tr_fsd) call write_restart_fsd diff --git a/cicecore/drivers/direct/nemo_concepts/CICE_InitMod.F90 b/cicecore/drivers/direct/nemo_concepts/CICE_InitMod.F90 index 87dc8d9a1..0b8ed689e 100644 --- 
a/cicecore/drivers/direct/nemo_concepts/CICE_InitMod.F90 +++ b/cicecore/drivers/direct/nemo_concepts/CICE_InitMod.F90 @@ -235,17 +235,10 @@ subroutine init_restart use ice_grid, only: tmask use ice_init, only: ice_ic use ice_init_column, only: init_age, init_FY, init_lvl, & -#ifdef UNDEPRECATE_CESMPONDS - init_meltponds_cesm, init_meltponds_lvl, init_meltponds_topo, & -#else init_meltponds_lvl, init_meltponds_topo, & -#endif init_aerosol, init_hbrine, init_bgc, init_fsd use ice_restart_column, only: restart_age, read_restart_age, & restart_FY, read_restart_FY, restart_lvl, read_restart_lvl, & -#ifdef UNDEPRECATE_CESMPONDS - restart_pond_cesm, read_restart_pond_cesm, & -#endif restart_pond_lvl, read_restart_pond_lvl, & restart_pond_topo, read_restart_pond_topo, & restart_fsd, read_restart_fsd, & @@ -260,11 +253,7 @@ subroutine init_restart i, j , & ! horizontal indices iblk ! block index logical(kind=log_kind) :: & -#ifdef UNDEPRECATE_CESMPONDS - tr_iage, tr_FY, tr_lvl, tr_pond_cesm, tr_pond_lvl, & -#else tr_iage, tr_FY, tr_lvl, tr_pond_lvl, & -#endif tr_pond_topo, tr_fsd, tr_aero, tr_brine, & skl_bgc, z_tracers, solve_zsal integer(kind=int_kind) :: & @@ -283,11 +272,7 @@ subroutine init_restart call icepack_query_parameters(skl_bgc_out=skl_bgc, & z_tracers_out=z_tracers, solve_zsal_out=solve_zsal) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & -#ifdef UNDEPRECATE_CESMPONDS - tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & -#else tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & -#endif tr_pond_topo_out=tr_pond_topo, tr_aero_out=tr_aero, tr_brine_out=tr_brine, & tr_fsd_out=tr_fsd) call icepack_query_tracer_indices(nt_alvl_out=nt_alvl, nt_vlvl_out=nt_vlvl, & @@ -346,21 +331,6 @@ subroutine init_restart enddo ! iblk endif endif -#ifdef UNDEPRECATE_CESMPONDS - ! CESM melt ponds - if (tr_pond_cesm) then - if (trim(runtype) == 'continue') & - restart_pond_cesm = .true. - if (restart_pond_cesm) then - call read_restart_pond_cesm - else - do iblk = 1, nblocks - call init_meltponds_cesm(trcrn(:,:,nt_apnd,:,iblk), & - trcrn(:,:,nt_hpnd,:,iblk)) - enddo ! iblk - endif - endif -#endif ! 
level-ice melt ponds if (tr_pond_lvl) then if (trim(runtype) == 'continue') & diff --git a/cicecore/drivers/direct/nemo_concepts/CICE_RunMod.F90 b/cicecore/drivers/direct/nemo_concepts/CICE_RunMod.F90 index ea6a65165..c9875d769 100644 --- a/cicecore/drivers/direct/nemo_concepts/CICE_RunMod.F90 +++ b/cicecore/drivers/direct/nemo_concepts/CICE_RunMod.F90 @@ -145,11 +145,7 @@ subroutine ice_step use ice_history_bgc, only: init_history_bgc use ice_restart, only: final_restart use ice_restart_column, only: write_restart_age, write_restart_FY, & -#ifdef UNDEPRECATE_CESMPONDS - write_restart_lvl, write_restart_pond_cesm, write_restart_pond_lvl, & -#else write_restart_lvl, write_restart_pond_lvl, & -#endif write_restart_pond_topo, write_restart_aero, write_restart_fsd, & write_restart_bgc, write_restart_hbrine use ice_restart_driver, only: dumpfile @@ -172,11 +168,7 @@ subroutine ice_step logical (kind=log_kind) :: & tr_iage, tr_FY, tr_lvl, tr_fsd, & -#ifdef UNDEPRECATE_CESMPONDS - tr_pond_cesm, tr_pond_lvl, tr_pond_topo, tr_brine, tr_aero, & -#else tr_pond_lvl, tr_pond_topo, tr_brine, tr_aero, & -#endif calc_Tsfc, skl_bgc, solve_zsal, z_tracers, wave_spec character(len=*), parameter :: subname = '(ice_step)' @@ -185,11 +177,7 @@ subroutine ice_step solve_zsal_out=solve_zsal, z_tracers_out=z_tracers, ktherm_out=ktherm, & wave_spec_out=wave_spec) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & -#ifdef UNDEPRECATE_CESMPONDS - tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & -#else tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & -#endif tr_pond_topo_out=tr_pond_topo, tr_brine_out=tr_brine, tr_aero_out=tr_aero, & tr_fsd_out=tr_fsd) call icepack_warnings_flush(nu_diag) @@ -333,9 +321,6 @@ subroutine ice_step if (tr_iage) call write_restart_age if (tr_FY) call write_restart_FY if (tr_lvl) call write_restart_lvl -#ifdef UNDEPRECATE_CESMPONDS - if (tr_pond_cesm) call write_restart_pond_cesm -#endif if (tr_pond_lvl) call write_restart_pond_lvl if (tr_pond_topo) call write_restart_pond_topo if (tr_fsd) call write_restart_fsd diff --git a/cicecore/drivers/mct/cesm1/CICE_InitMod.F90 b/cicecore/drivers/mct/cesm1/CICE_InitMod.F90 index cfc5bece9..a8bf96ad2 100644 --- a/cicecore/drivers/mct/cesm1/CICE_InitMod.F90 +++ b/cicecore/drivers/mct/cesm1/CICE_InitMod.F90 @@ -264,17 +264,10 @@ subroutine init_restart use ice_grid, only: tmask use ice_init, only: ice_ic use ice_init_column, only: init_age, init_FY, init_lvl, init_snowtracers, & -#ifdef UNDEPRECATE_CESMPONDS - init_meltponds_cesm, init_meltponds_lvl, init_meltponds_topo, & -#else init_meltponds_lvl, init_meltponds_topo, & -#endif init_isotope, init_aerosol, init_hbrine, init_bgc, init_fsd use ice_restart_column, only: restart_age, read_restart_age, & restart_FY, read_restart_FY, restart_lvl, read_restart_lvl, & -#ifdef UNDEPRECATE_CESMPONDS - restart_pond_cesm, read_restart_pond_cesm, & -#endif restart_pond_lvl, read_restart_pond_lvl, & restart_pond_topo, read_restart_pond_topo, & restart_snow, read_restart_snow, & @@ -291,11 +284,7 @@ subroutine init_restart i, j , & ! horizontal indices iblk ! 
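The level-ice pond block that each driver keeps follows a single init-or-restart pattern: a 'continue' run forces the restart read, otherwise the tracers are initialized block by block. Sketch (the init_meltponds_lvl argument list is abbreviated here to the two pond tracers shown in the deleted CESM call):

   if (tr_pond_lvl) then
      if (trim(runtype) == 'continue') &
         restart_pond_lvl = .true.        ! continue runs must read the dump
      if (restart_pond_lvl) then
         call read_restart_pond_lvl
      else
         do iblk = 1, nblocks             ! cold start: initialize pond tracers
            call init_meltponds_lvl(trcrn(:,:,nt_apnd,:,iblk), &
                                    trcrn(:,:,nt_hpnd,:,iblk))
         enddo                            ! iblk
      endif
   endif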
block index logical(kind=log_kind) :: & -#ifdef UNDEPRECATE_CESMPONDS - tr_iage, tr_FY, tr_lvl, tr_pond_cesm, tr_pond_lvl, & -#else tr_iage, tr_FY, tr_lvl, tr_pond_lvl, & -#endif tr_pond_topo, tr_snow, tr_fsd, tr_iso, tr_aero, tr_brine, & skl_bgc, z_tracers, solve_zsal integer(kind=int_kind) :: & @@ -315,11 +304,7 @@ subroutine init_restart call icepack_query_parameters(skl_bgc_out=skl_bgc, & z_tracers_out=z_tracers, solve_zsal_out=solve_zsal) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & -#ifdef UNDEPRECATE_CESMPONDS - tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & -#else tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & -#endif tr_pond_topo_out=tr_pond_topo, tr_aero_out=tr_aero, tr_brine_out=tr_brine, & tr_snow_out=tr_snow, tr_fsd_out=tr_fsd, tr_iso_out=tr_iso) call icepack_query_tracer_indices(nt_alvl_out=nt_alvl, nt_vlvl_out=nt_vlvl, & @@ -381,21 +366,6 @@ subroutine init_restart enddo ! iblk endif endif -#ifdef UNDEPRECATE_CESMPONDS - ! CESM melt ponds - if (tr_pond_cesm) then - if (trim(runtype) == 'continue') & - restart_pond_cesm = .true. - if (restart_pond_cesm) then - call read_restart_pond_cesm - else - do iblk = 1, nblocks - call init_meltponds_cesm(trcrn(:,:,nt_apnd,:,iblk), & - trcrn(:,:,nt_hpnd,:,iblk)) - enddo ! iblk - endif - endif -#endif ! level-ice melt ponds if (tr_pond_lvl) then if (trim(runtype) == 'continue') & diff --git a/cicecore/drivers/mct/cesm1/CICE_RunMod.F90 b/cicecore/drivers/mct/cesm1/CICE_RunMod.F90 index b96086c6d..5836479b4 100644 --- a/cicecore/drivers/mct/cesm1/CICE_RunMod.F90 +++ b/cicecore/drivers/mct/cesm1/CICE_RunMod.F90 @@ -137,11 +137,7 @@ subroutine ice_step use ice_history_bgc, only: init_history_bgc use ice_restart, only: final_restart use ice_restart_column, only: write_restart_age, write_restart_FY, & -#ifdef UNDEPRECATE_CESMPONDS - write_restart_lvl, write_restart_pond_cesm, write_restart_pond_lvl, & -#else write_restart_lvl, write_restart_pond_lvl, & -#endif write_restart_pond_topo, write_restart_aero, write_restart_fsd, & write_restart_iso, write_restart_bgc, write_restart_hbrine, & write_restart_snow @@ -166,11 +162,7 @@ subroutine ice_step logical (kind=log_kind) :: & tr_iage, tr_FY, tr_lvl, tr_fsd, tr_snow, & -#ifdef UNDEPRECATE_CESMPONDS - tr_pond_cesm, tr_pond_lvl, tr_pond_topo, tr_brine, tr_iso, tr_aero, & -#else tr_pond_lvl, tr_pond_topo, tr_brine, tr_iso, tr_aero, & -#endif calc_Tsfc, skl_bgc, solve_zsal, z_tracers, wave_spec character(len=*), parameter :: subname = '(ice_step)' @@ -188,11 +180,7 @@ subroutine ice_step solve_zsal_out=solve_zsal, z_tracers_out=z_tracers, ktherm_out=ktherm, & wave_spec_out=wave_spec) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & -#ifdef UNDEPRECATE_CESMPONDS - tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & -#else tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & -#endif tr_pond_topo_out=tr_pond_topo, tr_brine_out=tr_brine, tr_aero_out=tr_aero, & tr_iso_out=tr_iso, tr_fsd_out=tr_fsd, tr_snow_out=tr_snow) call icepack_warnings_flush(nu_diag) @@ -403,9 +391,6 @@ subroutine ice_step if (tr_iage) call write_restart_age if (tr_FY) call write_restart_FY if (tr_lvl) call write_restart_lvl -#ifdef UNDEPRECATE_CESMPONDS - if (tr_pond_cesm) call write_restart_pond_cesm -#endif if (tr_pond_lvl) call write_restart_pond_lvl if (tr_pond_topo) call write_restart_pond_topo if (tr_snow) call write_restart_snow diff --git a/cicecore/drivers/nuopc/cmeps/CICE_InitMod.F90 
b/cicecore/drivers/nuopc/cmeps/CICE_InitMod.F90 index 3d5e5cc2a..5fbde9cce 100644 --- a/cicecore/drivers/nuopc/cmeps/CICE_InitMod.F90 +++ b/cicecore/drivers/nuopc/cmeps/CICE_InitMod.F90 @@ -222,17 +222,10 @@ subroutine init_restart() use ice_grid, only: tmask use ice_init, only: ice_ic use ice_init_column, only: init_age, init_FY, init_lvl, init_snowtracers, & -#ifdef UNDEPRECATE_CESMPONDS - init_meltponds_cesm, init_meltponds_lvl, init_meltponds_topo, & -#else init_meltponds_lvl, init_meltponds_topo, & -#endif init_isotope, init_aerosol, init_hbrine, init_bgc, init_fsd use ice_restart_column, only: restart_age, read_restart_age, & restart_FY, read_restart_FY, restart_lvl, read_restart_lvl, & -#ifdef UNDEPRECATE_CESMPONDS - restart_pond_cesm, read_restart_pond_cesm, & -#endif restart_pond_lvl, read_restart_pond_lvl, & restart_pond_topo, read_restart_pond_topo, & restart_snow, read_restart_snow, & @@ -249,11 +242,7 @@ subroutine init_restart() i, j , & ! horizontal indices iblk ! block index logical(kind=log_kind) :: & -#ifdef UNDEPRECATE_CESMPONDS - tr_iage, tr_FY, tr_lvl, tr_pond_cesm, tr_pond_lvl, & -#else tr_iage, tr_FY, tr_lvl, tr_pond_lvl, & -#endif tr_pond_topo, tr_fsd, tr_iso, tr_aero, tr_brine, tr_snow, & skl_bgc, z_tracers, solve_zsal integer(kind=int_kind) :: & @@ -274,11 +263,7 @@ subroutine init_restart() call icepack_query_parameters(skl_bgc_out=skl_bgc, & z_tracers_out=z_tracers, solve_zsal_out=solve_zsal) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & -#ifdef UNDEPRECATE_CESMPONDS - tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & -#else tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & -#endif tr_pond_topo_out=tr_pond_topo, tr_aero_out=tr_aero, tr_brine_out=tr_brine, & tr_snow_out=tr_snow, tr_fsd_out=tr_fsd, tr_iso_out=tr_iso) call icepack_query_tracer_indices(nt_alvl_out=nt_alvl, nt_vlvl_out=nt_vlvl, & @@ -340,21 +325,6 @@ subroutine init_restart() enddo ! iblk endif endif -#ifdef UNDEPRECATE_CESMPONDS - ! CESM melt ponds - if (tr_pond_cesm) then - if (trim(runtype) == 'continue') & - restart_pond_cesm = .true. - if (restart_pond_cesm) then - call read_restart_pond_cesm - else - do iblk = 1, nblocks - call init_meltponds_cesm(trcrn(:,:,nt_apnd,:,iblk), & - trcrn(:,:,nt_hpnd,:,iblk)) - enddo ! iblk - endif - endif -#endif ! 
level-ice melt ponds if (tr_pond_lvl) then if (trim(runtype) == 'continue') & diff --git a/cicecore/drivers/nuopc/cmeps/CICE_RunMod.F90 b/cicecore/drivers/nuopc/cmeps/CICE_RunMod.F90 index c68583648..e908f509f 100644 --- a/cicecore/drivers/nuopc/cmeps/CICE_RunMod.F90 +++ b/cicecore/drivers/nuopc/cmeps/CICE_RunMod.F90 @@ -121,11 +121,7 @@ subroutine ice_step use ice_history_bgc, only: init_history_bgc use ice_restart, only: final_restart use ice_restart_column, only: write_restart_age, write_restart_FY, & -#ifdef UNDEPRECATE_CESMPONDS - write_restart_lvl, write_restart_pond_cesm, write_restart_pond_lvl, & -#else write_restart_lvl, write_restart_pond_lvl, & -#endif write_restart_pond_topo, write_restart_aero, write_restart_fsd, & write_restart_iso, write_restart_bgc, write_restart_hbrine, & write_restart_snow @@ -150,11 +146,7 @@ subroutine ice_step logical (kind=log_kind) :: & tr_iage, tr_FY, tr_lvl, tr_fsd, tr_snow, & -#ifdef UNDEPRECATE_CESMPONDS - tr_pond_cesm, tr_pond_lvl, tr_pond_topo, tr_brine, tr_iso, tr_aero, & -#else tr_pond_lvl, tr_pond_topo, tr_brine, tr_iso, tr_aero, & -#endif calc_Tsfc, skl_bgc, solve_zsal, z_tracers, wave_spec character(len=*), parameter :: subname = '(ice_step)' @@ -172,11 +164,7 @@ subroutine ice_step solve_zsal_out=solve_zsal, z_tracers_out=z_tracers, ktherm_out=ktherm, & wave_spec_out=wave_spec) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & -#ifdef UNDEPRECATE_CESMPONDS - tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & -#else tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & -#endif tr_pond_topo_out=tr_pond_topo, tr_brine_out=tr_brine, tr_aero_out=tr_aero, & tr_iso_out=tr_iso, tr_fsd_out=tr_fsd, tr_snow_out=tr_snow) call icepack_warnings_flush(nu_diag) @@ -382,9 +370,6 @@ subroutine ice_step if (tr_iage) call write_restart_age if (tr_FY) call write_restart_FY if (tr_lvl) call write_restart_lvl -#ifdef UNDEPRECATE_CESMPONDS - if (tr_pond_cesm) call write_restart_pond_cesm -#endif if (tr_pond_lvl) call write_restart_pond_lvl if (tr_pond_topo) call write_restart_pond_topo if (tr_snow) call write_restart_snow diff --git a/cicecore/drivers/nuopc/dmi/CICE_InitMod.F90 b/cicecore/drivers/nuopc/dmi/CICE_InitMod.F90 index 78d462d4c..22596429d 100644 --- a/cicecore/drivers/nuopc/dmi/CICE_InitMod.F90 +++ b/cicecore/drivers/nuopc/dmi/CICE_InitMod.F90 @@ -271,17 +271,10 @@ subroutine init_restart use ice_grid, only: tmask use ice_init, only: ice_ic use ice_init_column, only: init_age, init_FY, init_lvl, init_snowtracers, & -#ifdef UNDEPRECATE_CESMPONDS - init_meltponds_cesm, init_meltponds_lvl, init_meltponds_topo, & -#else init_meltponds_lvl, init_meltponds_topo, & -#endif init_isotope, init_aerosol, init_hbrine, init_bgc, init_fsd use ice_restart_column, only: restart_age, read_restart_age, & restart_FY, read_restart_FY, restart_lvl, read_restart_lvl, & -#ifdef UNDEPRECATE_CESMPONDS - restart_pond_cesm, read_restart_pond_cesm, & -#endif restart_pond_lvl, read_restart_pond_lvl, & restart_pond_topo, read_restart_pond_topo, & restart_snow, read_restart_snow, & @@ -298,11 +291,7 @@ subroutine init_restart i, j , & ! horizontal indices iblk ! 
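On the write side the dispatch stays flat in every ice_step variant; because input_data rejects more than one pond scheme, at most one pond writer can fire per run. Condensed from the surviving lines:

   if (tr_iage)      call write_restart_age
   if (tr_FY)        call write_restart_FY
   if (tr_lvl)       call write_restart_lvl
   if (tr_pond_lvl)  call write_restart_pond_lvl   ! at most one of these two
   if (tr_pond_topo) call write_restart_pond_topo  ! runs in any configuration
   if (tr_snow)      call write_restart_snow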
block index logical(kind=log_kind) :: & -#ifdef UNDEPRECATE_CESMPONDS - tr_iage, tr_FY, tr_lvl, tr_pond_cesm, tr_pond_lvl, & -#else tr_iage, tr_FY, tr_lvl, tr_pond_lvl, & -#endif tr_pond_topo, tr_snow, tr_fsd, tr_iso, tr_aero, tr_brine, & skl_bgc, z_tracers, solve_zsal integer(kind=int_kind) :: & @@ -322,11 +311,7 @@ subroutine init_restart call icepack_query_parameters(skl_bgc_out=skl_bgc, & z_tracers_out=z_tracers, solve_zsal_out=solve_zsal) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & -#ifdef UNDEPRECATE_CESMPONDS - tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & -#else tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & -#endif tr_pond_topo_out=tr_pond_topo, tr_aero_out=tr_aero, tr_brine_out=tr_brine, & tr_snow_out=tr_snow, tr_fsd_out=tr_fsd, tr_iso_out=tr_iso) call icepack_query_tracer_indices(nt_alvl_out=nt_alvl, nt_vlvl_out=nt_vlvl, & @@ -388,21 +373,6 @@ subroutine init_restart enddo ! iblk endif endif -#ifdef UNDEPRECATE_CESMPONDS - ! CESM melt ponds - if (tr_pond_cesm) then - if (trim(runtype) == 'continue') & - restart_pond_cesm = .true. - if (restart_pond_cesm) then - call read_restart_pond_cesm - else - do iblk = 1, nblocks - call init_meltponds_cesm(trcrn(:,:,nt_apnd,:,iblk), & - trcrn(:,:,nt_hpnd,:,iblk)) - enddo ! iblk - endif - endif -#endif ! level-ice melt ponds if (tr_pond_lvl) then if (trim(runtype) == 'continue') & diff --git a/cicecore/drivers/nuopc/dmi/CICE_RunMod.F90 b/cicecore/drivers/nuopc/dmi/CICE_RunMod.F90 index 6e799723e..77bb7738e 100644 --- a/cicecore/drivers/nuopc/dmi/CICE_RunMod.F90 +++ b/cicecore/drivers/nuopc/dmi/CICE_RunMod.F90 @@ -157,11 +157,7 @@ subroutine ice_step use ice_history_bgc, only: init_history_bgc use ice_restart, only: final_restart use ice_restart_column, only: write_restart_age, write_restart_FY, & -#ifdef UNDEPRECATE_CESMPONDS - write_restart_lvl, write_restart_pond_cesm, write_restart_pond_lvl, & -#else write_restart_lvl, write_restart_pond_lvl, & -#endif write_restart_pond_topo, write_restart_aero, write_restart_fsd, & write_restart_iso, write_restart_bgc, write_restart_hbrine, & write_restart_snow @@ -184,11 +180,7 @@ subroutine ice_step logical (kind=log_kind) :: & tr_iage, tr_FY, tr_lvl, tr_fsd, tr_snow, & -#ifdef UNDEPRECATE_CESMPONDS - tr_pond_cesm, tr_pond_lvl, tr_pond_topo, tr_brine, tr_iso, tr_aero, & -#else tr_pond_lvl, tr_pond_topo, tr_brine, tr_iso, tr_aero, & -#endif calc_Tsfc, skl_bgc, solve_zsal, z_tracers, wave_spec character(len=*), parameter :: subname = '(ice_step)' @@ -206,11 +198,7 @@ subroutine ice_step solve_zsal_out=solve_zsal, z_tracers_out=z_tracers, ktherm_out=ktherm, & wave_spec_out=wave_spec) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & -#ifdef UNDEPRECATE_CESMPONDS - tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & -#else tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & -#endif tr_pond_topo_out=tr_pond_topo, tr_brine_out=tr_brine, tr_aero_out=tr_aero, & tr_iso_out=tr_iso, tr_fsd_out=tr_fsd, tr_snow_out=tr_snow) call icepack_warnings_flush(nu_diag) @@ -412,9 +400,6 @@ subroutine ice_step if (tr_iage) call write_restart_age if (tr_FY) call write_restart_FY if (tr_lvl) call write_restart_lvl -#ifdef UNDEPRECATE_CESMPONDS - if (tr_pond_cesm) call write_restart_pond_cesm -#endif if (tr_pond_lvl) call write_restart_pond_lvl if (tr_pond_topo) call write_restart_pond_topo if (tr_snow) call write_restart_snow diff --git a/cicecore/drivers/standalone/cice/CICE_InitMod.F90 
b/cicecore/drivers/standalone/cice/CICE_InitMod.F90 index 07a151a01..9c30b15a3 100644 --- a/cicecore/drivers/standalone/cice/CICE_InitMod.F90 +++ b/cicecore/drivers/standalone/cice/CICE_InitMod.F90 @@ -266,17 +266,10 @@ subroutine init_restart use ice_grid, only: tmask use ice_init, only: ice_ic use ice_init_column, only: init_age, init_FY, init_lvl, init_snowtracers, & -#ifdef UNDEPRECATE_CESMPONDS - init_meltponds_cesm, init_meltponds_lvl, init_meltponds_topo, & -#else init_meltponds_lvl, init_meltponds_topo, & -#endif init_isotope, init_aerosol, init_hbrine, init_bgc, init_fsd use ice_restart_column, only: restart_age, read_restart_age, & restart_FY, read_restart_FY, restart_lvl, read_restart_lvl, & -#ifdef UNDEPRECATE_CESMPONDS - restart_pond_cesm, read_restart_pond_cesm, & -#endif restart_pond_lvl, read_restart_pond_lvl, & restart_pond_topo, read_restart_pond_topo, & restart_snow, read_restart_snow, & @@ -293,11 +286,7 @@ subroutine init_restart i, j , & ! horizontal indices iblk ! block index logical(kind=log_kind) :: & -#ifdef UNDEPRECATE_CESMPONDS - tr_iage, tr_FY, tr_lvl, tr_pond_cesm, tr_pond_lvl, & -#else tr_iage, tr_FY, tr_lvl, tr_pond_lvl, & -#endif tr_pond_topo, tr_snow, tr_fsd, tr_iso, tr_aero, tr_brine, & skl_bgc, z_tracers, solve_zsal integer(kind=int_kind) :: & @@ -317,11 +306,7 @@ subroutine init_restart call icepack_query_parameters(skl_bgc_out=skl_bgc, & z_tracers_out=z_tracers, solve_zsal_out=solve_zsal) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & -#ifdef UNDEPRECATE_CESMPONDS - tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & -#else tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & -#endif tr_pond_topo_out=tr_pond_topo, tr_aero_out=tr_aero, tr_brine_out=tr_brine, & tr_snow_out=tr_snow, tr_fsd_out=tr_fsd, tr_iso_out=tr_iso) call icepack_query_tracer_indices(nt_alvl_out=nt_alvl, nt_vlvl_out=nt_vlvl, & @@ -383,21 +368,6 @@ subroutine init_restart enddo ! iblk endif endif -#ifdef UNDEPRECATE_CESMPONDS - ! CESM melt ponds - if (tr_pond_cesm) then - if (trim(runtype) == 'continue') & - restart_pond_cesm = .true. - if (restart_pond_cesm) then - call read_restart_pond_cesm - else - do iblk = 1, nblocks - call init_meltponds_cesm(trcrn(:,:,nt_apnd,:,iblk), & - trcrn(:,:,nt_hpnd,:,iblk)) - enddo ! iblk - endif - endif -#endif ! 
level-ice melt ponds if (tr_pond_lvl) then if (trim(runtype) == 'continue') & diff --git a/cicecore/drivers/standalone/cice/CICE_RunMod.F90 b/cicecore/drivers/standalone/cice/CICE_RunMod.F90 index 00c7921d1..ae7f7ab1f 100644 --- a/cicecore/drivers/standalone/cice/CICE_RunMod.F90 +++ b/cicecore/drivers/standalone/cice/CICE_RunMod.F90 @@ -151,11 +151,7 @@ subroutine ice_step use ice_history_bgc, only: init_history_bgc use ice_restart, only: final_restart use ice_restart_column, only: write_restart_age, write_restart_FY, & -#ifdef UNDEPRECATE_CESMPONDS - write_restart_lvl, write_restart_pond_cesm, write_restart_pond_lvl, & -#else write_restart_lvl, write_restart_pond_lvl, & -#endif write_restart_pond_topo, write_restart_aero, write_restart_fsd, & write_restart_iso, write_restart_bgc, write_restart_hbrine, & write_restart_snow @@ -178,11 +174,7 @@ subroutine ice_step logical (kind=log_kind) :: & tr_iage, tr_FY, tr_lvl, tr_fsd, tr_snow, & -#ifdef UNDEPRECATE_CESMPONDS - tr_pond_cesm, tr_pond_lvl, tr_pond_topo, tr_brine, tr_iso, tr_aero, & -#else tr_pond_lvl, tr_pond_topo, tr_brine, tr_iso, tr_aero, & -#endif calc_Tsfc, skl_bgc, solve_zsal, z_tracers, wave_spec character(len=*), parameter :: subname = '(ice_step)' @@ -200,11 +192,7 @@ subroutine ice_step solve_zsal_out=solve_zsal, z_tracers_out=z_tracers, ktherm_out=ktherm, & wave_spec_out=wave_spec) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & -#ifdef UNDEPRECATE_CESMPONDS - tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & -#else tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & -#endif tr_pond_topo_out=tr_pond_topo, tr_brine_out=tr_brine, tr_aero_out=tr_aero, & tr_iso_out=tr_iso, tr_fsd_out=tr_fsd, tr_snow_out=tr_snow) call icepack_warnings_flush(nu_diag) @@ -408,9 +396,6 @@ subroutine ice_step if (tr_iage) call write_restart_age if (tr_FY) call write_restart_FY if (tr_lvl) call write_restart_lvl -#ifdef UNDEPRECATE_CESMPONDS - if (tr_pond_cesm) call write_restart_pond_cesm -#endif if (tr_pond_lvl) call write_restart_pond_lvl if (tr_pond_topo) call write_restart_pond_topo if (tr_snow) call write_restart_snow diff --git a/cicecore/drivers/unittest/gridavgchk/CICE_InitMod.F90 b/cicecore/drivers/unittest/gridavgchk/CICE_InitMod.F90 index ad355d783..84d1a3a60 100644 --- a/cicecore/drivers/unittest/gridavgchk/CICE_InitMod.F90 +++ b/cicecore/drivers/unittest/gridavgchk/CICE_InitMod.F90 @@ -241,17 +241,10 @@ subroutine init_restart use ice_grid, only: tmask use ice_init, only: ice_ic use ice_init_column, only: init_age, init_FY, init_lvl, & -#ifdef UNDEPRECATE_CESMPONDS - init_meltponds_cesm, init_meltponds_lvl, init_meltponds_topo, & -#else init_meltponds_lvl, init_meltponds_topo, & -#endif init_isotope, init_aerosol, init_hbrine, init_bgc, init_fsd use ice_restart_column, only: restart_age, read_restart_age, & restart_FY, read_restart_FY, restart_lvl, read_restart_lvl, & -#ifdef UNDEPRECATE_CESMPONDS - restart_pond_cesm, read_restart_pond_cesm, & -#endif restart_pond_lvl, read_restart_pond_lvl, & restart_pond_topo, read_restart_pond_topo, & restart_fsd, read_restart_fsd, & @@ -267,11 +260,7 @@ subroutine init_restart i, j , & ! horizontal indices iblk ! 
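The trcrn slices handed to the initializers above rely on CICE's five-dimensional tracer layout. A shape sketch (sizes and index values are placeholders):

   ! trcrn(i, j, tracer, thickness category, block): a slice such as
   ! trcrn(:,:,nt_apnd,:,iblk) is one tracer, all categories, one block,
   ! matching the rank-3 dummy arguments of the pond initializers.
   integer, parameter :: nx_block=10, ny_block=10, ntrcr=8, ncat=5, max_blocks=4
   integer, parameter :: nt_apnd = 5
   integer :: iblk = 1
   real (kind=dbl_kind), allocatable :: trcrn(:,:,:,:,:)

   allocate(trcrn(nx_block, ny_block, ntrcr, ncat, max_blocks))
   trcrn(:,:,nt_apnd,:,iblk) = c0   ! e.g. zero the pond area tracer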
block index logical(kind=log_kind) :: & -#ifdef UNDEPRECATE_CESMPONDS - tr_iage, tr_FY, tr_lvl, tr_pond_cesm, tr_pond_lvl, & -#else tr_iage, tr_FY, tr_lvl, tr_pond_lvl, & -#endif tr_pond_topo, tr_fsd, tr_iso, tr_aero, tr_brine, & skl_bgc, z_tracers, solve_zsal integer(kind=int_kind) :: & @@ -290,11 +279,7 @@ subroutine init_restart call icepack_query_parameters(skl_bgc_out=skl_bgc, & z_tracers_out=z_tracers, solve_zsal_out=solve_zsal) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & -#ifdef UNDEPRECATE_CESMPONDS - tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & -#else tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & -#endif tr_pond_topo_out=tr_pond_topo, tr_aero_out=tr_aero, tr_brine_out=tr_brine, & tr_fsd_out=tr_fsd, tr_iso_out=tr_iso) call icepack_query_tracer_indices(nt_alvl_out=nt_alvl, nt_vlvl_out=nt_vlvl, & @@ -354,21 +339,6 @@ subroutine init_restart enddo ! iblk endif endif -#ifdef UNDEPRECATE_CESMPONDS - ! CESM melt ponds - if (tr_pond_cesm) then - if (trim(runtype) == 'continue') & - restart_pond_cesm = .true. - if (restart_pond_cesm) then - call read_restart_pond_cesm - else - do iblk = 1, nblocks - call init_meltponds_cesm(trcrn(:,:,nt_apnd,:,iblk), & - trcrn(:,:,nt_hpnd,:,iblk)) - enddo ! iblk - endif - endif -#endif ! level-ice melt ponds if (tr_pond_lvl) then if (trim(runtype) == 'continue') & diff --git a/cicecore/drivers/unittest/sumchk/CICE_InitMod.F90 b/cicecore/drivers/unittest/sumchk/CICE_InitMod.F90 index ad355d783..84d1a3a60 100644 --- a/cicecore/drivers/unittest/sumchk/CICE_InitMod.F90 +++ b/cicecore/drivers/unittest/sumchk/CICE_InitMod.F90 @@ -241,17 +241,10 @@ subroutine init_restart use ice_grid, only: tmask use ice_init, only: ice_ic use ice_init_column, only: init_age, init_FY, init_lvl, & -#ifdef UNDEPRECATE_CESMPONDS - init_meltponds_cesm, init_meltponds_lvl, init_meltponds_topo, & -#else init_meltponds_lvl, init_meltponds_topo, & -#endif init_isotope, init_aerosol, init_hbrine, init_bgc, init_fsd use ice_restart_column, only: restart_age, read_restart_age, & restart_FY, read_restart_FY, restart_lvl, read_restart_lvl, & -#ifdef UNDEPRECATE_CESMPONDS - restart_pond_cesm, read_restart_pond_cesm, & -#endif restart_pond_lvl, read_restart_pond_lvl, & restart_pond_topo, read_restart_pond_topo, & restart_fsd, read_restart_fsd, & @@ -267,11 +260,7 @@ subroutine init_restart i, j , & ! horizontal indices iblk ! block index logical(kind=log_kind) :: & -#ifdef UNDEPRECATE_CESMPONDS - tr_iage, tr_FY, tr_lvl, tr_pond_cesm, tr_pond_lvl, & -#else tr_iage, tr_FY, tr_lvl, tr_pond_lvl, & -#endif tr_pond_topo, tr_fsd, tr_iso, tr_aero, tr_brine, & skl_bgc, z_tracers, solve_zsal integer(kind=int_kind) :: & @@ -290,11 +279,7 @@ subroutine init_restart call icepack_query_parameters(skl_bgc_out=skl_bgc, & z_tracers_out=z_tracers, solve_zsal_out=solve_zsal) call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & -#ifdef UNDEPRECATE_CESMPONDS - tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & -#else tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & -#endif tr_pond_topo_out=tr_pond_topo, tr_aero_out=tr_aero, tr_brine_out=tr_brine, & tr_fsd_out=tr_fsd, tr_iso_out=tr_iso) call icepack_query_tracer_indices(nt_alvl_out=nt_alvl, nt_vlvl_out=nt_vlvl, & @@ -354,21 +339,6 @@ subroutine init_restart enddo ! iblk endif endif -#ifdef UNDEPRECATE_CESMPONDS - ! CESM melt ponds - if (tr_pond_cesm) then - if (trim(runtype) == 'continue') & - restart_pond_cesm = .true. 
- if (restart_pond_cesm) then - call read_restart_pond_cesm - else - do iblk = 1, nblocks - call init_meltponds_cesm(trcrn(:,:,nt_apnd,:,iblk), & - trcrn(:,:,nt_hpnd,:,iblk)) - enddo ! iblk - endif - endif -#endif ! level-ice melt ponds if (tr_pond_lvl) then if (trim(runtype) == 'continue') & diff --git a/cicecore/shared/ice_init_column.F90 b/cicecore/shared/ice_init_column.F90 index 5339aa6ec..06ab79cdb 100644 --- a/cicecore/shared/ice_init_column.F90 +++ b/cicecore/shared/ice_init_column.F90 @@ -44,11 +44,7 @@ module ice_init_column private public :: init_thermo_vertical, init_shortwave, & init_age, init_FY, init_lvl, init_fsd, & -#ifdef UNDEPRECATE_CESMPONDS - init_meltponds_cesm, init_meltponds_lvl, init_meltponds_topo, & -#else init_meltponds_lvl, init_meltponds_topo, & -#endif init_aerosol, init_bgc, init_hbrine, init_zbgc, input_zbgc, & count_tracers, init_isotope, init_snowtracers @@ -547,23 +543,6 @@ subroutine init_lvl(iblk, alvl, vlvl) end subroutine init_lvl -#ifdef UNDEPRECATE_CESMPONDS -!======================================================================= - -! Initialize melt ponds. - - subroutine init_meltponds_cesm(apnd, hpnd) - - real(kind=dbl_kind), dimension(:,:,:), intent(out) :: & - apnd , & ! melt pond area fraction - hpnd ! melt pond depth - character(len=*),parameter :: subname='(init_meltponds_cesm)' - - apnd(:,:,:) = c0 - hpnd(:,:,:) = c0 - - end subroutine init_meltponds_cesm -#endif !======================================================================= ! Initialize melt ponds. @@ -1818,11 +1797,7 @@ subroutine count_tracers integer (kind=int_kind) :: ntrcr logical (kind=log_kind) :: tr_iage, tr_FY, tr_lvl, tr_pond, tr_aero, tr_fsd logical (kind=log_kind) :: tr_snow -#ifdef UNDEPRECATE_CESMPONDS - logical (kind=log_kind) :: tr_iso, tr_pond_cesm, tr_pond_lvl, tr_pond_topo -#else logical (kind=log_kind) :: tr_iso, tr_pond_lvl, tr_pond_topo -#endif integer (kind=int_kind) :: nt_Tsfc, nt_sice, nt_qice, nt_qsno, nt_iage, nt_FY integer (kind=int_kind) :: nt_alvl, nt_vlvl, nt_apnd, nt_hpnd, nt_ipnd, nt_aero integer (kind=int_kind) :: nt_fsd, nt_isosno, nt_isoice @@ -1907,11 +1882,7 @@ subroutine count_tracers call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & tr_lvl_out=tr_lvl, tr_aero_out=tr_aero, tr_pond_out=tr_pond, & -#ifdef UNDEPRECATE_CESMPONDS - tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & -#else tr_pond_lvl_out=tr_pond_lvl, & -#endif tr_pond_topo_out=tr_pond_topo, tr_brine_out=tr_brine, tr_fsd_out=tr_fsd, & tr_snow_out=tr_snow, tr_iso_out=tr_iso, & tr_bgc_Nit_out=tr_bgc_Nit, tr_bgc_Am_out =tr_bgc_Am, tr_bgc_Sil_out=tr_bgc_Sil, & diff --git a/cicecore/shared/ice_restart_column.F90 b/cicecore/shared/ice_restart_column.F90 index a15f9d2c1..86ff170c7 100644 --- a/cicecore/shared/ice_restart_column.F90 +++ b/cicecore/shared/ice_restart_column.F90 @@ -29,9 +29,6 @@ module ice_restart_column public :: write_restart_age, read_restart_age, & write_restart_FY, read_restart_FY, & write_restart_lvl, read_restart_lvl, & -#ifdef UNDEPRECATE_CESMPONDS - write_restart_pond_cesm, read_restart_pond_cesm, & -#endif write_restart_pond_lvl, read_restart_pond_lvl, & write_restart_pond_topo, read_restart_pond_topo, & write_restart_snow, read_restart_snow, & @@ -45,9 +42,6 @@ module ice_restart_column restart_age , & ! if .true., read age tracer restart file restart_FY , & ! if .true., read FY tracer restart file restart_lvl , & ! if .true., read lvl tracer restart file -#ifdef UNDEPRECATE_CESMPONDS - restart_pond_cesm, & ! 
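For comparison with the deleted CESM initializer above, the surviving level-ice initializer follows the same zero-fill idiom. This sketch reconstructs its shape; the exact argument list in ice_init_column.F90 also carries the refrozen-lid and snow-depth fields, so treat the signature as an approximation:

   subroutine init_meltponds_lvl(apnd, hpnd, ipnd, dhsn)
      ! kinds and c0 come from ice_kinds_mod / ice_constants
      real(kind=dbl_kind), dimension(:,:,:), intent(out) :: &
         apnd , & ! melt pond area fraction
         hpnd , & ! melt pond depth
         ipnd , & ! melt pond refrozen lid thickness
         dhsn     ! depth difference for snow on sea ice and pond ice
      character(len=*),parameter :: subname='(init_meltponds_lvl)'

      apnd(:,:,:) = c0
      hpnd(:,:,:) = c0
      ipnd(:,:,:) = c0
      dhsn(:,:,:) = c0

   end subroutine init_meltponds_lvl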
if .true., read meltponds restart file -#endif restart_pond_lvl , & ! if .true., read meltponds restart file restart_pond_topo, & ! if .true., read meltponds restart file restart_snow , & ! if .true., read snow tracer restart file @@ -260,74 +254,6 @@ subroutine read_restart_lvl() end subroutine read_restart_lvl -#ifdef UNDEPRECATE_CESMPONDS -!======================================================================= -! -! Dumps all values needed for restarting -! -! authors Elizabeth C. Hunke, LANL -! David A. Bailey, NCAR - - subroutine write_restart_pond_cesm() - - use ice_fileunits, only: nu_dump_pond - use ice_state, only: trcrn - - ! local variables - - logical (kind=log_kind) :: diag - integer (kind=int_kind) :: nt_apnd, nt_hpnd - character(len=*),parameter :: subname='(write_restart_pond_cesm)' - - call icepack_query_tracer_indices(nt_apnd_out=nt_apnd, nt_hpnd_out=nt_hpnd) - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) - - diag = .true. - - call write_restart_field(nu_dump_pond,0,trcrn(:,:,nt_apnd,:,:),'ruf8', & - 'apnd',ncat,diag) - call write_restart_field(nu_dump_pond,0,trcrn(:,:,nt_hpnd,:,:),'ruf8', & - 'hpnd',ncat,diag) - - end subroutine write_restart_pond_cesm - -!======================================================================= - -! Reads all values needed for a meltpond volume restart -! -! authors Elizabeth C. Hunke, LANL -! David A. Bailey, NCAR - - subroutine read_restart_pond_cesm() - - use ice_fileunits, only: nu_restart_pond - use ice_state, only: trcrn - - ! local variables - - logical (kind=log_kind) :: & - diag - integer (kind=int_kind) :: nt_apnd, nt_hpnd - character(len=*),parameter :: subname='(read_restart_pond_cesm)' - - call icepack_query_tracer_indices(nt_apnd_out=nt_apnd, nt_hpnd_out=nt_hpnd) - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) - - diag = .true. - - if (my_task == master_task) write(nu_diag,*) subname,'min/max cesm ponds' - - call read_restart_field(nu_restart_pond,0,trcrn(:,:,nt_apnd,:,:),'ruf8', & - 'apnd',ncat,diag,field_loc_center,field_type_scalar) - call read_restart_field(nu_restart_pond,0,trcrn(:,:,nt_hpnd,:,:),'ruf8', & - 'hpnd',ncat,diag,field_loc_center,field_type_scalar) - - end subroutine read_restart_pond_cesm -#endif !======================================================================= ! ! Dumps all values needed for restarting diff --git a/configuration/tools/cice4_restart_conversion/convert_restarts.f90 b/configuration/tools/cice4_restart_conversion/convert_restarts.f90 index 30c952510..3717f7d86 100644 --- a/configuration/tools/cice4_restart_conversion/convert_restarts.f90 +++ b/configuration/tools/cice4_restart_conversion/convert_restarts.f90 @@ -39,12 +39,8 @@ program convert_restarts logical (kind=log_kind), parameter :: & oceanmixed_ice = .true., & ! if true, read/write ocean mixed layer fields heat_capacity = .true., & ! if true, ice has nonzero heat capacity -#ifdef UNDEPRECATE_0LAYER - ! if false, use zero-layer thermodynamics -#else ! heat_capacity = .false. (zero-layer thermodynamics) ! has been deprecated in CICE and Icepack -#endif diag = .true. ! write min/max diagnostics for fields ! 
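The deleted pond I/O above is one instance of a symmetric pair used by every column restart: each write_restart_field is mirrored by a read_restart_field with the same unit, record layout, and field name, plus grid metadata on the read side. Condensed verbatim from the removed routines:

   diag = .true.

   ! dump side (write_restart_pond_*)
   call write_restart_field(nu_dump_pond,0,trcrn(:,:,nt_apnd,:,:),'ruf8', &
        'apnd',ncat,diag)
   call write_restart_field(nu_dump_pond,0,trcrn(:,:,nt_hpnd,:,:),'ruf8', &
        'hpnd',ncat,diag)

   ! restart side (read_restart_pond_*)
   call read_restart_field(nu_restart_pond,0,trcrn(:,:,nt_apnd,:,:),'ruf8', &
        'apnd',ncat,diag,field_loc_center,field_type_scalar)
   call read_restart_field(nu_restart_pond,0,trcrn(:,:,nt_hpnd,:,:),'ruf8', &
        'hpnd',ncat,diag,field_loc_center,field_type_scalar)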
file names diff --git a/doc/source/cice_index.rst b/doc/source/cice_index.rst index 763475992..a8b9d08f1 100644 --- a/doc/source/cice_index.rst +++ b/doc/source/cice_index.rst @@ -9,7 +9,7 @@ This index defines many of the symbols used frequently in the CICE model code. Namelist variables are partly included here but also documented elsewhere, see Section :ref:`tabnamelist`. All quantities in the code are expressed in MKS units (temperatures may take -either Celsius or Kelvin units). +either Celsius or Kelvin units). Deprecated parameters are listed at the end. .. csv-table:: *Alphabetical Index* :header: " ", " ", " " @@ -330,7 +330,7 @@ either Celsius or Kelvin units). "hpmin", "minimum melt pond depth (shortwave)", "0.005 m" "hpondn", "melt pond depth", "m" "hs_min", "minimum thickness for which :math:`T_s` is computed", "1.\ :math:`\times`\ 10\ :math:`^{-4}` m" - "hs0", "snow depth at which transition to ice occurs (dEdd)", "0.03 m" + "hs0", "snow depth at which transition to ice occurs (dEdd)", "m" "hs1", "snow depth of transition to pond ice", "0.03 m" "hs_ssl", "snow surface scattering layer thickness", "0.04 m" "Hstar", "determines mean thickness of ridged ice", "25. m" @@ -699,7 +699,6 @@ either Celsius or Kelvin units). "tr_FY", "if true, use first-year area tracer", "" "tr_iage", "if true, use ice age tracer", "" "tr_lvl", "if true, use level ice area and volume tracers", "" - "tr_pond_cesm", "if true, use CESM melt pond scheme", "" "tr_pond_lvl", "if true, use level-ice melt pond scheme", "" "tr_pond_topo", "if true, use topo melt pond scheme", "" "trcr", "ice tracers", "" @@ -772,9 +771,11 @@ either Celsius or Kelvin units). "zref", "reference height for stability", "10. m" "zTrf", "reference height for :math:`T_{ref}`, :math:`Q_{ref}`, :math:`U_{ref}`", "2. m" "zvir", "gas constant (water vapor)/gas constant (air) - 1", "0.606" - -.. - ktherm=0 has been deprecated + "**Deprecated options and parameters**", "", "" "heat_capacity", "if true, use salinity-dependent thermodynamics", "T" "kseaice", "thermal conductivity of ice for zero-layer thermodynamics", "2.0 W/m/deg" "ktherm", "thermodynamic formulation (0 = zero-layer, 1 = :cite:`Bitz99`, 2 = mushy)", "" + "tr_pond_cesm", "if true, use CESM melt pond scheme", "" + +.. + new deprecation comments diff --git a/doc/source/user_guide/ug_case_settings.rst b/doc/source/user_guide/ug_case_settings.rst index b2dd54c33..a34c69822 100644 --- a/doc/source/user_guide/ug_case_settings.rst +++ b/doc/source/user_guide/ug_case_settings.rst @@ -403,9 +403,6 @@ thermo_nml "``Rac_rapid_mode``", "real", "critical Rayleigh number", "10.0" "", "", "", "" -.. - ktherm=0 has been deprecated - "", "``0``", "zero-layer thermodynamic model", "" .. 
_dynamics_nml: @@ -538,7 +535,7 @@ ponds_nml "``frzpnd``", "``cesm``", "CESM pond refreezing forumulation", "``cesm``" "", "``hlid``", "Stefan refreezing with pond ice thickness", "" "``hp1``", "real", "critical ice lid thickness for topo ponds in m", "0.01" - "``hs0``", "real", "snow depth of transition to bare sea ice in m", "0.03" + "``hs0``", "real", "snow depth of transition to bare sea ice in m", "" "``hs1``", "real", "snow depth of transition to pond ice in m", "0.03" "``pndaspect``", "real", "aspect ratio of pond changes (depth:area)", "0.8" "``rfracmax``", ":math:`0 \le r_{max} \le 1`", "maximum melt water added to ponds", "0.85" diff --git a/icepack b/icepack index 460ddf8de..18fc1c9b7 160000 --- a/icepack +++ b/icepack @@ -1 +1 @@ -Subproject commit 460ddf8de6cf9f55572e8bf0e7672f24d6a7ec09 +Subproject commit 18fc1c9b79d81604eafdb1fac4ddd039b78ad390 From 9808b5195b2af77d2b70f5fec3e417b0a6020c5f Mon Sep 17 00:00:00 2001 From: "David A. Bailey" Date: Wed, 16 Nov 2022 05:26:57 -0700 Subject: [PATCH 37/57] CESM Updates (#785) * CESM updates * Rearrange some wave code * Add updates for UFS --- .../drivers/nuopc/cmeps/ice_comp_nuopc.F90 | 187 ++++++++++-------- .../drivers/nuopc/cmeps/ice_import_export.F90 | 25 ++- cicecore/drivers/nuopc/cmeps/ice_mesh_mod.F90 | 54 ++++- 3 files changed, 166 insertions(+), 100 deletions(-) diff --git a/cicecore/drivers/nuopc/cmeps/ice_comp_nuopc.F90 b/cicecore/drivers/nuopc/cmeps/ice_comp_nuopc.F90 index 182308973..afdee5590 100644 --- a/cicecore/drivers/nuopc/cmeps/ice_comp_nuopc.F90 +++ b/cicecore/drivers/nuopc/cmeps/ice_comp_nuopc.F90 @@ -17,12 +17,13 @@ module ice_comp_nuopc use NUOPC_Model , only : NUOPC_ModelGet, SetVM use ice_constants , only : ice_init_constants, c0 use ice_shr_methods , only : chkerr, state_setscalar, state_getscalar, state_diagnose, alarmInit - use ice_shr_methods , only : set_component_logging, get_component_instance, state_flddebug + use ice_shr_methods , only : get_component_instance, state_flddebug + use ice_import_export , only : ice_import, ice_export, ice_advertise_fields, ice_realize_fields use ice_domain_size , only : nx_global, ny_global use ice_grid , only : grid_type, init_grid2 use ice_communicate , only : init_communicate, my_task, master_task, mpi_comm_ice - use ice_calendar , only : force_restart_now, write_ic + use ice_calendar , only : force_restart_now, write_ic, init_calendar use ice_calendar , only : idate, mday, mmonth, myear, year_init use ice_calendar , only : msec, dt, calendar, calendar_type, nextsw_cday, istep use ice_calendar , only : ice_calendar_noleap, ice_calendar_gregorian @@ -40,7 +41,10 @@ module ice_comp_nuopc #ifdef CESMCOUPLED use shr_const_mod use shr_orb_mod , only : shr_orb_decl, shr_orb_params, SHR_ORB_UNDEF_REAL, SHR_ORB_UNDEF_INT - use ice_scam , only : scmlat, scmlon, scol_mask, scol_frac, scol_ni, scol_nj + use ice_scam , only : scmlat, scmlon, scol_mask, scol_frac, scol_ni, scol_nj, scol_area + use nuopc_shr_methods , only : set_component_logging +#else + use ice_shr_methods , only : set_component_logging #endif use ice_timers use CICE_InitMod , only : cice_init1, cice_init2 @@ -87,6 +91,15 @@ module ice_comp_nuopc integer :: dbug = 0 logical :: profile_memory = .false. + logical :: mastertask + integer :: start_ymd ! Start date (YYYYMMDD) + integer :: start_tod ! start time of day (s) + integer :: curr_ymd ! Current date (YYYYMMDD) + integer :: curr_tod ! Current time of day (s) + integer :: stop_ymd ! stop date (YYYYMMDD) + integer :: stop_tod ! 
---
 .../drivers/nuopc/cmeps/ice_comp_nuopc.F90    | 187 ++++++++++--------
 .../drivers/nuopc/cmeps/ice_import_export.F90 |  25 ++-
 cicecore/drivers/nuopc/cmeps/ice_mesh_mod.F90 |  54 ++++-
 3 files changed, 166 insertions(+), 100 deletions(-)

diff --git a/cicecore/drivers/nuopc/cmeps/ice_comp_nuopc.F90 b/cicecore/drivers/nuopc/cmeps/ice_comp_nuopc.F90
index 182308973..afdee5590 100644
--- a/cicecore/drivers/nuopc/cmeps/ice_comp_nuopc.F90
+++ b/cicecore/drivers/nuopc/cmeps/ice_comp_nuopc.F90
@@ -17,12 +17,13 @@ module ice_comp_nuopc
   use NUOPC_Model , only : NUOPC_ModelGet, SetVM
   use ice_constants , only : ice_init_constants, c0
   use ice_shr_methods , only : chkerr, state_setscalar, state_getscalar, state_diagnose, alarmInit
-  use ice_shr_methods , only : set_component_logging, get_component_instance, state_flddebug
+  use ice_shr_methods , only : get_component_instance, state_flddebug
+  use ice_import_export , only : ice_import, ice_export, ice_advertise_fields, ice_realize_fields
   use ice_domain_size , only : nx_global, ny_global
   use ice_grid , only : grid_type, init_grid2
   use ice_communicate , only : init_communicate, my_task, master_task, mpi_comm_ice
-  use ice_calendar , only : force_restart_now, write_ic
+  use ice_calendar , only : force_restart_now, write_ic, init_calendar
   use ice_calendar , only : idate, mday, mmonth, myear, year_init
   use ice_calendar , only : msec, dt, calendar, calendar_type, nextsw_cday, istep
   use ice_calendar , only : ice_calendar_noleap, ice_calendar_gregorian
@@ -40,7 +41,10 @@ module ice_comp_nuopc
 #ifdef CESMCOUPLED
   use shr_const_mod
   use shr_orb_mod , only : shr_orb_decl, shr_orb_params, SHR_ORB_UNDEF_REAL, SHR_ORB_UNDEF_INT
-  use ice_scam , only : scmlat, scmlon, scol_mask, scol_frac, scol_ni, scol_nj
+  use ice_scam , only : scmlat, scmlon, scol_mask, scol_frac, scol_ni, scol_nj, scol_area
+  use nuopc_shr_methods , only : set_component_logging
+#else
+  use ice_shr_methods , only : set_component_logging
 #endif
   use ice_timers
   use CICE_InitMod , only : cice_init1, cice_init2
@@ -87,6 +91,15 @@ module ice_comp_nuopc
   integer :: dbug = 0
   logical :: profile_memory = .false.
+  logical :: mastertask
+  integer :: start_ymd      ! Start date (YYYYMMDD)
+  integer :: start_tod      ! start time of day (s)
+  integer :: curr_ymd       ! Current date (YYYYMMDD)
+  integer :: curr_tod       ! Current time of day (s)
+  integer :: stop_ymd       ! stop date (YYYYMMDD)
+  integer :: stop_tod       ! stop time of day (sec)
+  integer :: ref_ymd        ! Reference date (YYYYMMDD)
+  integer :: ref_tod        ! reference time of day (s)
   integer , parameter :: debug_import = 0 ! internal debug level
   integer , parameter :: debug_export = 0 ! internal debug level
   character(*), parameter :: modName = "(ice_comp_nuopc)"
@@ -201,12 +214,8 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc)
   real(kind=dbl_kind)      :: atmiter_conv_driver
   integer (kind=int_kind)  :: natmiter
   integer (kind=int_kind)  :: natmiter_driver
-  character(len=char_len)  :: tfrz_option_driver ! tfrz_option from driver attributes
-  character(len=char_len)  :: tfrz_option        ! tfrz_option from cice namelist
-  integer(int_kind)        :: ktherm
   integer                  :: localPet
   integer                  :: npes
-  logical                  :: mastertask
   type(ESMF_VM)            :: vm
   integer                  :: lmpicom ! local communicator
   type(ESMF_Time)          :: currTime ! Current time
@@ -215,14 +224,6 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc)
   type(ESMF_Time)          :: refTime  ! Ref time
   type(ESMF_TimeInterval)  :: timeStep ! Model timestep
   type(ESMF_CalKind_Flag)  :: esmf_caltype ! esmf calendar type
-  integer                  :: start_ymd ! Start date (YYYYMMDD)
-  integer                  :: start_tod ! start time of day (s)
-  integer                  :: curr_ymd  ! Current date (YYYYMMDD)
-  integer                  :: curr_tod  ! Current time of day (s)
-  integer                  :: stop_ymd  ! stop date (YYYYMMDD)
-  integer                  :: stop_tod  ! stop time of day (sec)
-  integer                  :: ref_ymd   ! Reference date (YYYYMMDD)
-  integer                  :: ref_tod   ! reference time of day (s)
   integer                  :: yy,mm,dd  ! Temporaries for time query
   integer                  :: dtime     ! time step
   integer                  :: shrlogunit ! original log unit
@@ -232,12 +233,15 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc)
   integer                  :: iblk, jblk ! indices
   integer                  :: ig, jg     ! indices
   integer                  :: ilo, ihi, jlo, jhi ! beginning and end of physical domain
+  character(len=char_len_long) :: diag_filename = 'unset'
   character(len=char_len_long) :: logmsg
   character(len=char_len_long) :: single_column_lnd_domainfile
   real(dbl_kind)           :: scol_lon
   real(dbl_kind)           :: scol_lat
   real(dbl_kind)           :: scol_spval
+  character(len=char_len)  :: tfrz_option        ! tfrz_option from cice namelist
+  character(len=char_len)  :: tfrz_option_driver ! tfrz_option from driver attributes
   character(len=*), parameter :: subname=trim(modName)//':(InitializeAdvertise) '
   !--------------------------------
@@ -541,12 +545,20 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc)
   call NUOPC_CompAttributeGet(gcomp, name='scol_nj', value=cvalue, rc=rc)
   if (chkerr(rc,__LINE__,u_FILE_u)) return
   read(cvalue,*) scol_nj
+  call NUOPC_CompAttributeGet(gcomp, name='scol_area', value=cvalue, rc=rc)
+  if (chkerr(rc,__LINE__,u_FILE_u)) return
+  read(cvalue,*) scol_area
   call ice_mesh_create_scolumn(scmlon, scmlat, ice_mesh, rc)
   if (ChkErr(rc,__LINE__,u_FILE_u)) return
   scol_valid = (scol_mask == 1)
   if (.not. scol_valid) then
+     ! Read the cice namelist as part of the call to cice_init1
+     ! Note that if single_column is true and scol_valid is false, we will never get here
+     call t_startf ('cice_init1')
+     call cice_init1
+     call t_stopf ('cice_init1')
      ! Advertise fields
      call ice_advertise_fields(gcomp, importState, exportState, flds_scalar_name, rc)
      if (ChkErr(rc,__LINE__,u_FILE_u)) return
@@ -559,13 +571,18 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc)
         RETURN
      end if
   end if
-  ! Read the cice namelist as part of the call to cice_init1
   ! 
Note that if single_column is true and scol_valid is not - will never get here - call t_startf ('cice_init1') call cice_init1 call t_stopf ('cice_init1') + + !----------------------------------------------------------------- + ! Advertise fields + !----------------------------------------------------------------- + call ice_advertise_fields(gcomp, importState, exportState, flds_scalar_name, rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + ! Form of ocean freezing temperature ! 'minus1p8' = -1.8 C @@ -624,6 +641,12 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) call cice_init1 call t_stopf ('cice_init1') + !----------------------------------------------------------------- + ! Advertise fields + !----------------------------------------------------------------- + call ice_advertise_fields(gcomp, importState, exportState, flds_scalar_name, rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + #endif !---------------------------------------------------------------------------- @@ -675,6 +698,43 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) end if end if + call t_stopf ('cice_init_total') + + end subroutine InitializeAdvertise + + !=============================================================================== + + subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) + + ! Arguments + type(ESMF_GridComp) :: gcomp + type(ESMF_State) :: importState + type(ESMF_State) :: exportState + type(ESMF_Clock) :: clock + integer, intent(out) :: rc + + ! Local variables + integer :: n + integer :: fieldcount + type(ESMF_Field) :: lfield + character(len=char_len_long) :: cvalue + real(dbl_kind) :: scol_lon + real(dbl_kind) :: scol_lat + real(dbl_kind) :: scol_spval + real(dbl_kind), pointer :: fldptr1d(:) + real(dbl_kind), pointer :: fldptr2d(:,:) + integer :: rank + character(len=char_len) :: tfrz_option ! tfrz_option from cice namelist + integer(int_kind) :: ktherm + + character(len=char_len_long) :: single_column_lnd_domainfile + character(len=char_len_long) , pointer :: lfieldnamelist(:) => null() + character(len=*), parameter :: subname=trim(modName)//':(InitializeRealize) ' + !-------------------------------- + + rc = ESMF_SUCCESS + if (dbug > 5) call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO) + !---------------------------------------------------------------------------- ! Second cice initialization phase -after initializing grid info !---------------------------------------------------------------------------- @@ -685,29 +745,6 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) call t_startf ('cice_init2') call cice_init2() call t_stopf ('cice_init2') - - !---------------------------------------------------------------------------- - ! reset shr logging to my log file - !---------------------------------------------------------------------------- - - call icepack_query_parameters(ktherm_out=ktherm) - call icepack_query_parameters(tfrz_option_out=tfrz_option) - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) - - ! Now write output to nu_diag - this must happen AFTER call to cice_init - if (mastertask) then - write(nu_diag,'(a,d21.14)') trim(subname)//' cice init nextsw_cday = ',nextsw_cday - write(nu_diag,'(a)') trim(subname)//' tfrz_option = '//trim(tfrz_option) - if (ktherm == 2 .and. 
trim(tfrz_option) /= 'mushy') then - write(nu_diag,*) trim(subname),' Warning: Using ktherm = 2 and tfrz_option = ', trim(tfrz_option) - endif - write(nu_diag,'(a )') trim(subname)//' inst_name = '//trim(inst_name) - write(nu_diag,'(a,i8 )') trim(subname)//' inst_index = ',inst_index - write(nu_diag,'(a )') trim(subname)//' inst_suffix = ',trim(inst_suffix) - endif - !--------------------------------------------------------------------------- ! use EClock to reset calendar information on initial start !--------------------------------------------------------------------------- @@ -758,6 +795,30 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) end if call calendar() ! update calendar info + + !---------------------------------------------------------------------------- + ! reset shr logging to my log file + !---------------------------------------------------------------------------- + + call icepack_query_parameters(ktherm_out=ktherm) + call icepack_query_parameters(tfrz_option_out=tfrz_option) + call icepack_warnings_flush(nu_diag) + if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & + file=__FILE__, line=__LINE__) + + ! Now write output to nu_diag - this must happen AFTER call to cice_init + if (mastertask) then + write(nu_diag,'(a,d21.14)') trim(subname)//' cice init nextsw_cday = ',nextsw_cday + write(nu_diag,'(a)') trim(subname)//' tfrz_option = '//trim(tfrz_option) + if (ktherm == 2 .and. trim(tfrz_option) /= 'mushy') then + write(nu_diag,*) trim(subname),' Warning: Using ktherm = 2 and tfrz_option = ', trim(tfrz_option) + endif + write(nu_diag,'(a )') trim(subname)//' inst_name = '//trim(inst_name) + write(nu_diag,'(a,i8 )') trim(subname)//' inst_index = ',inst_index + write(nu_diag,'(a )') trim(subname)//' inst_suffix = ',trim(inst_suffix) + endif + + if (write_ic) then call accum_hist(dt) ! write initial conditions end if @@ -769,50 +830,6 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) call ice_prescribed_init(clock, ice_mesh, rc) if (ChkErr(rc,__LINE__,u_FILE_u)) return - !----------------------------------------------------------------- - ! Advertise fields - !----------------------------------------------------------------- - - ! NOTE: the advertise phase needs to be called after the ice - ! initialization since the number of ice categories is needed for - ! ice_fraction_n and mean_sw_pen_to_ocn_ifrac_n - call ice_advertise_fields(gcomp, importState, exportState, flds_scalar_name, rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return - - call t_stopf ('cice_init_total') - - end subroutine InitializeAdvertise - - !=============================================================================== - - subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) - - ! Arguments - type(ESMF_GridComp) :: gcomp - type(ESMF_State) :: importState - type(ESMF_State) :: exportState - type(ESMF_Clock) :: clock - integer, intent(out) :: rc - - ! 
Local variables - integer :: n - integer :: fieldcount - type(ESMF_Field) :: lfield - character(len=char_len_long) :: cvalue - real(dbl_kind) :: scol_lon - real(dbl_kind) :: scol_lat - real(dbl_kind) :: scol_spval - real(dbl_kind), pointer :: fldptr1d(:) - real(dbl_kind), pointer :: fldptr2d(:,:) - integer :: rank - character(len=char_len_long) :: single_column_lnd_domainfile - character(len=char_len_long) , pointer :: lfieldnamelist(:) => null() - character(len=*), parameter :: subname=trim(modName)//':(InitializeRealize) ' - !-------------------------------- - - rc = ESMF_SUCCESS - if (dbug > 5) call ESMF_LogWrite(subname//' called', ESMF_LOGMSG_INFO) - #ifdef CESMCOUPLED ! if single column is not valid - set all export state fields to zero and return if (single_column .and. .not. scol_valid) then @@ -848,7 +865,7 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc) ! *** RETURN HERE *** ! ******************* RETURN - else + else if(single_column) then write(nu_diag,'(a,3(f10.5,2x))')' (ice_comp_nuopc) single column mode lon/lat/frac is ',& scmlon,scmlat,scol_frac end if diff --git a/cicecore/drivers/nuopc/cmeps/ice_import_export.F90 b/cicecore/drivers/nuopc/cmeps/ice_import_export.F90 index d95a4d9b2..e4db010de 100644 --- a/cicecore/drivers/nuopc/cmeps/ice_import_export.F90 +++ b/cicecore/drivers/nuopc/cmeps/ice_import_export.F90 @@ -36,9 +36,9 @@ module ice_import_export use ice_shr_methods , only : chkerr, state_reset use icepack_intfc , only : icepack_warnings_flush, icepack_warnings_aborted use icepack_intfc , only : icepack_query_parameters, icepack_query_tracer_flags - use icepack_intfc , only : icepack_query_tracer_indices use icepack_intfc , only : icepack_liquidus_temperature use icepack_intfc , only : icepack_sea_freezing_temperature + use icepack_intfc , only : icepack_query_tracer_indices use icepack_parameters , only : puny, c2 use cice_wrapper_mod , only : t_startf, t_stopf, t_barrierf #ifdef CESMCOUPLED @@ -307,6 +307,7 @@ end subroutine ice_advertise_fields !============================================================================== subroutine ice_realize_fields(gcomp, mesh, flds_scalar_name, flds_scalar_num, rc) + use ice_scam, only : single_column ! input/output variables type(ESMF_GridComp) :: gcomp @@ -320,7 +321,7 @@ subroutine ice_realize_fields(gcomp, mesh, flds_scalar_name, flds_scalar_num, rc type(ESMF_State) :: exportState type(ESMF_Field) :: lfield integer :: numOwnedElements - integer :: i, j, iblk, n, k + integer :: i, j, iblk, n integer :: ilo, ihi, jlo, jhi ! beginning and end of physical domain type(block) :: this_block ! block information for current block real(dbl_kind), allocatable :: mesh_areas(:) @@ -361,10 +362,10 @@ subroutine ice_realize_fields(gcomp, mesh, flds_scalar_name, flds_scalar_num, rc tag=subname//':CICE_Import',& mesh=mesh, rc=rc) if (ChkErr(rc,__LINE__,u_FILE_u)) return - #ifdef CESMCOUPLED ! Get mesh areas from second field - using second field since the ! first field is the scalar field + if (single_column) return call ESMF_MeshGet(mesh, numOwnedElements=numOwnedElements, rc=rc) if (chkerr(rc,__LINE__,u_FILE_u)) return @@ -889,6 +890,8 @@ end subroutine ice_import !=============================================================================== subroutine ice_export( exportState, rc ) + use ice_scam, only : single_column + ! 
input/output variables type(ESMF_State), intent(inout) :: exportState integer , intent(out) :: rc @@ -911,12 +914,13 @@ subroutine ice_export( exportState, rc ) real (kind=dbl_kind) :: ailohi(nx_block,ny_block,max_blocks) ! fractional ice area real (kind=dbl_kind) :: floediam(nx_block,ny_block,max_blocks) real (kind=dbl_kind) :: floethick(nx_block,ny_block,max_blocks) ! ice thickness - real (kind=dbl_kind) :: Tffresh logical (kind=log_kind) :: tr_fsd integer (kind=int_kind) :: nt_fsd + real (kind=dbl_kind) :: Tffresh real (kind=dbl_kind), allocatable :: tempfld(:,:,:) real (kind=dbl_kind), pointer :: dataptr_ifrac_n(:,:) real (kind=dbl_kind), pointer :: dataptr_swpen_n(:,:) + logical (kind=log_kind), save :: first_call = .true. character(len=*),parameter :: subname = 'ice_export' !----------------------------------------------------- @@ -963,6 +967,9 @@ subroutine ice_export( exportState, rc ) ! ice fraction ailohi(i,j,iblk) = min(aice(i,j,iblk), c1) + ! surface temperature + Tsrf(i,j,iblk) = Tffresh + trcr(i,j,1,iblk) !Kelvin (original ???) + if (tr_fsd) then ! floe thickness (m) if (aice(i,j,iblk) > puny) then @@ -984,9 +991,6 @@ subroutine ice_export( exportState, rc ) floediam(i,j,iblk) = MAX(c2*floe_rad_c(1),workx) endif - ! surface temperature - Tsrf(i,j,iblk) = Tffresh + trcr(i,j,1,iblk) !Kelvin (original ???) - ! wind stress (on POP T-grid: convert to lat-lon) workx = strairxT(i,j,iblk) ! N/m^2 worky = strairyT(i,j,iblk) ! N/m^2 @@ -1042,8 +1046,11 @@ subroutine ice_export( exportState, rc ) !--------------------------------- ! Zero out fields with tmask for proper coupler accumulation in ice free areas - call state_reset(exportState, c0, rc) - if (ChkErr(rc,__LINE__,u_FILE_u)) return + if (first_call .or. .not.single_column) then + call state_reset(exportState, c0, rc) + if (ChkErr(rc,__LINE__,u_FILE_u)) return + first_call = .false. + endif ! Create a temporary field allocate(tempfld(nx_block,ny_block,nblocks)) diff --git a/cicecore/drivers/nuopc/cmeps/ice_mesh_mod.F90 b/cicecore/drivers/nuopc/cmeps/ice_mesh_mod.F90 index 0b1b9349c..a9b19df6b 100644 --- a/cicecore/drivers/nuopc/cmeps/ice_mesh_mod.F90 +++ b/cicecore/drivers/nuopc/cmeps/ice_mesh_mod.F90 @@ -427,7 +427,7 @@ subroutine ice_mesh_create_scolumn(scol_lon, scol_lat, ice_mesh, rc) if (ChkErr(rc,__LINE__,u_FILE_u)) return ! Allocate module variable ocn_gridcell_frac - allocate(ocn_gridcell_frac(nx_block,ny_block,max_blocks)) + allocate(ocn_gridcell_frac(2,2,1)) ocn_gridcell_frac(:,:,:) = scol_frac end subroutine ice_mesh_create_scolumn @@ -560,7 +560,7 @@ subroutine ice_mesh_check(gcomp, ice_mesh, rc) ! Check CICE mesh use ice_constants, only : c1,c0,c360 - use ice_grid , only : tlon, tlat + use ice_grid , only : tlon, tlat, hm ! input/output parameters type(ESMF_GridComp) , intent(inout) :: gcomp @@ -569,7 +569,8 @@ subroutine ice_mesh_check(gcomp, ice_mesh, rc) ! local variables type(ESMF_DistGrid) :: distGrid - integer :: n,c,g,i,j,m ! indices + type(ESMF_Array) :: elemMaskArray + integer :: n,i,j ! indices integer :: iblk, jblk ! indices integer :: ilo, ihi, jlo, jhi ! beginning and end of physical domain type(block) :: this_block ! 
block information for current block
@@ -578,11 +579,15 @@ subroutine ice_mesh_check(gcomp, ice_mesh, rc)
   real(dbl_kind), pointer :: ownedElemCoords(:)
   real(dbl_kind), pointer :: lat(:), latMesh(:)
   real(dbl_kind), pointer :: lon(:), lonMesh(:)
+  integer       , pointer :: model_mask(:)
   real(dbl_kind)          :: diff_lon
   real(dbl_kind)          :: diff_lat
   real(dbl_kind)          :: rad_to_deg
   real(dbl_kind)          :: tmplon, eps_imesh
   logical                 :: isPresent, isSet
+  logical                 :: mask_error
+  integer                 :: mask_internal
+  integer                 :: mask_file
   character(len=char_len_long) :: cvalue
   character(len=char_len_long) :: logmsg
   character(len=*), parameter  :: subname = ' ice_mesh_check: '
@@ -606,7 +611,7 @@ subroutine ice_mesh_check(gcomp, ice_mesh, rc)
   allocate(ownedElemCoords(spatialDim*numownedelements))
   allocate(lonmesh(numOwnedElements))
   allocate(latmesh(numOwnedElements))
-  call ESMF_MeshGet(ice_mesh, ownedElemCoords=ownedElemCoords)
+  call ESMF_MeshGet(ice_mesh, ownedElemCoords=ownedElemCoords, rc=rc)
   if (ChkErr(rc,__LINE__,u_FILE_u)) return
   do n = 1,numOwnedElements
      lonMesh(n) = ownedElemCoords(2*n-1)
@@ -650,8 +655,45 @@ subroutine ice_mesh_check(gcomp, ice_mesh, rc)
      enddo
   enddo
 
-100 format('ERROR: CICE n, lonmesh, lon, diff_lon = ',i6,2(f21.13,3x),d21.5)
-101 format('ERROR: CICE n, latmesh, lat, diff_lat = ',i6,2(f21.13,3x),d21.5)
+  ! obtain internally generated ice mask for error checks
+  allocate(model_mask(numOwnedElements))
+  call ESMF_MeshGet(ice_mesh, elementdistGrid=distGrid, rc=rc)
+  if (ChkErr(rc,__LINE__,u_FILE_u)) return
+  elemMaskArray = ESMF_ArrayCreate(distGrid, model_mask, rc=rc)
+  if (ChkErr(rc,__LINE__,u_FILE_u)) return
+  call ESMF_MeshGet(ice_mesh, elemMaskArray=elemMaskArray, rc=rc)
+  if (ChkErr(rc,__LINE__,u_FILE_u)) return
+
+  mask_error = .false.
+  n=0
+  do iblk = 1, nblocks
+     this_block = get_block(blocks_ice(iblk),iblk)
+     jlo = this_block%jlo
+     jhi = this_block%jhi
+     ilo = this_block%ilo
+     ihi = this_block%ihi
+     do j = jlo, jhi
+        do i = ilo, ihi
+           n = n+1
+           mask_internal = nint(hm(i,j,iblk))
+           mask_file     = model_mask(n)
+           if (mask_internal /= mask_file) then
+              write(6,102) n,mask_internal,mask_file
+              mask_error = .true.
+           end if
+        enddo !i
+     enddo !j
+  enddo !iblk
+  if (mask_error) then
+     call abort_ice(error_message=subname, file=__FILE__, line=__LINE__)
+  end if
+
+  call ESMF_ArrayDestroy(elemMaskArray, rc=rc)
+  if (ChkErr(rc,__LINE__,u_FILE_u)) return
+
+100 format('ERROR: CICE n, mesh_lon , lon, diff_lon = ',i8,2(f21.13,3x),d21.5)
+101 format('ERROR: CICE n, mesh_lat , lat, diff_lat = ',i8,2(f21.13,3x),d21.5)
+102 format('ERROR: CICE n, mask_internal, mask_file = ',i8,2(i2,2x))
 
   ! deallocate memory
   deallocate(ownedElemCoords)

From 99daf6942b78a57275261a1299ed4e3988708788 Mon Sep 17 00:00:00 2001
From: TRasmussen <33480590+TillRasmussen@users.noreply.github.com>
Date: Thu, 17 Nov 2022 04:32:59 +0100
Subject: [PATCH 38/57] allocate c and cd var in evp, reduce number of "if grid_ice". (#778)

* allocate c and cd var in evp, reduce number of if grid_ice.

* Homogenized call to init_dyn in CICE_InitMod, removed first_call from loop in eap

* bug fixes, removal of *.loc, moved more fields to be allocatable

* Align indents

* read and write when iceumask, icenmask and iceemask are not allocated
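In outline, the per-call allocate/deallocate of the EVP work arrays is replaced
by a one-time, module-level allocation at initialization. A minimal sketch of
the pattern, using names from this patch (most arrays and the C/CD-grid guards
elided; not the literal code):

    subroutine init_evp
       use ice_dyn_shared, only: init_dyn_shared
       integer (int_kind) :: ierr
       call init_dyn_shared(dt_dyn)   ! shared init; also allocates fld2/fld3/fld4
       allocate(strengthU(nx_block,ny_block,max_blocks), stat=ierr)
       if (ierr /= 0) call abort_ice('(init_evp): Out of memory')
    end subroutine init_evp

The driver (evp) then uses these module arrays directly every timestep instead
of allocating and deallocating them on each call.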
---
 cicecore/cicedynB/dynamics/ice_dyn_eap.F90    | 129 ++--
 cicecore/cicedynB/dynamics/ice_dyn_evp.F90    | 604 ++++++++++--------
 cicecore/cicedynB/dynamics/ice_dyn_shared.F90 |  45 +-
 cicecore/cicedynB/dynamics/ice_dyn_vp.F90     |  14 +-
 .../infrastructure/ice_restart_driver.F90     | 156 +++--
 .../infrastructure/io/io_pio2/ice_restart.F90 |   7 +-
 .../drivers/direct/hadgem3/CICE_InitMod.F90   |  14 +-
 .../direct/nemo_concepts/CICE_InitMod.F90     |  14 +-
 cicecore/drivers/mct/cesm1/CICE_InitMod.F90   |  12 +-
 cicecore/drivers/nuopc/cmeps/CICE_InitMod.F90 |  11 +-
 cicecore/drivers/nuopc/dmi/CICE_InitMod.F90   |  31 +-
 .../drivers/standalone/cice/CICE_InitMod.F90  |  12 +-
 .../unittest/gridavgchk/CICE_InitMod.F90      |  12 +-
 .../drivers/unittest/sumchk/CICE_InitMod.F90  |  13 +-
 configuration/scripts/options/set_nml.dyneap  |   2 +
 15 files changed, 573 insertions(+), 503 deletions(-)
 create mode 100644 configuration/scripts/options/set_nml.dyneap

diff --git a/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 b/cicecore/cicedynB/dynamics/ice_dyn_eap.F90
index 28a047c4e..e240fc8f1 100644
--- a/cicecore/cicedynB/dynamics/ice_dyn_eap.F90
+++ b/cicecore/cicedynB/dynamics/ice_dyn_eap.F90
@@ -25,6 +25,7 @@ module ice_dyn_eap
       p001, p027, p055, p111, p166, p222, p25, p333
   use ice_fileunits, only: nu_diag, nu_dump_eap, nu_restart_eap
   use ice_exit, only: abort_ice
+  use ice_flux, only: rdg_shear
!  use ice_timers, only: &
!      ice_timer_start, ice_timer_stop, &
!      timer_tmp1, timer_tmp2, timer_tmp3, timer_tmp4, &
@@ -36,8 +37,7 @@ module ice_dyn_eap
   implicit none
   private
-  public :: eap, init_eap, write_restart_eap, read_restart_eap, &
-            alloc_dyn_eap
+  public :: eap, init_eap, write_restart_eap, read_restart_eap
 
 ! Look-up table needed for calculating structure tensor
   integer (int_kind), parameter :: &
@@ -71,42 +71,16 @@ module ice_dyn_eap
   real (kind=dbl_kind) :: &
      puny, pi, pi2, piq, pih
 
-!=======================================================================
-
-      contains
-
-!=======================================================================
-! Allocate space for all variables
-!
- subroutine alloc_dyn_eap + real (kind=dbl_kind), parameter :: & + kfriction = 0.45_dbl_kind - integer (int_kind) :: ierr + real (kind=dbl_kind), save :: & + invdx, invdy, invda, invsin - character(len=*), parameter :: subname = '(alloc_dyn_eap)' - allocate( a11_1 (nx_block,ny_block,max_blocks), & - a11_2 (nx_block,ny_block,max_blocks), & - a11_3 (nx_block,ny_block,max_blocks), & - a11_4 (nx_block,ny_block,max_blocks), & - a12_1 (nx_block,ny_block,max_blocks), & - a12_2 (nx_block,ny_block,max_blocks), & - a12_3 (nx_block,ny_block,max_blocks), & - a12_4 (nx_block,ny_block,max_blocks), & - e11 (nx_block,ny_block,max_blocks), & - e12 (nx_block,ny_block,max_blocks), & - e22 (nx_block,ny_block,max_blocks), & - yieldstress11(nx_block,ny_block,max_blocks), & - yieldstress12(nx_block,ny_block,max_blocks), & - yieldstress22(nx_block,ny_block,max_blocks), & - s11 (nx_block,ny_block,max_blocks), & - s12 (nx_block,ny_block,max_blocks), & - s22 (nx_block,ny_block,max_blocks), & - a11 (nx_block,ny_block,max_blocks), & - a12 (nx_block,ny_block,max_blocks), & - stat=ierr) - if (ierr/=0) call abort_ice(subname//' ERROR: Out of memory') +!======================================================================= - end subroutine alloc_dyn_eap + contains !======================================================================= ! Elastic-anisotropic-plastic dynamics driver @@ -134,7 +108,8 @@ subroutine eap (dt) dyn_prep1, dyn_prep2, stepu, dyn_finish, & seabed_stress_factor_LKD, seabed_stress_factor_prob, & seabed_stress_method, seabed_stress, & - stack_fields, unstack_fields, iceTmask, iceUmask + stack_fields, unstack_fields, iceTmask, iceUmask, & + fld2, fld3, fld4 use ice_flux, only: rdg_conv, strairxT, strairyT, & strairxU, strairyU, uocn, vocn, ss_tltx, ss_tlty, fmU, & strtltxU, strtltyU, strocnxU, strocnyU, strintxU, strintyU, taubxU, taubyU, & @@ -186,11 +161,6 @@ subroutine eap (dt) umass , & ! total mass of ice and snow (u grid) umassdti ! mass of U-cell/dte (kg/m^2 s) - real (kind=dbl_kind), allocatable :: & - fld2(:,:,:,:), & ! temporary for stacking fields for halo update - fld3(:,:,:,:), & ! temporary for stacking fields for halo update - fld4(:,:,:,:) ! temporary for stacking fields for halo update - real (kind=dbl_kind), dimension(nx_block,ny_block,8):: & strtmp ! stress combinations for momentum equation @@ -214,10 +184,6 @@ subroutine eap (dt) ! Initialize !----------------------------------------------------------------- - allocate(fld2(nx_block,ny_block,2,max_blocks)) - allocate(fld3(nx_block,ny_block,3,max_blocks)) - allocate(fld4(nx_block,ny_block,4,max_blocks)) - ! This call is needed only if dt changes during runtime. ! call set_evp_parameters (dt) @@ -226,7 +192,7 @@ subroutine eap (dt) do j = 1, ny_block do i = 1, nx_block rdg_conv (i,j,iblk) = c0 -! rdg_shear(i,j,iblk) = c0 + rdg_shear(i,j,iblk) = c0 ! always zero. Could be moved divu (i,j,iblk) = c0 shear(i,j,iblk) = c0 e11(i,j,iblk) = c0 @@ -554,7 +520,6 @@ subroutine eap (dt) enddo ! subcycling - deallocate(fld2,fld3,fld4) if (maskhalo_dyn) call ice_HaloDestroy(halo_info_mask) !----------------------------------------------------------------- @@ -588,6 +553,8 @@ subroutine init_eap use ice_blocks, only: nx_block, ny_block use ice_domain, only: nblocks + use ice_calendar, only: dt_dyn + use ice_dyn_shared, only: init_dyn_shared ! 
local variables @@ -599,7 +566,7 @@ subroutine init_eap eps6 = 1.0e-6_dbl_kind integer (kind=int_kind) :: & - ix, iy, iz, ia + ix, iy, iz, ia, ierr integer (kind=int_kind), parameter :: & nz = 100 @@ -609,6 +576,8 @@ subroutine init_eap da, dx, dy, dz, & phi + real (kind=dbl_kind) :: invstressconviso + character(len=*), parameter :: subname = '(init_eap)' call icepack_query_parameters(puny_out=puny, & @@ -619,6 +588,31 @@ subroutine init_eap phi = pi/c12 ! diamond shaped floe smaller angle (default phi = 30 deg) + call init_dyn_shared(dt_dyn) + + allocate( a11_1 (nx_block,ny_block,max_blocks), & + a11_2 (nx_block,ny_block,max_blocks), & + a11_3 (nx_block,ny_block,max_blocks), & + a11_4 (nx_block,ny_block,max_blocks), & + a12_1 (nx_block,ny_block,max_blocks), & + a12_2 (nx_block,ny_block,max_blocks), & + a12_3 (nx_block,ny_block,max_blocks), & + a12_4 (nx_block,ny_block,max_blocks), & + e11 (nx_block,ny_block,max_blocks), & + e12 (nx_block,ny_block,max_blocks), & + e22 (nx_block,ny_block,max_blocks), & + yieldstress11(nx_block,ny_block,max_blocks), & + yieldstress12(nx_block,ny_block,max_blocks), & + yieldstress22(nx_block,ny_block,max_blocks), & + s11 (nx_block,ny_block,max_blocks), & + s12 (nx_block,ny_block,max_blocks), & + s22 (nx_block,ny_block,max_blocks), & + a11 (nx_block,ny_block,max_blocks), & + a12 (nx_block,ny_block,max_blocks), & + stat=ierr) + if (ierr/=0) call abort_ice(subname//' ERROR: Out of memory') + + !$OMP PARALLEL DO PRIVATE(iblk,i,j) SCHEDULE(runtime) do iblk = 1, nblocks do j = 1, ny_block @@ -640,6 +634,7 @@ subroutine init_eap a12_2 (i,j,iblk) = c0 a12_3 (i,j,iblk) = c0 a12_4 (i,j,iblk) = c0 + rdg_shear (i,j,iblk) = c0 enddo ! i enddo ! j enddo ! iblk @@ -657,6 +652,9 @@ subroutine init_eap zinit = -pih dy = pi/real(ny_yield-1,kind=dbl_kind) yinit = -dy + invdx = c1/dx + invdy = c1/dy + invda = c1/da do ia=1,na_yield do ix=1,nx_yield @@ -712,6 +710,12 @@ subroutine init_eap enddo enddo + ! Factor to maintain the same stress as in EVP (see Section 3) + ! Can be set to 1 otherwise + + invstressconviso = c1/(c1+kfriction*kfriction) + invsin = c1/sin(pi2/c12) * invstressconviso + end subroutine init_eap !======================================================================= @@ -1590,22 +1594,12 @@ subroutine update_stress_rdg (ksub, ndte, divu, tension, & rotstemp11s, rotstemp12s, rotstemp22s, & sig11, sig12, sig22, & sgprm11, sgprm12, sgprm22, & - invstressconviso, & Angle_denom_gamma, Angle_denom_alpha, & Tany_1, Tany_2, & x, y, dx, dy, da, & dtemp1, dtemp2, atempprime, & kxw, kyw, kaw - real (kind=dbl_kind), save :: & - invdx, invdy, invda, invsin - - logical (kind=log_kind), save :: & - first_call = .true. - - real (kind=dbl_kind), parameter :: & - kfriction = 0.45_dbl_kind - ! tcraig, temporary, should be moved to namelist ! turns on interpolation in stress_rdg logical(kind=log_kind), parameter :: & @@ -1613,14 +1607,6 @@ subroutine update_stress_rdg (ksub, ndte, divu, tension, & character(len=*), parameter :: subname = '(update_stress_rdg)' - ! Factor to maintain the same stress as in EVP (see Section 3) - ! Can be set to 1 otherwise - - if (first_call) then - invstressconviso = c1/(c1+kfriction*kfriction) - invsin = c1/sin(pi2/c12) * invstressconviso - endif - ! compute eigenvalues, eigenvectors and angles for structure tensor, strain rates ! 1) structure tensor @@ -1717,17 +1703,6 @@ subroutine update_stress_rdg (ksub, ndte, divu, tension, & if (y > pi) y = y - pi if (y < 0) y = y + pi - ! 
Now calculate updated stress tensor - - if (first_call) then - dx = pi/real(nx_yield-1,kind=dbl_kind) - dy = pi/real(ny_yield-1,kind=dbl_kind) - da = p5/real(na_yield-1,kind=dbl_kind) - invdx = c1/dx - invdy = c1/dy - invda = c1/da - endif - if (interpolate_stress_rdg) then ! Interpolated lookup @@ -1869,8 +1844,6 @@ subroutine update_stress_rdg (ksub, ndte, divu, tension, & + rotstemp22s*dtemp22 endif - first_call = .false. - end subroutine update_stress_rdg !======================================================================= diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 index 8eab5e260..69305e131 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 @@ -55,7 +55,61 @@ module ice_dyn_evp implicit none private - public :: evp +! all c or cd + real (kind=dbl_kind), allocatable :: & + uocnN (:,:,:) , & ! i ocean current (m/s) + vocnN (:,:,:) , & ! j ocean current (m/s) + ss_tltxN (:,:,:) , & ! sea surface slope, x-direction (m/m) + ss_tltyN (:,:,:) , & ! sea surface slope, y-direction (m/m) + cdn_ocnN (:,:,:) , & ! ocn drag coefficient + waterxN (:,:,:) , & ! for ocean stress calculation, x (m/s) + wateryN (:,:,:) , & ! for ocean stress calculation, y (m/s) + forcexN (:,:,:) , & ! work array: combined atm stress and ocn tilt, x + forceyN (:,:,:) , & ! work array: combined atm stress and ocn tilt, y + aiN (:,:,:) , & ! ice fraction on N-grid + nmass (:,:,:) , & ! total mass of ice and snow (N grid) + nmassdti (:,:,:) ! mass of N-cell/dte (kg/m^2 s) +! all c or d + real (kind=dbl_kind), allocatable :: & + uocnE (:,:,:) , & ! i ocean current (m/s) + vocnE (:,:,:) , & ! j ocean current (m/s) + ss_tltxE (:,:,:) , & ! sea surface slope, x-direction (m/m) + ss_tltyE (:,:,:) , & ! sea surface slope, y-direction (m/m) + cdn_ocnE (:,:,:) , & ! ocn drag coefficient + waterxE (:,:,:) , & ! for ocean stress calculation, x (m/s) + wateryE (:,:,:) , & ! for ocean stress calculation, y (m/s) + forcexE (:,:,:) , & ! work array: combined atm stress and ocn tilt, x + forceyE (:,:,:) , & ! work array: combined atm stress and ocn tilt, y + aiE (:,:,:) , & ! ice fraction on E-grid + emass (:,:,:) , & ! total mass of ice and snow (E grid) + emassdti (:,:,:) ! mass of E-cell/dte (kg/m^2 s) + + real (kind=dbl_kind), allocatable :: & + strengthU(:,:,:) , & ! strength averaged to U points + divergU (:,:,:) , & ! div array on U points, differentiate from divu + tensionU (:,:,:) , & ! tension array on U points + shearU (:,:,:) , & ! shear array on U points + deltaU (:,:,:) , & ! delta array on U points + zetax2T (:,:,:) , & ! zetax2 = 2*zeta (bulk viscosity) + zetax2U (:,:,:) , & ! zetax2T averaged to U points + etax2T (:,:,:) , & ! etax2 = 2*eta (shear viscosity) + etax2U (:,:,:) ! etax2T averaged to U points + + real (kind=dbl_kind), allocatable :: & + uocnU (:,:,:) , & ! i ocean current (m/s) + vocnU (:,:,:) , & ! j ocean current (m/s) + ss_tltxU (:,:,:) , & ! sea surface slope, x-direction (m/m) + ss_tltyU (:,:,:) , & ! sea surface slope, y-direction (m/m) + cdn_ocnU (:,:,:) , & ! ocn drag coefficient + tmass (:,:,:) , & ! total mass of ice and snow (kg/m^2) + waterxU (:,:,:) , & ! for ocean stress calculation, x (m/s) + wateryU (:,:,:) , & ! for ocean stress calculation, y (m/s) + forcexU (:,:,:) , & ! work array: combined atm stress and ocn tilt, x + forceyU (:,:,:) , & ! work array: combined atm stress and ocn tilt, y + umass (:,:,:) , & ! total mass of ice and snow (u grid) + umassdti (:,:,:) ! 
mass of U-cell/dte (kg/m^2 s)
+
+      public :: evp, init_evp
 
 !=======================================================================
 
@@ -64,6 +118,84 @@ module ice_dyn_evp
 !=======================================================================
 ! Elastic-viscous-plastic dynamics driver
 !
+      subroutine init_evp
+      use ice_blocks, only: nx_block, ny_block
+      use ice_domain_size, only: max_blocks
+      use ice_grid, only: grid_ice
+      use ice_calendar, only: dt_dyn
+      use ice_dyn_shared, only: init_dyn_shared
+
+      ! allocate c and cd grid var. Follows structure of eap
+      integer (int_kind) :: ierr
+
+      character(len=*), parameter :: subname = '(init_evp)'
+
+      call init_dyn_shared(dt_dyn)
+
+      allocate( uocnU    (nx_block,ny_block,max_blocks), & ! i ocean current (m/s)
+                vocnU    (nx_block,ny_block,max_blocks), & ! j ocean current (m/s)
+                ss_tltxU (nx_block,ny_block,max_blocks), & ! sea surface slope, x-direction (m/m)
+                ss_tltyU (nx_block,ny_block,max_blocks), & ! sea surface slope, y-direction (m/m)
+                cdn_ocnU (nx_block,ny_block,max_blocks), & ! ocn drag coefficient
+                tmass    (nx_block,ny_block,max_blocks), & ! total mass of ice and snow (kg/m^2)
+                waterxU  (nx_block,ny_block,max_blocks), & ! for ocean stress calculation, x (m/s)
+                wateryU  (nx_block,ny_block,max_blocks), & ! for ocean stress calculation, y (m/s)
+                forcexU  (nx_block,ny_block,max_blocks), & ! work array: combined atm stress and ocn tilt, x
+                forceyU  (nx_block,ny_block,max_blocks), & ! work array: combined atm stress and ocn tilt, y
+                umass    (nx_block,ny_block,max_blocks), & ! total mass of ice and snow (u grid)
+                umassdti (nx_block,ny_block,max_blocks), & ! mass of U-cell/dte (kg/m^2 s)
+                stat=ierr)
+      if (ierr/=0) call abort_ice(subname//' ERROR: Out of memory B-Grid evp')
+
+      if (grid_ice == 'CD' .or. grid_ice == 'C') then
+
+         allocate( strengthU(nx_block,ny_block,max_blocks), &
+                   divergU  (nx_block,ny_block,max_blocks), &
+                   tensionU (nx_block,ny_block,max_blocks), &
+                   shearU   (nx_block,ny_block,max_blocks), &
+                   deltaU   (nx_block,ny_block,max_blocks), &
+                   zetax2T  (nx_block,ny_block,max_blocks), &
+                   zetax2U  (nx_block,ny_block,max_blocks), &
+                   etax2T   (nx_block,ny_block,max_blocks), &
+                   etax2U   (nx_block,ny_block,max_blocks), &
+                   stat=ierr)
+         if (ierr/=0) call abort_ice(subname//' ERROR: Out of memory U evp')
+
+         allocate( uocnN    (nx_block,ny_block,max_blocks), &
+                   vocnN    (nx_block,ny_block,max_blocks), &
+                   ss_tltxN (nx_block,ny_block,max_blocks), &
+                   ss_tltyN (nx_block,ny_block,max_blocks), &
+                   cdn_ocnN (nx_block,ny_block,max_blocks), &
+                   waterxN  (nx_block,ny_block,max_blocks), &
+                   wateryN  (nx_block,ny_block,max_blocks), &
+                   forcexN  (nx_block,ny_block,max_blocks), &
+                   forceyN  (nx_block,ny_block,max_blocks), &
+                   aiN      (nx_block,ny_block,max_blocks), &
+                   nmass    (nx_block,ny_block,max_blocks), &
+                   nmassdti (nx_block,ny_block,max_blocks), &
+                   stat=ierr)
+         if (ierr/=0) call abort_ice(subname//' ERROR: Out of memory N evp')
+
+         allocate( uocnE    (nx_block,ny_block,max_blocks), &
+                   vocnE    (nx_block,ny_block,max_blocks), &
+                   ss_tltxE (nx_block,ny_block,max_blocks), &
+                   ss_tltyE (nx_block,ny_block,max_blocks), &
+                   cdn_ocnE (nx_block,ny_block,max_blocks), &
+                   waterxE  (nx_block,ny_block,max_blocks), &
+                   wateryE  (nx_block,ny_block,max_blocks), &
+                   forcexE  (nx_block,ny_block,max_blocks), &
+                   forceyE  (nx_block,ny_block,max_blocks), &
+                   aiE      (nx_block,ny_block,max_blocks), &
+                   emass    (nx_block,ny_block,max_blocks), &
+                   emassdti (nx_block,ny_block,max_blocks), &
+                   stat=ierr)
+         if (ierr/=0) call abort_ice(subname//' ERROR: Out of memory E evp')
+
+      endif
+
+      end subroutine 
init_evp + #ifdef CICE_IN_NEMO ! Wind stress is set during this routine from the values supplied ! via NEMO (unless calc_strair is true). These values are supplied @@ -116,7 +248,7 @@ subroutine evp (dt) DminTarea, visc_method, deformations, deformationsC_T, deformationsCD_T, & strain_rates_U, & iceTmask, iceUmask, iceEmask, iceNmask, & - dyn_haloUpdate + dyn_haloUpdate, fld2, fld3, fld4 real (kind=dbl_kind), intent(in) :: & dt ! time step @@ -145,64 +277,6 @@ subroutine evp (dt) indxUi , & ! compressed index in i-direction indxUj ! compressed index in j-direction - real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & - uocnU , & ! i ocean current (m/s) - vocnU , & ! j ocean current (m/s) - ss_tltxU , & ! sea surface slope, x-direction (m/m) - ss_tltyU , & ! sea surface slope, y-direction (m/m) - cdn_ocnU , & ! ocn drag coefficient - tmass , & ! total mass of ice and snow (kg/m^2) - waterxU , & ! for ocean stress calculation, x (m/s) - wateryU , & ! for ocean stress calculation, y (m/s) - forcexU , & ! work array: combined atm stress and ocn tilt, x - forceyU , & ! work array: combined atm stress and ocn tilt, y - umass , & ! total mass of ice and snow (u grid) - umassdti ! mass of U-cell/dte (kg/m^2 s) - - real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & - uocnN , & ! i ocean current (m/s) - vocnN , & ! j ocean current (m/s) - ss_tltxN , & ! sea surface slope, x-direction (m/m) - ss_tltyN , & ! sea surface slope, y-direction (m/m) - cdn_ocnN , & ! ocn drag coefficient - waterxN , & ! for ocean stress calculation, x (m/s) - wateryN , & ! for ocean stress calculation, y (m/s) - forcexN , & ! work array: combined atm stress and ocn tilt, x - forceyN , & ! work array: combined atm stress and ocn tilt, y - aiN , & ! ice fraction on N-grid - nmass , & ! total mass of ice and snow (N grid) - nmassdti ! mass of N-cell/dte (kg/m^2 s) - - real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & - uocnE , & ! i ocean current (m/s) - vocnE , & ! j ocean current (m/s) - ss_tltxE , & ! sea surface slope, x-direction (m/m) - ss_tltyE , & ! sea surface slope, y-direction (m/m) - cdn_ocnE , & ! ocn drag coefficient - waterxE , & ! for ocean stress calculation, x (m/s) - wateryE , & ! for ocean stress calculation, y (m/s) - forcexE , & ! work array: combined atm stress and ocn tilt, x - forceyE , & ! work array: combined atm stress and ocn tilt, y - aiE , & ! ice fraction on E-grid - emass , & ! total mass of ice and snow (E grid) - emassdti ! mass of E-cell/dte (kg/m^2 s) - - real (kind=dbl_kind), allocatable :: & - fld2(:,:,:,:), & ! 2 bundled fields - fld3(:,:,:,:), & ! 3 bundled fields - fld4(:,:,:,:) ! 4 bundled fields - - real (kind=dbl_kind), allocatable :: & - strengthU(:,:,:), & ! strength averaged to U points - divergU (:,:,:), & ! div array on U points, differentiate from divu - tensionU (:,:,:), & ! tension array on U points - shearU (:,:,:), & ! shear array on U points - deltaU (:,:,:), & ! delta array on U points - zetax2T (:,:,:), & ! zetax2 = 2*zeta (bulk viscosity) - zetax2U (:,:,:), & ! zetax2T averaged to U points - etax2T (:,:,:), & ! etax2 = 2*eta (shear viscosity) - etax2U (:,:,:) ! etax2T averaged to U points - real (kind=dbl_kind), dimension(nx_block,ny_block,8):: & strtmp ! stress combinations for momentum equation @@ -218,9 +292,6 @@ subroutine evp (dt) type (block) :: & this_block ! block information for current block - logical (kind=log_kind), save :: & - first_time = .true. ! 
first time logical - character(len=*), parameter :: subname = '(evp)' call ice_timer_start(timer_dynamics) ! dynamics @@ -229,21 +300,8 @@ subroutine evp (dt) ! Initialize !----------------------------------------------------------------- - allocate(fld2(nx_block,ny_block,2,max_blocks)) - allocate(fld3(nx_block,ny_block,3,max_blocks)) - allocate(fld4(nx_block,ny_block,4,max_blocks)) - if (grid_ice == 'CD' .or. grid_ice == 'C') then - allocate(strengthU(nx_block,ny_block,max_blocks)) - allocate(divergU (nx_block,ny_block,max_blocks)) - allocate(tensionU (nx_block,ny_block,max_blocks)) - allocate(shearU (nx_block,ny_block,max_blocks)) - allocate(deltaU (nx_block,ny_block,max_blocks)) - allocate(zetax2T (nx_block,ny_block,max_blocks)) - allocate(zetax2U (nx_block,ny_block,max_blocks)) - allocate(etax2T (nx_block,ny_block,max_blocks)) - allocate(etax2U (nx_block,ny_block,max_blocks)) strengthU(:,:,:) = c0 divergU (:,:,:) = c0 tensionU (:,:,:) = c0 @@ -383,20 +441,20 @@ subroutine evp (dt) endif endif - !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,ij,i,j) SCHEDULE(runtime) - do iblk = 1, nblocks + if (trim(grid_ice) == 'B') then + !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,ij,i,j) SCHEDULE(runtime) + do iblk = 1, nblocks - !----------------------------------------------------------------- - ! more preparation for dynamics - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! more preparation for dynamics + !----------------------------------------------------------------- - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi - if (trim(grid_ice) == 'B') then call dyn_prep2 (nx_block, ny_block, & ilo, ihi, jlo, jhi, & icellT (iblk), icellU (iblk), & @@ -409,7 +467,7 @@ subroutine evp (dt) strairxU (:,:,iblk), strairyU (:,:,iblk), & ss_tltxU (:,:,iblk), ss_tltyU (:,:,iblk), & iceTmask (:,:,iblk), iceUmask (:,:,iblk), & - fmU (:,:,iblk), dt, & + fmU (:,:,iblk), dt , & strtltxU (:,:,iblk), strtltyU (:,:,iblk), & strocnxU (:,:,iblk), strocnyU (:,:,iblk), & strintxU (:,:,iblk), strintyU (:,:,iblk), & @@ -426,7 +484,35 @@ subroutine evp (dt) uvel (:,:,iblk), vvel (:,:,iblk), & TbU (:,:,iblk)) - elseif (trim(grid_ice) == 'CD' .or. grid_ice == 'C') then + !----------------------------------------------------------------- + ! ice strength + !----------------------------------------------------------------- + + strength(:,:,iblk) = c0 ! initialize + do ij = 1, icellT(iblk) + i = indxTi(ij, iblk) + j = indxTj(ij, iblk) + call icepack_ice_strength(ncat = ncat, & + aice = aice (i,j, iblk), & + vice = vice (i,j, iblk), & + aice0 = aice0 (i,j, iblk), & + aicen = aicen (i,j,:,iblk), & + vicen = vicen (i,j,:,iblk), & + strength = strength(i,j, iblk)) + enddo ! ij + + enddo ! iblk + !$OMP END PARALLEL DO + elseif (trim(grid_ice) == 'CD' .or. 
grid_ice == 'C') then + !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,ij,i,j) SCHEDULE(runtime) + do iblk = 1, nblocks + + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi + call dyn_prep2 (nx_block, ny_block, & ilo, ihi, jlo, jhi, & icellT (iblk), icellU (iblk), & @@ -455,122 +541,108 @@ subroutine evp (dt) uvel_init (:,:,iblk), vvel_init (:,:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & TbU (:,:,iblk)) - endif - !----------------------------------------------------------------- - ! ice strength - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! ice strength + !----------------------------------------------------------------- - strength(:,:,iblk) = c0 ! initialize - do ij = 1, icellT(iblk) - i = indxTi(ij, iblk) - j = indxTj(ij, iblk) - call icepack_ice_strength(ncat = ncat, & - aice = aice (i,j, iblk), & - vice = vice (i,j, iblk), & - aice0 = aice0 (i,j, iblk), & - aicen = aicen (i,j,:,iblk), & - vicen = vicen (i,j,:,iblk), & - strength = strength(i,j, iblk) ) - enddo ! ij - - enddo ! iblk - !$OMP END PARALLEL DO + strength(:,:,iblk) = c0 ! initialize + do ij = 1, icellT(iblk) + i = indxTi(ij, iblk) + j = indxTj(ij, iblk) + call icepack_ice_strength(ncat = ncat, & + aice = aice (i,j, iblk), & + vice = vice (i,j, iblk), & + aice0 = aice0 (i,j, iblk), & + aicen = aicen (i,j,:,iblk), & + vicen = vicen (i,j,:,iblk), & + strength = strength(i,j, iblk) ) + enddo ! ij - if (grid_ice == 'CD' .or. grid_ice == 'C') then - !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,i,j) SCHEDULE(runtime) - do iblk = 1, nblocks + !----------------------------------------------------------------- + ! more preparation for dynamics on N grid + !----------------------------------------------------------------- - !----------------------------------------------------------------- - ! more preparation for dynamics on N grid - !----------------------------------------------------------------- + call dyn_prep2 (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + icellT (iblk), icellN (iblk), & + indxTi (:,iblk), indxTj (:,iblk), & + indxNi (:,iblk), indxNj (:,iblk), & + aiN (:,:,iblk), nmass (:,:,iblk), & + nmassdti (:,:,iblk), fcorN_blk (:,:,iblk), & + nmask (:,:,iblk), & + uocnN (:,:,iblk), vocnN (:,:,iblk), & + strairxN (:,:,iblk), strairyN (:,:,iblk), & + ss_tltxN (:,:,iblk), ss_tltyN (:,:,iblk), & + iceTmask (:,:,iblk), iceNmask (:,:,iblk), & + fmN (:,:,iblk), dt , & + strtltxN (:,:,iblk), strtltyN (:,:,iblk), & + strocnxN (:,:,iblk), strocnyN (:,:,iblk), & + strintxN (:,:,iblk), strintyN (:,:,iblk), & + taubxN (:,:,iblk), taubyN (:,:,iblk), & + waterxN (:,:,iblk), wateryN (:,:,iblk), & + forcexN (:,:,iblk), forceyN (:,:,iblk), & + stressp_1 (:,:,iblk), stressp_2 (:,:,iblk), & + stressp_3 (:,:,iblk), stressp_4 (:,:,iblk), & + stressm_1 (:,:,iblk), stressm_2 (:,:,iblk), & + stressm_3 (:,:,iblk), stressm_4 (:,:,iblk), & + stress12_1(:,:,iblk), stress12_2(:,:,iblk), & + stress12_3(:,:,iblk), stress12_4(:,:,iblk), & + uvelN_init(:,:,iblk), vvelN_init(:,:,iblk), & + uvelN (:,:,iblk), vvelN (:,:,iblk), & + TbN (:,:,iblk)) - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi + !----------------------------------------------------------------- + ! 
more preparation for dynamics on E grid + !----------------------------------------------------------------- - call dyn_prep2 (nx_block, ny_block, & - ilo, ihi, jlo, jhi, & - icellT (iblk), icellN (iblk), & - indxTi (:,iblk), indxTj (:,iblk), & - indxNi (:,iblk), indxNj (:,iblk), & - aiN (:,:,iblk), nmass (:,:,iblk), & - nmassdti (:,:,iblk), fcorN_blk (:,:,iblk), & - nmask (:,:,iblk), & - uocnN (:,:,iblk), vocnN (:,:,iblk), & - strairxN (:,:,iblk), strairyN (:,:,iblk), & - ss_tltxN (:,:,iblk), ss_tltyN (:,:,iblk), & - iceTmask (:,:,iblk), iceNmask (:,:,iblk), & - fmN (:,:,iblk), dt, & - strtltxN (:,:,iblk), strtltyN (:,:,iblk), & - strocnxN (:,:,iblk), strocnyN (:,:,iblk), & - strintxN (:,:,iblk), strintyN (:,:,iblk), & - taubxN (:,:,iblk), taubyN (:,:,iblk), & - waterxN (:,:,iblk), wateryN (:,:,iblk), & - forcexN (:,:,iblk), forceyN (:,:,iblk), & - stressp_1 (:,:,iblk), stressp_2 (:,:,iblk), & - stressp_3 (:,:,iblk), stressp_4 (:,:,iblk), & - stressm_1 (:,:,iblk), stressm_2 (:,:,iblk), & - stressm_3 (:,:,iblk), stressm_4 (:,:,iblk), & - stress12_1(:,:,iblk), stress12_2(:,:,iblk), & - stress12_3(:,:,iblk), stress12_4(:,:,iblk), & - uvelN_init(:,:,iblk), vvelN_init(:,:,iblk), & - uvelN (:,:,iblk), vvelN (:,:,iblk), & - TbN (:,:,iblk)) + call dyn_prep2 (nx_block, ny_block, & + ilo, ihi, jlo, jhi, & + icellT (iblk), icellE (iblk), & + indxTi (:,iblk), indxTj (:,iblk), & + indxEi (:,iblk), indxEj (:,iblk), & + aiE (:,:,iblk), emass (:,:,iblk), & + emassdti (:,:,iblk), fcorE_blk (:,:,iblk), & + emask (:,:,iblk), & + uocnE (:,:,iblk), vocnE (:,:,iblk), & + strairxE (:,:,iblk), strairyE (:,:,iblk), & + ss_tltxE (:,:,iblk), ss_tltyE (:,:,iblk), & + iceTmask (:,:,iblk), iceEmask (:,:,iblk), & + fmE (:,:,iblk), dt , & + strtltxE (:,:,iblk), strtltyE (:,:,iblk), & + strocnxE (:,:,iblk), strocnyE (:,:,iblk), & + strintxE (:,:,iblk), strintyE (:,:,iblk), & + taubxE (:,:,iblk), taubyE (:,:,iblk), & + waterxE (:,:,iblk), wateryE (:,:,iblk), & + forcexE (:,:,iblk), forceyE (:,:,iblk), & + stressp_1 (:,:,iblk), stressp_2 (:,:,iblk), & + stressp_3 (:,:,iblk), stressp_4 (:,:,iblk), & + stressm_1 (:,:,iblk), stressm_2 (:,:,iblk), & + stressm_3 (:,:,iblk), stressm_4 (:,:,iblk), & + stress12_1(:,:,iblk), stress12_2(:,:,iblk), & + stress12_3(:,:,iblk), stress12_4(:,:,iblk), & + uvelE_init(:,:,iblk), vvelE_init(:,:,iblk), & + uvelE (:,:,iblk), vvelE (:,:,iblk), & + TbE (:,:,iblk)) - !----------------------------------------------------------------- - ! 
more preparation for dynamics on E grid - !----------------------------------------------------------------- - call dyn_prep2 (nx_block, ny_block, & - ilo, ihi, jlo, jhi, & - icellT (iblk), icellE (iblk), & - indxTi (:,iblk), indxTj (:,iblk), & - indxEi (:,iblk), indxEj (:,iblk), & - aiE (:,:,iblk), emass (:,:,iblk), & - emassdti (:,:,iblk), fcorE_blk (:,:,iblk), & - emask (:,:,iblk), & - uocnE (:,:,iblk), vocnE (:,:,iblk), & - strairxE (:,:,iblk), strairyE (:,:,iblk), & - ss_tltxE (:,:,iblk), ss_tltyE (:,:,iblk), & - iceTmask (:,:,iblk), iceEmask (:,:,iblk), & - fmE (:,:,iblk), dt, & - strtltxE (:,:,iblk), strtltyE (:,:,iblk), & - strocnxE (:,:,iblk), strocnyE (:,:,iblk), & - strintxE (:,:,iblk), strintyE (:,:,iblk), & - taubxE (:,:,iblk), taubyE (:,:,iblk), & - waterxE (:,:,iblk), wateryE (:,:,iblk), & - forcexE (:,:,iblk), forceyE (:,:,iblk), & - stressp_1 (:,:,iblk), stressp_2 (:,:,iblk), & - stressp_3 (:,:,iblk), stressp_4 (:,:,iblk), & - stressm_1 (:,:,iblk), stressm_2 (:,:,iblk), & - stressm_3 (:,:,iblk), stressm_4 (:,:,iblk), & - stress12_1(:,:,iblk), stress12_2(:,:,iblk), & - stress12_3(:,:,iblk), stress12_4(:,:,iblk), & - uvelE_init(:,:,iblk), vvelE_init(:,:,iblk), & - uvelE (:,:,iblk), vvelE (:,:,iblk), & - TbE (:,:,iblk)) - - - do i=1,nx_block - do j=1,ny_block - if (.not.iceUmask(i,j,iblk)) then - stresspU (i,j,iblk) = c0 - stressmU (i,j,iblk) = c0 - stress12U(i,j,iblk) = c0 - endif - if (.not.iceTmask(i,j,iblk)) then - stresspT (i,j,iblk) = c0 - stressmT (i,j,iblk) = c0 - stress12T(i,j,iblk) = c0 - endif - enddo - enddo - enddo ! iblk - !$OMP END PARALLEL DO + do i=1,nx_block + do j=1,ny_block + if (.not.iceUmask(i,j,iblk)) then + stresspU (i,j,iblk) = c0 + stressmU (i,j,iblk) = c0 + stress12U(i,j,iblk) = c0 + endif + if (.not.iceTmask(i,j,iblk)) then + stresspT (i,j,iblk) = c0 + stressmT (i,j,iblk) = c0 + stress12T(i,j,iblk) = c0 + endif + enddo + enddo + enddo ! iblk + !$OMP END PARALLEL DO endif ! grid_ice @@ -721,10 +793,6 @@ subroutine evp (dt) if (evp_algorithm == "shared_mem_1d" ) then - if (first_time .and. my_task == master_task) then - write(nu_diag,'(3a)') subname,' Entering evp_algorithm version ',evp_algorithm - first_time = .false. - endif if (trim(grid_type) == 'tripole') then call abort_ice(trim(subname)//' & & Kernel not tested on tripole grid. Set evp_algorithm=standard_2d') @@ -1211,12 +1279,6 @@ subroutine evp (dt) call ice_timer_stop(timer_evp_2d) endif ! evp_algorithm - deallocate(fld2,fld3,fld4) - if (grid_ice == 'CD' .or. grid_ice == 'C') then - deallocate(strengthU, divergU, tensionU, shearU, deltaU) - deallocate(zetax2T, zetax2U, etax2T, etax2U) - endif - if (maskhalo_dyn) then call ice_HaloDestroy(halo_info_mask) endif @@ -1658,17 +1720,17 @@ end subroutine stress ! Kimmritz, M., S. Danilov and M. Losch (2016). The adaptive EVP method ! for solving the sea ice momentum equation. Ocean Model., 101, 59-67. - subroutine stressC_T (nx_block, ny_block , & - icellT , & - indxTi , indxTj , & - uvelE , vvelE , & - uvelN , vvelN , & - dxN , dyE , & - dxT , dyT , & - uarea , DminTarea, & - strength, shearU , & - zetax2T , etax2T , & - stressp , stressm ) + subroutine stressC_T (nx_block, ny_block , & + icellT , & + indxTi , indxTj , & + uvelE , vvelE , & + uvelN , vvelN , & + dxN , dyE , & + dxT , dyT , & + uarea , DminTarea , & + strength , shearU , & + zetax2T , etax2T , & + stressp , stressm ) use ice_dyn_shared, only: strain_rates_T, capping, & visc_replpress, e_factor @@ -1682,24 +1744,24 @@ subroutine stressC_T (nx_block, ny_block , & indxTj ! 
compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - uvelE , & ! x-component of velocity (m/s) at the E point - vvelE , & ! y-component of velocity (m/s) at the E point - uvelN , & ! x-component of velocity (m/s) at the N point - vvelN , & ! y-component of velocity (m/s) at the N point - dxN , & ! width of N-cell through the middle (m) - dyE , & ! height of E-cell through the middle (m) - dxT , & ! width of T-cell through the middle (m) - dyT , & ! height of T-cell through the middle (m) - strength , & ! ice strength (N/m) - shearU , & ! shearU - uarea , & ! area of u cell - DminTarea ! deltaminEVP*tarea + uvelE , & ! x-component of velocity (m/s) at the E point + vvelE , & ! y-component of velocity (m/s) at the E point + uvelN , & ! x-component of velocity (m/s) at the N point + vvelN , & ! y-component of velocity (m/s) at the N point + dxN , & ! width of N-cell through the middle (m) + dyE , & ! height of E-cell through the middle (m) + dxT , & ! width of T-cell through the middle (m) + dyT , & ! height of T-cell through the middle (m) + strength , & ! ice strength (N/m) + shearU , & ! shearU local for this routine + uarea , & ! area of u cell + DminTarea ! deltaminEVP*tarea real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & - zetax2T , & ! zetax2 = 2*zeta (bulk viscosity) - etax2T , & ! etax2 = 2*eta (shear viscosity) - stressp , & ! sigma11+sigma22 - stressm ! sigma11-sigma22 + zetax2T , & ! zetax2 = 2*zeta (bulk viscosity) + etax2T , & ! etax2 = 2*eta (shear viscosity) + stressp , & ! sigma11+sigma22 + stressm ! sigma11-sigma22 ! local variables @@ -1707,8 +1769,8 @@ subroutine stressC_T (nx_block, ny_block , & i, j, ij real (kind=dbl_kind), dimension (nx_block,ny_block) :: & - divT , & ! divergence at T point - tensionT ! tension at T point + divT , & ! divergence at T point + tensionT ! tension at T point real (kind=dbl_kind) :: & shearTsqr , & ! strain rates squared at T point @@ -1728,7 +1790,7 @@ subroutine stressC_T (nx_block, ny_block , & uvelN (:,:), vvelN (:,:), & dxN (:,:), dyE (:,:), & dxT (:,:), dyT (:,:), & - divT (:,:), tensionT(:,:) ) + divT (:,:), tensionT(:,:)) do ij = 1, icellT i = indxTi(ij) @@ -1752,7 +1814,7 @@ subroutine stressC_T (nx_block, ny_block , & !----------------------------------------------------------------- call visc_replpress (strength(i,j), DminTarea(i,j), DeltaT, & - zetax2T (i,j), etax2T (i,j), rep_prsT, capping) + zetax2T (i,j), etax2T(i,j), rep_prsT, capping) !----------------------------------------------------------------- ! the stresses ! kg/s^2 @@ -1783,13 +1845,13 @@ end subroutine stressC_T ! Kimmritz, M., S. Danilov and M. Losch (2016). The adaptive EVP method ! for solving the sea ice momentum equation. Ocean Model., 101, 59-67. - subroutine stressC_U (nx_block , ny_block, & - icellU, & - indxUi , indxUj, & + subroutine stressC_U (nx_block , ny_block ,& + icellU ,& + indxUi , indxUj ,& uarea , & - etax2U , deltaU, & - strengthU, shearU, & - stress12 ) + etax2U , deltaU ,& + strengthU, shearU ,& + stress12) use ice_dyn_shared, only: visc_replpress, & visc_method, deltaminEVP, capping @@ -1847,7 +1909,7 @@ subroutine stressC_U (nx_block , ny_block, & DminUarea = deltaminEVP*uarea(i,j) ! only need etax2U here, but other terms are calculated with etax2U ! 
minimal extra calculations here even though it seems like there is - call visc_replpress (strengthU(i,j), DminUarea, DeltaU(i,j), & + call visc_replpress (strengthU(i,j), DminUarea, deltaU(i,j), & lzetax2U , letax2U , lrep_prsU , capping) stress12(i,j) = (stress12(i,j)*(c1-arlx1i*revp) & + arlx1i*p5*letax2U*shearU(i,j)) * denom1 @@ -1863,18 +1925,18 @@ end subroutine stressC_U ! author: JF Lemieux, ECCC ! Nov 2021 - subroutine stressCD_T (nx_block, ny_block, & - icellT, & - indxTi, indxTj, & - uvelE, vvelE, & - uvelN, vvelN, & - dxN, dyE, & - dxT, dyT, & - DminTarea, & - strength, & - zetax2T, etax2T, & - stresspT, stressmT, & - stress12T ) + subroutine stressCD_T (nx_block, ny_block , & + icellT , & + indxTi , indxTj , & + uvelE , vvelE , & + uvelN , vvelN , & + dxN , dyE , & + dxT , dyT , & + DminTarea, & + strength, & + zetax2T , etax2T , & + stresspT, stressmT , & + stress12T) use ice_dyn_shared, only: strain_rates_T, capping, & visc_replpress @@ -1929,13 +1991,13 @@ subroutine stressCD_T (nx_block, ny_block, & call strain_rates_T (nx_block , ny_block , & icellT , & - indxTi(:) , indxTj (:) , & + indxTi (:), indxTj (:) , & uvelE (:,:), vvelE (:,:), & uvelN (:,:), vvelN (:,:), & dxN (:,:), dyE (:,:), & dxT (:,:), dyT (:,:), & divT (:,:), tensionT(:,:), & - shearT(:,:), DeltaT (:,:) ) + shearT(:,:), DeltaT (:,:)) do ij = 1, icellT i = indxTi(ij) @@ -1946,7 +2008,7 @@ subroutine stressCD_T (nx_block, ny_block, & !----------------------------------------------------------------- call visc_replpress (strength(i,j), DminTarea(i,j), DeltaT(i,j), & - zetax2T (i,j), etax2T (i,j), rep_prsT , capping) + zetax2T (i,j), etax2T(i,j), rep_prsT , capping) !----------------------------------------------------------------- ! the stresses ! kg/s^2 @@ -1973,16 +2035,16 @@ end subroutine stressCD_T ! author: JF Lemieux, ECCC ! Nov 2021 - subroutine stressCD_U (nx_block, ny_block, & - icellU, & - indxUi, indxUj, & - uarea, & - zetax2U, etax2U, & - strengthU, & - divergU, tensionU, & - shearU, DeltaU, & - stresspU, stressmU, & - stress12U ) + subroutine stressCD_U (nx_block, ny_block, & + icellU , & + indxUi , indxUj , & + uarea , & + zetax2U , etax2U , & + strengthU , & + divergU , tensionU, & + shearU , deltaU , & + stresspU , stressmU, & + stress12U) use ice_dyn_shared, only: strain_rates_U, & visc_replpress, & @@ -1997,7 +2059,7 @@ subroutine stressCD_U (nx_block, ny_block, & indxUj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - uarea , & ! area of U-cell (m^2) + uarea , & ! area of U-cell (m^2) zetax2U , & ! 2*zeta at U point etax2U , & ! 2*eta at U point strengthU, & ! ice strength at U point @@ -2043,7 +2105,7 @@ subroutine stressCD_U (nx_block, ny_block, & DminUarea = deltaminEVP*uarea(i,j) ! only need etax2U here, but other terms are calculated with etax2U ! 
minimal extra calculations here even though it seems like there is - call visc_replpress (strengthU(i,j), DminUarea, DeltaU(i,j), & + call visc_replpress (strengthU(i,j), DminUarea, deltaU(i,j), & lzetax2U , letax2U , lrep_prsU , capping) endif diff --git a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 index 187ec55cc..5e2757b93 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 @@ -24,9 +24,8 @@ module ice_dyn_shared implicit none private public :: set_evp_parameters, stepu, stepuv_CD, stepu_C, stepv_C, & - principal_stress, init_dyn, dyn_prep1, dyn_prep2, dyn_finish, & + principal_stress, init_dyn_shared, dyn_prep1, dyn_prep2, dyn_finish, & seabed_stress_factor_LKD, seabed_stress_factor_prob, & - alloc_dyn_shared, & deformations, deformationsC_T, deformationsCD_T, & strain_rates, strain_rates_T, strain_rates_U, & visc_replpress, & @@ -94,6 +93,11 @@ module ice_dyn_shared fcorE_blk(:,:,:), & ! Coriolis parameter at E points (1/s) fcorN_blk(:,:,:) ! Coriolis parameter at N points (1/s) + real (kind=dbl_kind), allocatable, public :: & + fld2(:,:,:,:), & ! 2 bundled fields + fld3(:,:,:,:), & ! 3 bundled fields + fld4(:,:,:,:) ! 4 bundled fields + real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & uvel_init , & ! x-component of velocity (m/s), beginning of timestep vvel_init ! y-component of velocity (m/s), beginning of timestep @@ -176,6 +180,15 @@ subroutine alloc_dyn_shared vvel_init (nx_block,ny_block,max_blocks), & ! y-component of velocity (m/s), beginning of timestep iceTmask (nx_block,ny_block,max_blocks), & ! T mask for dynamics iceUmask (nx_block,ny_block,max_blocks), & ! U mask for dynamics + fcor_blk (nx_block,ny_block,max_blocks), & ! Coriolis + DminTarea (nx_block,ny_block,max_blocks), & ! + stat=ierr) + if (ierr/=0) call abort_ice(subname//': Out of memory') + + allocate( & + fld2(nx_block,ny_block,2,max_blocks), & + fld3(nx_block,ny_block,3,max_blocks), & + fld4(nx_block,ny_block,4,max_blocks), & stat=ierr) if (ierr/=0) call abort_ice(subname//': Out of memory') @@ -187,6 +200,8 @@ subroutine alloc_dyn_shared vvelN_init (nx_block,ny_block,max_blocks), & ! y-component of velocity (m/s), beginning of timestep iceEmask (nx_block,ny_block,max_blocks), & ! T mask for dynamics iceNmask (nx_block,ny_block,max_blocks), & ! U mask for dynamics + fcorE_blk (nx_block,ny_block,max_blocks), & ! Coriolis + fcorN_blk (nx_block,ny_block,max_blocks), & ! Coriolis stat=ierr) if (ierr/=0) call abort_ice(subname//': Out of memory') endif @@ -197,18 +212,18 @@ end subroutine alloc_dyn_shared ! Initialize parameters and variables needed for the dynamics ! author: Elizabeth C. Hunke, LANL - subroutine init_dyn (dt) + subroutine init_dyn_shared (dt) use ice_blocks, only: nx_block, ny_block use ice_domain, only: nblocks, halo_dynbundle use ice_domain_size, only: max_blocks - use ice_flux, only: rdg_conv, rdg_shear, & + use ice_flux, only: & stressp_1, stressp_2, stressp_3, stressp_4, & stressm_1, stressm_2, stressm_3, stressm_4, & stress12_1, stress12_2, stress12_3, stress12_4, & stresspT, stressmT, stress12T, & stresspU, stressmU, stress12U - use ice_state, only: uvel, vvel, uvelE, vvelE, uvelN, vvelN, divu, shear + use ice_state, only: uvel, vvel, uvelE, vvelE, uvelN, vvelN use ice_grid, only: ULAT, NLAT, ELAT, tarea real (kind=dbl_kind), intent(in) :: & @@ -221,10 +236,11 @@ subroutine init_dyn (dt) nprocs, & ! number of processors iblk ! 
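! [editor's remarks on the ice_dyn_shared hunks above]
! (1) The inline comments for iceEmask and iceNmask still say "T mask" and
!     "U mask"; they presumably mean the E and N dynamics masks.
! (2) fld2/fld3/fld4 are now module-level bundled halo work arrays allocated
!     once in alloc_dyn_shared, so the dynamics schemes share them instead of
!     ice_dyn_vp keeping private copies.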
block index - character(len=*), parameter :: subname = '(init_dyn)' + character(len=*), parameter :: subname = '(init_dyn_shared)' call set_evp_parameters (dt) - + ! allocate dyn shared (init_uvel,init_vvel) + call alloc_dyn_shared ! Set halo_dynbundle, this is empirical at this point, could become namelist halo_dynbundle = .true. nprocs = get_num_procs() @@ -237,14 +253,6 @@ subroutine init_dyn (dt) write(nu_diag,*) 'halo_dynbundle =', halo_dynbundle endif - allocate(fcor_blk(nx_block,ny_block,max_blocks)) - allocate(DminTarea(nx_block,ny_block,max_blocks)) - - if (grid_ice == 'CD' .or. grid_ice == 'C') then - allocate(fcorE_blk(nx_block,ny_block,max_blocks)) - allocate(fcorN_blk(nx_block,ny_block,max_blocks)) - endif - !$OMP PARALLEL DO PRIVATE(iblk,i,j) SCHEDULE(runtime) do iblk = 1, nblocks do j = 1, ny_block @@ -260,11 +268,6 @@ subroutine init_dyn (dt) vvelN(i,j,iblk) = c0 endif - ! strain rates - divu (i,j,iblk) = c0 - shear(i,j,iblk) = c0 - rdg_conv (i,j,iblk) = c0 - rdg_shear(i,j,iblk) = c0 ! Coriolis parameter if (trim(coriolis) == 'constant') then @@ -330,7 +333,7 @@ subroutine init_dyn (dt) enddo ! iblk !$OMP END PARALLEL DO - end subroutine init_dyn + end subroutine init_dyn_shared !======================================================================= ! Set parameters needed for the evp dynamics. diff --git a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 index 6534e7568..5a01f4308 100644 --- a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 +++ b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 @@ -48,7 +48,7 @@ module ice_dyn_vp use ice_dyn_shared, only: dyn_prep1, dyn_prep2, dyn_finish, & cosw, sinw, fcor_blk, uvel_init, vvel_init, & seabed_stress_factor_LKD, seabed_stress_factor_prob, seabed_stress_method, & - seabed_stress, Ktens, stack_fields, unstack_fields + seabed_stress, Ktens, stack_fields, unstack_fields, fld2, fld3, fld4 use ice_fileunits, only: nu_diag use ice_flux, only: fmU use ice_global_reductions, only: global_sum @@ -105,11 +105,6 @@ module ice_dyn_vp indxUi(:,:) , & ! compressed index in i-direction indxUj(:,:) ! compressed index in j-direction - real (kind=dbl_kind), allocatable :: & - fld2(:,:,:,:), & ! work array for boundary updates - fld3(:,:,:,:), & ! work array for boundary updates - fld4(:,:,:,:) ! work array for boundary updates - !======================================================================= contains @@ -126,6 +121,8 @@ subroutine init_vp use ice_constants, only: c1, & field_loc_center, field_type_scalar use ice_domain, only: blocks_ice, halo_info + use ice_calendar, only: dt_dyn + use ice_dyn_shared, only: init_dyn_shared ! use ice_grid, only: tarea ! local variables @@ -137,15 +134,14 @@ subroutine init_vp type (block) :: & this_block ! block information for current block + call init_dyn_shared(dt_dyn) + ! 
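! [editor's sketch] init_vp now calls init_dyn_shared first, as shown here.
! The drivers below also call an init_evp; its body is not part of this
! patch excerpt, so the following is only the assumed pattern:
!   subroutine init_evp
!      use ice_calendar, only: dt_dyn
!      call init_dyn_shared(dt_dyn)   ! shared parameters + allocation
!      ! ... EVP-specific setup ...
!   end subroutine init_evp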
Initialize module variables allocate(icellT(max_blocks), icellU(max_blocks)) allocate(indxTi(nx_block*ny_block, max_blocks), & indxTj(nx_block*ny_block, max_blocks), & indxUi(nx_block*ny_block, max_blocks), & indxUj(nx_block*ny_block, max_blocks)) - allocate(fld2(nx_block,ny_block,2,max_blocks)) - allocate(fld3(nx_block,ny_block,3,max_blocks)) - allocate(fld4(nx_block,ny_block,4,max_blocks)) end subroutine init_vp diff --git a/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 b/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 index bd5a49eaf..ffe9ec587 100644 --- a/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 +++ b/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 @@ -55,7 +55,7 @@ subroutine dumpfile(filename_spec) use ice_blocks, only: nx_block, ny_block use ice_domain, only: nblocks use ice_domain_size, only: nilyr, nslyr, ncat, max_blocks - use ice_dyn_shared, only: iceUmask, iceEmask, iceNmask + use ice_dyn_shared, only: iceUmask, iceEmask, iceNmask, kdyn use ice_flux, only: scale_factor, swvdr, swvdf, swidr, swidf, & strocnxT_iavg, strocnyT_iavg, sst, frzmlt, & stressp_1, stressp_2, stressp_3, stressp_4, & @@ -215,45 +215,52 @@ subroutine dumpfile(filename_spec) !----------------------------------------------------------------- ! ice mask for dynamics !----------------------------------------------------------------- - - !$OMP PARALLEL DO PRIVATE(iblk,i,j) - do iblk = 1, nblocks - do j = 1, ny_block - do i = 1, nx_block - work1(i,j,iblk) = c0 - if (iceUmask(i,j,iblk)) work1(i,j,iblk) = c1 - enddo - enddo - enddo - !$OMP END PARALLEL DO - call write_restart_field(nu_dump,0,work1,'ruf8','iceumask',1,diag) - - if (grid_ice == 'CD' .or. grid_ice == 'C') then - + if (kdyn > 0) then !$OMP PARALLEL DO PRIVATE(iblk,i,j) do iblk = 1, nblocks do j = 1, ny_block do i = 1, nx_block work1(i,j,iblk) = c0 - if (iceNmask(i,j,iblk)) work1(i,j,iblk) = c1 + if (iceUmask(i,j,iblk)) work1(i,j,iblk) = c1 enddo enddo enddo !$OMP END PARALLEL DO - call write_restart_field(nu_dump,0,work1,'ruf8','icenmask',1,diag) + call write_restart_field(nu_dump,0,work1,'ruf8','iceumask',1,diag) - !$OMP PARALLEL DO PRIVATE(iblk,i,j) - do iblk = 1, nblocks - do j = 1, ny_block - do i = 1, nx_block - work1(i,j,iblk) = c0 - if (iceEmask(i,j,iblk)) work1(i,j,iblk) = c1 - enddo + if (grid_ice == 'CD' .or. grid_ice == 'C') then + + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + work1(i,j,iblk) = c0 + if (iceNmask(i,j,iblk)) work1(i,j,iblk) = c1 + enddo + enddo enddo - enddo - !$OMP END PARALLEL DO - call write_restart_field(nu_dump,0,work1,'ruf8','iceemask',1,diag) + !$OMP END PARALLEL DO + call write_restart_field(nu_dump,0,work1,'ruf8','icenmask',1,diag) + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + work1(i,j,iblk) = c0 + if (iceEmask(i,j,iblk)) work1(i,j,iblk) = c1 + enddo + enddo + enddo + !$OMP END PARALLEL DO + call write_restart_field(nu_dump,0,work1,'ruf8','iceemask',1,diag) + endif + else + work1(:,:,:) = c0 + call write_restart_field(nu_dump,0,work1,'ruf8','iceumask',1,diag) + if (grid_ice == 'CD' .or. grid_ice == 'C') then + call write_restart_field(nu_dump,0,work1,'ruf8','icenmask',1,diag) + call write_restart_field(nu_dump,0,work1,'ruf8','iceemask',1,diag) + endif endif ! 
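! [editor's summary] the dumpfile mask logic above, schematically:
!   kdyn > 0 : write the real iceumask (plus icenmask and iceemask on the
!              C and CD grids) as 0/1 fields
!   kdyn <= 0: write all-zero placeholders, so the restart file keeps the
!              same sequence of records when dynamics is turned off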
for mixed layer model @@ -277,7 +284,7 @@ subroutine restartfile (ice_ic) use ice_domain, only: nblocks, halo_info use ice_domain_size, only: nilyr, nslyr, ncat, & max_blocks - use ice_dyn_shared, only: iceUmask, iceEmask, iceNmask + use ice_dyn_shared, only: iceUmask, iceEmask, iceNmask,kdyn use ice_flux, only: scale_factor, swvdr, swvdf, swidr, swidf, & strocnxT_iavg, strocnyT_iavg, sst, frzmlt, & stressp_1, stressp_2, stressp_3, stressp_4, & @@ -524,57 +531,76 @@ subroutine restartfile (ice_ic) !----------------------------------------------------------------- ! ice mask for dynamics !----------------------------------------------------------------- - if (my_task == master_task) & - write(nu_diag,*) 'ice mask for dynamics' - - call read_restart_field(nu_restart,0,work1,'ruf8', & - 'iceumask',1,diag,field_loc_center, field_type_scalar) - - iceUmask(:,:,:) = .false. - !$OMP PARALLEL DO PRIVATE(iblk,i,j) - do iblk = 1, nblocks - do j = 1, ny_block - do i = 1, nx_block - if (work1(i,j,iblk) > p5) iceUmask(i,j,iblk) = .true. - enddo - enddo - enddo - !$OMP END PARALLEL DO + if (kdyn > 0) then - if (grid_ice == 'CD' .or. grid_ice == 'C') then - - if (query_field(nu_restart,'icenmask')) then + if (my_task == master_task) & + write(nu_diag,*) 'ice mask for dynamics' + if (query_field(nu_restart,'iceumask')) then call read_restart_field(nu_restart,0,work1,'ruf8', & - 'icenmask',1,diag,field_loc_center, field_type_scalar) + 'iceumask',1,diag,field_loc_center, field_type_scalar) - iceNmask(:,:,:) = .false. + iceUmask(:,:,:) = .false. !$OMP PARALLEL DO PRIVATE(iblk,i,j) do iblk = 1, nblocks do j = 1, ny_block do i = 1, nx_block - if (work1(i,j,iblk) > p5) iceNmask(i,j,iblk) = .true. + if (work1(i,j,iblk) > p5) iceUmask(i,j,iblk) = .true. enddo enddo enddo !$OMP END PARALLEL DO endif - - if (query_field(nu_restart,'iceemask')) then - call read_restart_field(nu_restart,0,work1,'ruf8', & - 'iceemask',1,diag,field_loc_center, field_type_scalar) - - iceEmask(:,:,:) = .false. - !$OMP PARALLEL DO PRIVATE(iblk,i,j) - do iblk = 1, nblocks - do j = 1, ny_block - do i = 1, nx_block - if (work1(i,j,iblk) > p5) iceEmask(i,j,iblk) = .true. + if (grid_ice == 'CD' .or. grid_ice == 'C') then + + if (query_field(nu_restart,'icenmask')) then + call read_restart_field(nu_restart,0,work1,'ruf8', & + 'icenmask',1,diag,field_loc_center, field_type_scalar) + + iceNmask(:,:,:) = .false. + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + if (work1(i,j,iblk) > p5) iceNmask(i,j,iblk) = .true. + enddo + enddo enddo + !$OMP END PARALLEL DO + endif + + if (query_field(nu_restart,'iceemask')) then + call read_restart_field(nu_restart,0,work1,'ruf8', & + 'iceemask',1,diag,field_loc_center, field_type_scalar) + + iceEmask(:,:,:) = .false. + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + if (work1(i,j,iblk) > p5) iceEmask(i,j,iblk) = .true. + enddo + enddo enddo - enddo - !$OMP END PARALLEL DO + !$OMP END PARALLEL DO + endif + endif + else + if (my_task == master_task) & + write(nu_diag,*) 'ice mask for dynamics - not used, however mandatory to read in binary files' + if (query_field(nu_restart,'iceumask')) then + call read_restart_field(nu_restart,0,work1,'ruf8', & + 'iceumask',1,diag,field_loc_center, field_type_scalar) + endif + if (grid_ice == 'CD' .or. 
grid_ice == 'C') then + if (query_field(nu_restart,'icenmask')) then + call read_restart_field(nu_restart,0,work1,'ruf8', & + 'icenmask',1,diag,field_loc_center, field_type_scalar) + endif + if (query_field(nu_restart,'iceemask')) then + call read_restart_field(nu_restart,0,work1,'ruf8', & + 'iceemask',1,diag,field_loc_center, field_type_scalar) + endif endif - endif ! set Tsfcn to c0 on land diff --git a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 b/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 index 679a2b6e6..10fcf8b81 100644 --- a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 +++ b/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 @@ -942,11 +942,8 @@ logical function query_field(nu,vname) query_field = .false. - if (my_task == master_task) then - status = pio_inq_varid(File,trim(vname),vardesc) - if (status == PIO_noerr) query_field = .true. - endif - call broadcast_scalar(query_field,master_task) + status = pio_inq_varid(File,trim(vname),vardesc) + if (status == PIO_noerr) query_field = .true. end function query_field diff --git a/cicecore/drivers/direct/hadgem3/CICE_InitMod.F90 b/cicecore/drivers/direct/hadgem3/CICE_InitMod.F90 index 0b8ed689e..cd27f296e 100644 --- a/cicecore/drivers/direct/hadgem3/CICE_InitMod.F90 +++ b/cicecore/drivers/direct/hadgem3/CICE_InitMod.F90 @@ -70,9 +70,10 @@ subroutine cice_init use ice_diagnostics, only: init_diags use ice_domain, only: init_domain_blocks use ice_domain_size, only: ncat, nfsd - use ice_dyn_eap, only: init_eap, alloc_dyn_eap - use ice_dyn_shared, only: kdyn, init_dyn, alloc_dyn_shared + use ice_dyn_eap, only: init_eap + use ice_dyn_evp, only: init_evp use ice_dyn_vp, only: init_vp + use ice_dyn_shared, only: kdyn use ice_flux, only: init_coupler_flux, init_history_therm, & init_history_dyn, init_flux_atm, init_flux_ocn, alloc_flux use ice_forcing, only: init_forcing_ocn, init_forcing_atmo, & @@ -111,7 +112,6 @@ subroutine cice_init call alloc_grid ! allocate grid call alloc_arrays_column ! allocate column arrays call alloc_state ! allocate state - call alloc_dyn_shared ! allocate dyn shared (init_uvel,init_vvel) call alloc_flux_bgc ! allocate flux_bgc call alloc_flux ! allocate flux call init_ice_timers ! initialize all timers @@ -122,9 +122,9 @@ subroutine cice_init call init_calendar ! initialize some calendar stuff call init_hist (dt) ! initialize output history file - call init_dyn (dt_dyn) ! define dynamics parameters, variables - if (kdyn == 2) then - call alloc_dyn_eap ! allocate dyn_eap arrays + if (kdyn == 1) then + call init_evp + else if (kdyn == 2) then call init_eap ! define eap dynamics parameters, variables else if (kdyn == 3) then call init_vp ! 
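! [editor's note] the driver now dispatches the dynamics init directly on
! kdyn; schematically:
!   if (kdyn == 1) call init_evp        ! EVP
!   else if (kdyn == 2) call init_eap   ! anisotropic EAP
!   else if (kdyn == 3) call init_vp    ! implicit viscous-plastic
! replacing the old unconditional init_dyn plus alloc_dyn_eap calls.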
define vp dynamics parameters, variables @@ -262,7 +262,7 @@ subroutine init_restart nt_alvl, nt_vlvl, nt_apnd, nt_hpnd, nt_ipnd, & nt_iage, nt_FY, nt_aero, nt_fsd - character(len=*),parameter :: subname = '(init_restart)' + character(len=*), parameter :: subname = '(init_restart)' call icepack_query_tracer_sizes(ntrcr_out=ntrcr) call icepack_warnings_flush(nu_diag) diff --git a/cicecore/drivers/direct/nemo_concepts/CICE_InitMod.F90 b/cicecore/drivers/direct/nemo_concepts/CICE_InitMod.F90 index 0b8ed689e..cd27f296e 100644 --- a/cicecore/drivers/direct/nemo_concepts/CICE_InitMod.F90 +++ b/cicecore/drivers/direct/nemo_concepts/CICE_InitMod.F90 @@ -70,9 +70,10 @@ subroutine cice_init use ice_diagnostics, only: init_diags use ice_domain, only: init_domain_blocks use ice_domain_size, only: ncat, nfsd - use ice_dyn_eap, only: init_eap, alloc_dyn_eap - use ice_dyn_shared, only: kdyn, init_dyn, alloc_dyn_shared + use ice_dyn_eap, only: init_eap + use ice_dyn_evp, only: init_evp use ice_dyn_vp, only: init_vp + use ice_dyn_shared, only: kdyn use ice_flux, only: init_coupler_flux, init_history_therm, & init_history_dyn, init_flux_atm, init_flux_ocn, alloc_flux use ice_forcing, only: init_forcing_ocn, init_forcing_atmo, & @@ -111,7 +112,6 @@ subroutine cice_init call alloc_grid ! allocate grid call alloc_arrays_column ! allocate column arrays call alloc_state ! allocate state - call alloc_dyn_shared ! allocate dyn shared (init_uvel,init_vvel) call alloc_flux_bgc ! allocate flux_bgc call alloc_flux ! allocate flux call init_ice_timers ! initialize all timers @@ -122,9 +122,9 @@ subroutine cice_init call init_calendar ! initialize some calendar stuff call init_hist (dt) ! initialize output history file - call init_dyn (dt_dyn) ! define dynamics parameters, variables - if (kdyn == 2) then - call alloc_dyn_eap ! allocate dyn_eap arrays + if (kdyn == 1) then + call init_evp + else if (kdyn == 2) then call init_eap ! define eap dynamics parameters, variables else if (kdyn == 3) then call init_vp ! define vp dynamics parameters, variables @@ -262,7 +262,7 @@ subroutine init_restart nt_alvl, nt_vlvl, nt_apnd, nt_hpnd, nt_ipnd, & nt_iage, nt_FY, nt_aero, nt_fsd - character(len=*),parameter :: subname = '(init_restart)' + character(len=*), parameter :: subname = '(init_restart)' call icepack_query_tracer_sizes(ntrcr_out=ntrcr) call icepack_warnings_flush(nu_diag) diff --git a/cicecore/drivers/mct/cesm1/CICE_InitMod.F90 b/cicecore/drivers/mct/cesm1/CICE_InitMod.F90 index a8bf96ad2..5ee070673 100644 --- a/cicecore/drivers/mct/cesm1/CICE_InitMod.F90 +++ b/cicecore/drivers/mct/cesm1/CICE_InitMod.F90 @@ -72,9 +72,10 @@ subroutine cice_init(mpicom_ice) use ice_diagnostics, only: init_diags use ice_domain, only: init_domain_blocks use ice_domain_size, only: ncat, nfsd - use ice_dyn_eap, only: init_eap, alloc_dyn_eap - use ice_dyn_shared, only: kdyn, init_dyn, alloc_dyn_shared + use ice_dyn_eap, only: init_eap + use ice_dyn_evp, only: init_evp use ice_dyn_vp, only: init_vp + use ice_dyn_shared, only: kdyn use ice_flux, only: init_coupler_flux, init_history_therm, & init_history_dyn, init_flux_atm, init_flux_ocn, alloc_flux use ice_forcing, only: init_forcing_ocn, init_forcing_atmo, & @@ -125,7 +126,6 @@ subroutine cice_init(mpicom_ice) call alloc_grid ! allocate grid arrays call alloc_arrays_column ! allocate column arrays call alloc_state ! allocate state arrays - call alloc_dyn_shared ! allocate dyn shared arrays call alloc_flux_bgc ! allocate flux_bgc arrays call alloc_flux ! 
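! [editor's note] "call alloc_dyn_shared" disappears from every driver in
! this patch because the allocation now happens inside init_dyn_shared (see
! the ice_dyn_shared.F90 hunk above); each driver keeps only the kdyn
! dispatch to init_evp/init_eap/init_vp.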
allocate flux arrays call init_ice_timers ! initialize all timers @@ -135,9 +135,9 @@ subroutine cice_init(mpicom_ice) call init_calendar ! initialize some calendar stuff call init_hist (dt) ! initialize output history file - call init_dyn (dt_dyn) ! define dynamics parameters, variables - if (kdyn == 2) then - call alloc_dyn_eap ! allocate dyn_eap arrays + if (kdyn == 1) then + call init_evp + else if (kdyn == 2) then call init_eap ! define eap dynamics parameters, variables else if (kdyn == 3) then call init_vp ! define vp dynamics parameters, variables diff --git a/cicecore/drivers/nuopc/cmeps/CICE_InitMod.F90 b/cicecore/drivers/nuopc/cmeps/CICE_InitMod.F90 index 5fbde9cce..091a948bb 100644 --- a/cicecore/drivers/nuopc/cmeps/CICE_InitMod.F90 +++ b/cicecore/drivers/nuopc/cmeps/CICE_InitMod.F90 @@ -36,7 +36,6 @@ subroutine cice_init1() use ice_domain , only: init_domain_blocks use ice_arrays_column , only: alloc_arrays_column use ice_state , only: alloc_state - use ice_dyn_shared , only: alloc_dyn_shared use ice_flux_bgc , only: alloc_flux_bgc use ice_flux , only: alloc_flux use ice_timers , only: timer_total, init_ice_timers, ice_timer_start @@ -59,7 +58,6 @@ subroutine cice_init1() call alloc_grid ! allocate grid arrays call alloc_arrays_column ! allocate column arrays call alloc_state ! allocate state arrays - call alloc_dyn_shared ! allocate dyn shared arrays call alloc_flux_bgc ! allocate flux_bgc arrays call alloc_flux ! allocate flux arrays call init_ice_timers ! initialize all timers @@ -79,9 +77,10 @@ subroutine cice_init2() use ice_communicate , only: my_task, master_task use ice_diagnostics , only: init_diags use ice_domain_size , only: ncat, nfsd, nfreq - use ice_dyn_eap , only: init_eap, alloc_dyn_eap - use ice_dyn_shared , only: kdyn, init_dyn + use ice_dyn_eap , only: init_eap + use ice_dyn_evp , only: init_evp use ice_dyn_vp , only: init_vp + use ice_dyn_shared , only: kdyn use ice_flux , only: init_coupler_flux, init_history_therm use ice_flux , only: init_history_dyn, init_flux_atm, init_flux_ocn use ice_forcing , only: init_snowtable @@ -107,9 +106,9 @@ subroutine cice_init2() call init_calendar ! initialize some calendar stuff call init_hist (dt) ! initialize output history file - call init_dyn (dt_dyn) ! define dynamics parameters, variables + if (kdyn == 1) then + call init_evp ! allocate dyn_evp arrays if (kdyn == 2) then - call alloc_dyn_eap ! allocate dyn_eap arrays call init_eap ! define eap dynamics parameters, variables else if (kdyn == 3) then call init_vp ! define vp dynamics parameters, variables diff --git a/cicecore/drivers/nuopc/dmi/CICE_InitMod.F90 b/cicecore/drivers/nuopc/dmi/CICE_InitMod.F90 index 22596429d..02356e2ba 100644 --- a/cicecore/drivers/nuopc/dmi/CICE_InitMod.F90 +++ b/cicecore/drivers/nuopc/dmi/CICE_InitMod.F90 @@ -15,6 +15,7 @@ module CICE_InitMod use ice_kinds_mod use ice_exit, only: abort_ice use ice_fileunits, only: init_fileunits, nu_diag + use ice_memusage, only: ice_memusage_init, ice_memusage_print use icepack_intfc, only: icepack_aggregate use icepack_intfc, only: icepack_init_itd, icepack_init_itd_hist use icepack_intfc, only: icepack_init_fsd_bounds, icepack_init_wave @@ -47,9 +48,9 @@ subroutine CICE_Initialize(mpi_comm) integer (kind=int_kind), optional, intent(in) :: mpi_comm ! communicator from nuopc character(len=*), parameter :: subname='(CICE_Initialize)' - !-------------------------------------------------------------------- - ! 
model initialization - !-------------------------------------------------------------------- + !-------------------------------------------------------------------- + ! model initialization + !-------------------------------------------------------------------- if (present(mpi_comm)) then call cice_init(mpi_comm) @@ -70,15 +71,16 @@ subroutine cice_init(mpi_comm) floe_binwidth, c_fsd_range use ice_state, only: alloc_state use ice_flux_bgc, only: alloc_flux_bgc - use ice_calendar, only: dt, dt_dyn, istep, istep1, write_ic, & + use ice_calendar, only: dt, dt_dyn, write_ic, & init_calendar, advance_timestep, calc_timesteps use ice_communicate, only: init_communicate, my_task, master_task use ice_diagnostics, only: init_diags use ice_domain, only: init_domain_blocks use ice_domain_size, only: ncat, nfsd - use ice_dyn_eap, only: init_eap, alloc_dyn_eap - use ice_dyn_shared, only: kdyn, init_dyn, alloc_dyn_shared + use ice_dyn_eap, only: init_eap + use ice_dyn_evp, only: init_evp use ice_dyn_vp, only: init_vp + use ice_dyn_shared, only: kdyn use ice_flux, only: init_coupler_flux, init_history_therm, & init_history_dyn, init_flux_atm, init_flux_ocn, alloc_flux use ice_forcing, only: init_forcing_ocn, init_forcing_atmo, & @@ -122,12 +124,17 @@ subroutine cice_init(mpi_comm) call input_zbgc ! vertical biogeochemistry namelist call count_tracers ! count tracers + ! Call this as early as possible, must be after memory_stats is read + if (my_task == master_task) then + call ice_memusage_init(nu_diag) + call ice_memusage_print(nu_diag,subname//':start') + endif + call init_domain_blocks ! set up block decomposition call init_grid1 ! domain distribution call alloc_grid ! allocate grid arrays call alloc_arrays_column ! allocate column arrays call alloc_state ! allocate state arrays - call alloc_dyn_shared ! allocate dyn shared arrays call alloc_flux_bgc ! allocate flux_bgc arrays call alloc_flux ! allocate flux arrays call init_ice_timers ! initialize all timers @@ -137,9 +144,9 @@ subroutine cice_init(mpi_comm) call init_calendar ! initialize some calendar stuff call init_hist (dt) ! initialize output history file - call init_dyn (dt_dyn) ! define dynamics parameters, variables - if (kdyn == 2) then - call alloc_dyn_eap ! allocate dyn_eap arrays + if (kdyn == 1) then + call init_evp + else if (kdyn == 2) then call init_eap ! define eap dynamics parameters, variables else if (kdyn == 3) then call init_vp ! define vp dynamics parameters, variables @@ -254,6 +261,10 @@ subroutine cice_init(mpi_comm) if (write_ic) call accum_hist(dt) ! 
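! [editor's caution] one inconsistency in the nuopc/cmeps hunk above:
! "if (kdyn == 1) then / call init_evp" is added without a matching "else"
! before the pre-existing "if (kdyn == 2) then", which appears to nest the
! EAP test inside the EVP block (and leaves the construct one endif short).
! The other drivers all use "else if (kdyn == 2) then".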
write initial conditions + if (my_task == master_task) then + call ice_memusage_print(nu_diag,subname//':end') + endif + end subroutine cice_init !======================================================================= diff --git a/cicecore/drivers/standalone/cice/CICE_InitMod.F90 b/cicecore/drivers/standalone/cice/CICE_InitMod.F90 index 9c30b15a3..c91dae4b4 100644 --- a/cicecore/drivers/standalone/cice/CICE_InitMod.F90 +++ b/cicecore/drivers/standalone/cice/CICE_InitMod.F90 @@ -72,9 +72,10 @@ subroutine cice_init use ice_diagnostics, only: init_diags use ice_domain, only: init_domain_blocks use ice_domain_size, only: ncat, nfsd - use ice_dyn_eap, only: init_eap, alloc_dyn_eap - use ice_dyn_shared, only: kdyn, init_dyn, alloc_dyn_shared + use ice_dyn_eap, only: init_eap + use ice_dyn_evp, only: init_evp use ice_dyn_vp, only: init_vp + use ice_dyn_shared, only: kdyn use ice_flux, only: init_coupler_flux, init_history_therm, & init_history_dyn, init_flux_atm, init_flux_ocn, alloc_flux use ice_forcing, only: init_forcing_ocn, init_forcing_atmo, & @@ -122,7 +123,6 @@ subroutine cice_init call alloc_grid ! allocate grid arrays call alloc_arrays_column ! allocate column arrays call alloc_state ! allocate state arrays - call alloc_dyn_shared ! allocate dyn shared arrays call alloc_flux_bgc ! allocate flux_bgc arrays call alloc_flux ! allocate flux arrays call init_ice_timers ! initialize all timers @@ -132,9 +132,9 @@ subroutine cice_init call init_calendar ! initialize some calendar stuff call init_hist (dt) ! initialize output history file - call init_dyn (dt_dyn) ! define dynamics parameters, variables - if (kdyn == 2) then - call alloc_dyn_eap ! allocate dyn_eap arrays + if (kdyn == 1) then + call init_evp + else if (kdyn == 2) then call init_eap ! define eap dynamics parameters, variables else if (kdyn == 3) then call init_vp ! define vp dynamics parameters, variables diff --git a/cicecore/drivers/unittest/gridavgchk/CICE_InitMod.F90 b/cicecore/drivers/unittest/gridavgchk/CICE_InitMod.F90 index 84d1a3a60..c65f04150 100644 --- a/cicecore/drivers/unittest/gridavgchk/CICE_InitMod.F90 +++ b/cicecore/drivers/unittest/gridavgchk/CICE_InitMod.F90 @@ -70,9 +70,10 @@ subroutine cice_init use ice_diagnostics, only: init_diags use ice_domain, only: init_domain_blocks use ice_domain_size, only: ncat, nfsd - use ice_dyn_eap, only: init_eap, alloc_dyn_eap - use ice_dyn_shared, only: kdyn, init_dyn, alloc_dyn_shared + use ice_dyn_eap, only: init_eap + use ice_dyn_evp, only: init_evp use ice_dyn_vp, only: init_vp + use ice_dyn_shared, only: kdyn use ice_flux, only: init_coupler_flux, init_history_therm, & init_history_dyn, init_flux_atm, init_flux_ocn, alloc_flux use ice_forcing, only: init_forcing_ocn, init_forcing_atmo, & @@ -113,7 +114,6 @@ subroutine cice_init call alloc_grid ! allocate grid arrays call alloc_arrays_column ! allocate column arrays call alloc_state ! allocate state arrays - call alloc_dyn_shared ! allocate dyn shared arrays call alloc_flux_bgc ! allocate flux_bgc arrays call alloc_flux ! allocate flux arrays call init_ice_timers ! initialize all timers @@ -123,9 +123,9 @@ subroutine cice_init call init_calendar ! initialize some calendar stuff call init_hist (dt) ! initialize output history file - call init_dyn (dt_dyn) ! define dynamics parameters, variables - if (kdyn == 2) then - call alloc_dyn_eap ! allocate dyn_eap arrays + if (kdyn == 1) then + call init_evp + else if (kdyn == 2) then call init_eap ! define eap dynamics parameters, variables else if (kdyn == 3) then call init_vp ! 
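! [editor's note] in the sumchk hunk below, the added
! "use ice_dyn_vp, only: init_vp" duplicates the pre-existing
! "use ice_dyn_vp, only: init_vp" context line that follows; harmless to
! the compiler, but one of the two is redundant.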
define vp dynamics parameters, variables diff --git a/cicecore/drivers/unittest/sumchk/CICE_InitMod.F90 b/cicecore/drivers/unittest/sumchk/CICE_InitMod.F90 index 84d1a3a60..f0877d502 100644 --- a/cicecore/drivers/unittest/sumchk/CICE_InitMod.F90 +++ b/cicecore/drivers/unittest/sumchk/CICE_InitMod.F90 @@ -70,8 +70,10 @@ subroutine cice_init use ice_diagnostics, only: init_diags use ice_domain, only: init_domain_blocks use ice_domain_size, only: ncat, nfsd - use ice_dyn_eap, only: init_eap, alloc_dyn_eap - use ice_dyn_shared, only: kdyn, init_dyn, alloc_dyn_shared + use ice_dyn_eap, only: init_eap + use ice_dyn_evp, only: init_evp + use ice_dyn_vp, only: init_vp + use ice_dyn_shared, only: kdyn use ice_dyn_vp, only: init_vp use ice_flux, only: init_coupler_flux, init_history_therm, & init_history_dyn, init_flux_atm, init_flux_ocn, alloc_flux @@ -113,7 +115,6 @@ subroutine cice_init call alloc_grid ! allocate grid arrays call alloc_arrays_column ! allocate column arrays call alloc_state ! allocate state arrays - call alloc_dyn_shared ! allocate dyn shared arrays call alloc_flux_bgc ! allocate flux_bgc arrays call alloc_flux ! allocate flux arrays call init_ice_timers ! initialize all timers @@ -123,9 +124,9 @@ subroutine cice_init call init_calendar ! initialize some calendar stuff call init_hist (dt) ! initialize output history file - call init_dyn (dt_dyn) ! define dynamics parameters, variables - if (kdyn == 2) then - call alloc_dyn_eap ! allocate dyn_eap arrays + if (kdyn == 1) then + call init_evp + else if (kdyn == 2) then call init_eap ! define eap dynamics parameters, variables else if (kdyn == 3) then call init_vp ! define vp dynamics parameters, variables diff --git a/configuration/scripts/options/set_nml.dyneap b/configuration/scripts/options/set_nml.dyneap new file mode 100644 index 000000000..0a5140ac7 --- /dev/null +++ b/configuration/scripts/options/set_nml.dyneap @@ -0,0 +1,2 @@ +kdyn = 2 + From 9104a8ce966c08545857596e69530c8f52b260e0 Mon Sep 17 00:00:00 2001 From: Tony Craig Date: Fri, 18 Nov 2022 08:18:39 -0800 Subject: [PATCH 39/57] Rename cicedynB to cicedyn, update test suites (#790) * Rename cicedynB to cicedyn, update test suites - Rename cicedynB directory to cicedyn (See #660) - Add softlink for cicedynB - Update path in cice.build - Update documentation - Fix sig1, sig2, sigP grid on history file (See #789) - Fix Fortran warning messages for long lines - Fix test suite order in cice.setup - Update test suites to reduce bfbcomp failures due to time outs - Add tests to first_suite to help - Add dyneap and dynpicard decomp tests, add set_nml.dyneap - Add TAB check in github actions in all .F90 and .c files github actions will fail if source files have TABs * Remove tabs * Update documentation * Update documentation --- .github/workflows/test-cice.yml | 14 +++ cice.setup | 17 ++- .../analysis/ice_diagnostics.F90 | 0 .../analysis/ice_diagnostics_bgc.F90 | 0 .../analysis/ice_history.F90 | 12 +- .../analysis/ice_history_bgc.F90 | 0 .../analysis/ice_history_drag.F90 | 0 .../analysis/ice_history_fsd.F90 | 0 .../analysis/ice_history_mechred.F90 | 0 .../analysis/ice_history_pond.F90 | 0 .../analysis/ice_history_shared.F90 | 0 .../analysis/ice_history_snow.F90 | 0 .../dynamics/ice_dyn_eap.F90 | 0 .../dynamics/ice_dyn_evp.F90 | 0 .../dynamics/ice_dyn_evp_1d.F90 | 0 .../dynamics/ice_dyn_shared.F90 | 0 .../dynamics/ice_dyn_vp.F90 | 0 .../dynamics/ice_transport_driver.F90 | 0 .../dynamics/ice_transport_remap.F90 | 0 .../general/ice_flux.F90 | 0 .../general/ice_flux_bgc.F90 | 0 
.../general/ice_forcing.F90 | 4 +- .../general/ice_forcing_bgc.F90 | 0 .../general/ice_init.F90 | 9 +- .../general/ice_state.F90 | 0 .../general/ice_step_mod.F90 | 0 .../infrastructure/comm/mpi/ice_boundary.F90 | 3 - .../infrastructure/comm/mpi/ice_broadcast.F90 | 0 .../comm/mpi/ice_communicate.F90 | 0 .../infrastructure/comm/mpi/ice_exit.F90 | 0 .../comm/mpi/ice_gather_scatter.F90 | 0 .../comm/mpi/ice_global_reductions.F90 | 0 .../infrastructure/comm/mpi/ice_reprosum.F90 | 0 .../infrastructure/comm/mpi/ice_timers.F90 | 0 .../comm/serial/ice_boundary.F90 | 3 - .../comm/serial/ice_broadcast.F90 | 0 .../comm/serial/ice_communicate.F90 | 0 .../infrastructure/comm/serial/ice_exit.F90 | 0 .../comm/serial/ice_gather_scatter.F90 | 0 .../comm/serial/ice_global_reductions.F90 | 0 .../comm/serial/ice_reprosum.F90 | 0 .../infrastructure/comm/serial/ice_timers.F90 | 0 .../infrastructure/ice_blocks.F90 | 0 .../infrastructure/ice_domain.F90 | 0 .../infrastructure/ice_grid.F90 | 0 .../infrastructure/ice_memusage.F90 | 0 .../infrastructure/ice_memusage_gptl.c | 2 +- .../infrastructure/ice_read_write.F90 | 0 .../infrastructure/ice_restart_driver.F90 | 0 .../infrastructure/ice_restoring.F90 | 0 .../infrastructure/ice_shr_reprosum86.c | 0 .../io/io_binary/ice_history_write.F90 | 0 .../io/io_binary/ice_restart.F90 | 0 .../io/io_netcdf/ice_history_write.F90 | 0 .../io/io_netcdf/ice_restart.F90 | 0 .../io/io_pio2/ice_history_write.F90 | 0 .../infrastructure/io/io_pio2/ice_pio.F90 | 0 .../infrastructure/io/io_pio2/ice_restart.F90 | 0 cicecore/cicedynB | 1 + configuration/scripts/cice.build | 12 +- configuration/scripts/options/set_nml.dyneap | 1 - configuration/scripts/tests/decomp_suite.ts | 2 + configuration/scripts/tests/first_suite.ts | 6 + configuration/scripts/tests/gridsys_suite.ts | 13 ++- configuration/scripts/tests/omp_suite.ts | 108 ++++++++++-------- doc/source/developer_guide/dg_dynamics.rst | 22 ++-- doc/source/developer_guide/dg_forcing.rst | 2 +- doc/source/user_guide/ug_case_settings.rst | 14 +-- doc/source/user_guide/ug_implementation.rst | 2 +- doc/source/user_guide/ug_testing.rst | 83 ++++++++++---- doc/source/user_guide/ug_troubleshooting.rst | 20 ++++ 71 files changed, 231 insertions(+), 119 deletions(-) rename cicecore/{cicedynB => cicedyn}/analysis/ice_diagnostics.F90 (100%) rename cicecore/{cicedynB => cicedyn}/analysis/ice_diagnostics_bgc.F90 (100%) rename cicecore/{cicedynB => cicedyn}/analysis/ice_history.F90 (99%) rename cicecore/{cicedynB => cicedyn}/analysis/ice_history_bgc.F90 (100%) rename cicecore/{cicedynB => cicedyn}/analysis/ice_history_drag.F90 (100%) rename cicecore/{cicedynB => cicedyn}/analysis/ice_history_fsd.F90 (100%) rename cicecore/{cicedynB => cicedyn}/analysis/ice_history_mechred.F90 (100%) rename cicecore/{cicedynB => cicedyn}/analysis/ice_history_pond.F90 (100%) rename cicecore/{cicedynB => cicedyn}/analysis/ice_history_shared.F90 (100%) rename cicecore/{cicedynB => cicedyn}/analysis/ice_history_snow.F90 (100%) rename cicecore/{cicedynB => cicedyn}/dynamics/ice_dyn_eap.F90 (100%) rename cicecore/{cicedynB => cicedyn}/dynamics/ice_dyn_evp.F90 (100%) rename cicecore/{cicedynB => cicedyn}/dynamics/ice_dyn_evp_1d.F90 (100%) rename cicecore/{cicedynB => cicedyn}/dynamics/ice_dyn_shared.F90 (100%) rename cicecore/{cicedynB => cicedyn}/dynamics/ice_dyn_vp.F90 (100%) rename cicecore/{cicedynB => cicedyn}/dynamics/ice_transport_driver.F90 (100%) rename cicecore/{cicedynB => cicedyn}/dynamics/ice_transport_remap.F90 (100%) rename cicecore/{cicedynB => 
cicedyn}/general/ice_flux.F90 (100%) rename cicecore/{cicedynB => cicedyn}/general/ice_flux_bgc.F90 (100%) rename cicecore/{cicedynB => cicedyn}/general/ice_forcing.F90 (99%) rename cicecore/{cicedynB => cicedyn}/general/ice_forcing_bgc.F90 (100%) rename cicecore/{cicedynB => cicedyn}/general/ice_init.F90 (99%) rename cicecore/{cicedynB => cicedyn}/general/ice_state.F90 (100%) rename cicecore/{cicedynB => cicedyn}/general/ice_step_mod.F90 (100%) rename cicecore/{cicedynB => cicedyn}/infrastructure/comm/mpi/ice_boundary.F90 (99%) rename cicecore/{cicedynB => cicedyn}/infrastructure/comm/mpi/ice_broadcast.F90 (100%) rename cicecore/{cicedynB => cicedyn}/infrastructure/comm/mpi/ice_communicate.F90 (100%) rename cicecore/{cicedynB => cicedyn}/infrastructure/comm/mpi/ice_exit.F90 (100%) rename cicecore/{cicedynB => cicedyn}/infrastructure/comm/mpi/ice_gather_scatter.F90 (100%) rename cicecore/{cicedynB => cicedyn}/infrastructure/comm/mpi/ice_global_reductions.F90 (100%) rename cicecore/{cicedynB => cicedyn}/infrastructure/comm/mpi/ice_reprosum.F90 (100%) rename cicecore/{cicedynB => cicedyn}/infrastructure/comm/mpi/ice_timers.F90 (100%) rename cicecore/{cicedynB => cicedyn}/infrastructure/comm/serial/ice_boundary.F90 (99%) rename cicecore/{cicedynB => cicedyn}/infrastructure/comm/serial/ice_broadcast.F90 (100%) rename cicecore/{cicedynB => cicedyn}/infrastructure/comm/serial/ice_communicate.F90 (100%) rename cicecore/{cicedynB => cicedyn}/infrastructure/comm/serial/ice_exit.F90 (100%) rename cicecore/{cicedynB => cicedyn}/infrastructure/comm/serial/ice_gather_scatter.F90 (100%) rename cicecore/{cicedynB => cicedyn}/infrastructure/comm/serial/ice_global_reductions.F90 (100%) rename cicecore/{cicedynB => cicedyn}/infrastructure/comm/serial/ice_reprosum.F90 (100%) rename cicecore/{cicedynB => cicedyn}/infrastructure/comm/serial/ice_timers.F90 (100%) rename cicecore/{cicedynB => cicedyn}/infrastructure/ice_blocks.F90 (100%) rename cicecore/{cicedynB => cicedyn}/infrastructure/ice_domain.F90 (100%) rename cicecore/{cicedynB => cicedyn}/infrastructure/ice_grid.F90 (100%) rename cicecore/{cicedynB => cicedyn}/infrastructure/ice_memusage.F90 (100%) rename cicecore/{cicedynB => cicedyn}/infrastructure/ice_memusage_gptl.c (98%) rename cicecore/{cicedynB => cicedyn}/infrastructure/ice_read_write.F90 (100%) rename cicecore/{cicedynB => cicedyn}/infrastructure/ice_restart_driver.F90 (100%) rename cicecore/{cicedynB => cicedyn}/infrastructure/ice_restoring.F90 (100%) rename cicecore/{cicedynB => cicedyn}/infrastructure/ice_shr_reprosum86.c (100%) rename cicecore/{cicedynB => cicedyn}/infrastructure/io/io_binary/ice_history_write.F90 (100%) rename cicecore/{cicedynB => cicedyn}/infrastructure/io/io_binary/ice_restart.F90 (100%) rename cicecore/{cicedynB => cicedyn}/infrastructure/io/io_netcdf/ice_history_write.F90 (100%) rename cicecore/{cicedynB => cicedyn}/infrastructure/io/io_netcdf/ice_restart.F90 (100%) rename cicecore/{cicedynB => cicedyn}/infrastructure/io/io_pio2/ice_history_write.F90 (100%) rename cicecore/{cicedynB => cicedyn}/infrastructure/io/io_pio2/ice_pio.F90 (100%) rename cicecore/{cicedynB => cicedyn}/infrastructure/io/io_pio2/ice_restart.F90 (100%) create mode 120000 cicecore/cicedynB diff --git a/.github/workflows/test-cice.yml b/.github/workflows/test-cice.yml index c60a07721..70fdc4c14 100644 --- a/.github/workflows/test-cice.yml +++ b/.github/workflows/test-cice.yml @@ -74,6 +74,20 @@ jobs: ln -s ${GITHUB_WORKSPACE}/../CICE ${HOME}/cice # ls -al ${HOME}/ # ls -al ${GITHUB_WORKSPACE}/ + - 
name: check for tabs + run: | + cd $HOME/cice/cicecore + set cnt = 0 + set ffiles = `find -P . -iname "*.f*"` + set cfiles = `find -P . -iname "*.c*"` + foreach file ($ffiles $cfiles) + set fcnt = `sed -n '/\t/p' $file | wc -l` + @ cnt = $cnt + $fcnt + if ($fcnt > 0) then + echo "TAB found: $fcnt $file" + endif + end + exit $cnt - name: setup conda env shell: /bin/bash {0} run: | diff --git a/cice.setup b/cice.setup index 586fe3464..30da0ed2e 100755 --- a/cice.setup +++ b/cice.setup @@ -455,7 +455,22 @@ if ( ${dosuite} == 0 ) then set sets = "" else - set tarrays = `echo ${testsuite} | sed 's/,/ /g' | fmt -1 | sort -u` + # generate unique set of suites in tarrays in order they are set + set tarrays0 = `echo ${testsuite} | sed 's/,/ /g' | fmt -1 ` + #echo "${0}: tarrays0 = ${tarrays0}" + set tarrays = $tarrays0[1] + foreach t1 ( ${tarrays0} ) + set found = 0 + foreach t2 ( ${tarrays} ) + if ( ${t1} == ${t2} ) then + set found = 1 + endif + end + if ( ${found} == 0 ) then + set tarrays = ( ${tarrays} ${t1} ) + endif + end + #echo "${0}: tarrays = ${tarrays}" set testsuitecnt = 0 foreach tarray ( ${tarrays} ) @ testsuitecnt = ${testsuitecnt} + 1 diff --git a/cicecore/cicedynB/analysis/ice_diagnostics.F90 b/cicecore/cicedyn/analysis/ice_diagnostics.F90 similarity index 100% rename from cicecore/cicedynB/analysis/ice_diagnostics.F90 rename to cicecore/cicedyn/analysis/ice_diagnostics.F90 diff --git a/cicecore/cicedynB/analysis/ice_diagnostics_bgc.F90 b/cicecore/cicedyn/analysis/ice_diagnostics_bgc.F90 similarity index 100% rename from cicecore/cicedynB/analysis/ice_diagnostics_bgc.F90 rename to cicecore/cicedyn/analysis/ice_diagnostics_bgc.F90 diff --git a/cicecore/cicedynB/analysis/ice_history.F90 b/cicecore/cicedyn/analysis/ice_history.F90 similarity index 99% rename from cicecore/cicedynB/analysis/ice_history.F90 rename to cicecore/cicedyn/analysis/ice_history.F90 index f5e7d0d16..2142310b9 100644 --- a/cicecore/cicedynB/analysis/ice_history.F90 +++ b/cicecore/cicedyn/analysis/ice_history.F90 @@ -102,6 +102,8 @@ subroutine init_hist (dt) character (len=25) :: & cstr_gat, cstr_gau, cstr_gav, & ! mask area name for t, u, v atm grid (ga) cstr_got, cstr_gou, cstr_gov ! mask area name for t, u, v ocn grid (go) + character (len=25) :: & + gridstr2D, gridstr ! temporary string names character(len=char_len) :: description character(len=*), parameter :: subname = '(init_hist)' @@ -1307,21 +1309,25 @@ subroutine init_hist (dt) select case (grid_ice) case('B') description = ", on U grid (NE corner values)" + gridstr2d = trim(ustr2D) + gridstr = trim(ucstr) case ('CD','C') description = ", on T grid" + gridstr2d = trim(tstr2D) + gridstr = trim(tcstr) end select - call define_hist_field(n_sig1,"sig1","1",ustr2D, ucstr, & + call define_hist_field(n_sig1,"sig1","1",gridstr2d, gridstr, & "norm. principal stress 1", & "sig1 is instantaneous" // trim(description), c1, c0, & ns1, f_sig1) - call define_hist_field(n_sig2,"sig2","1",ustr2D, ucstr, & + call define_hist_field(n_sig2,"sig2","1",gridstr2d, gridstr, & "norm. 
principal stress 2", & "sig2 is instantaneous" // trim(description), c1, c0, & ns1, f_sig2) - call define_hist_field(n_sigP,"sigP","1",ustr2D, ucstr, & + call define_hist_field(n_sigP,"sigP","1",gridstr2d, gridstr, & "ice pressure", & "sigP is instantaneous" // trim(description), c1, c0, & ns1, f_sigP) diff --git a/cicecore/cicedynB/analysis/ice_history_bgc.F90 b/cicecore/cicedyn/analysis/ice_history_bgc.F90 similarity index 100% rename from cicecore/cicedynB/analysis/ice_history_bgc.F90 rename to cicecore/cicedyn/analysis/ice_history_bgc.F90 diff --git a/cicecore/cicedynB/analysis/ice_history_drag.F90 b/cicecore/cicedyn/analysis/ice_history_drag.F90 similarity index 100% rename from cicecore/cicedynB/analysis/ice_history_drag.F90 rename to cicecore/cicedyn/analysis/ice_history_drag.F90 diff --git a/cicecore/cicedynB/analysis/ice_history_fsd.F90 b/cicecore/cicedyn/analysis/ice_history_fsd.F90 similarity index 100% rename from cicecore/cicedynB/analysis/ice_history_fsd.F90 rename to cicecore/cicedyn/analysis/ice_history_fsd.F90 diff --git a/cicecore/cicedynB/analysis/ice_history_mechred.F90 b/cicecore/cicedyn/analysis/ice_history_mechred.F90 similarity index 100% rename from cicecore/cicedynB/analysis/ice_history_mechred.F90 rename to cicecore/cicedyn/analysis/ice_history_mechred.F90 diff --git a/cicecore/cicedynB/analysis/ice_history_pond.F90 b/cicecore/cicedyn/analysis/ice_history_pond.F90 similarity index 100% rename from cicecore/cicedynB/analysis/ice_history_pond.F90 rename to cicecore/cicedyn/analysis/ice_history_pond.F90 diff --git a/cicecore/cicedynB/analysis/ice_history_shared.F90 b/cicecore/cicedyn/analysis/ice_history_shared.F90 similarity index 100% rename from cicecore/cicedynB/analysis/ice_history_shared.F90 rename to cicecore/cicedyn/analysis/ice_history_shared.F90 diff --git a/cicecore/cicedynB/analysis/ice_history_snow.F90 b/cicecore/cicedyn/analysis/ice_history_snow.F90 similarity index 100% rename from cicecore/cicedynB/analysis/ice_history_snow.F90 rename to cicecore/cicedyn/analysis/ice_history_snow.F90 diff --git a/cicecore/cicedynB/dynamics/ice_dyn_eap.F90 b/cicecore/cicedyn/dynamics/ice_dyn_eap.F90 similarity index 100% rename from cicecore/cicedynB/dynamics/ice_dyn_eap.F90 rename to cicecore/cicedyn/dynamics/ice_dyn_eap.F90 diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedyn/dynamics/ice_dyn_evp.F90 similarity index 100% rename from cicecore/cicedynB/dynamics/ice_dyn_evp.F90 rename to cicecore/cicedyn/dynamics/ice_dyn_evp.F90 diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp_1d.F90 b/cicecore/cicedyn/dynamics/ice_dyn_evp_1d.F90 similarity index 100% rename from cicecore/cicedynB/dynamics/ice_dyn_evp_1d.F90 rename to cicecore/cicedyn/dynamics/ice_dyn_evp_1d.F90 diff --git a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 b/cicecore/cicedyn/dynamics/ice_dyn_shared.F90 similarity index 100% rename from cicecore/cicedynB/dynamics/ice_dyn_shared.F90 rename to cicecore/cicedyn/dynamics/ice_dyn_shared.F90 diff --git a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 b/cicecore/cicedyn/dynamics/ice_dyn_vp.F90 similarity index 100% rename from cicecore/cicedynB/dynamics/ice_dyn_vp.F90 rename to cicecore/cicedyn/dynamics/ice_dyn_vp.F90 diff --git a/cicecore/cicedynB/dynamics/ice_transport_driver.F90 b/cicecore/cicedyn/dynamics/ice_transport_driver.F90 similarity index 100% rename from cicecore/cicedynB/dynamics/ice_transport_driver.F90 rename to cicecore/cicedyn/dynamics/ice_transport_driver.F90 diff --git a/cicecore/cicedynB/dynamics/ice_transport_remap.F90 
b/cicecore/cicedyn/dynamics/ice_transport_remap.F90 similarity index 100% rename from cicecore/cicedynB/dynamics/ice_transport_remap.F90 rename to cicecore/cicedyn/dynamics/ice_transport_remap.F90 diff --git a/cicecore/cicedynB/general/ice_flux.F90 b/cicecore/cicedyn/general/ice_flux.F90 similarity index 100% rename from cicecore/cicedynB/general/ice_flux.F90 rename to cicecore/cicedyn/general/ice_flux.F90 diff --git a/cicecore/cicedynB/general/ice_flux_bgc.F90 b/cicecore/cicedyn/general/ice_flux_bgc.F90 similarity index 100% rename from cicecore/cicedynB/general/ice_flux_bgc.F90 rename to cicecore/cicedyn/general/ice_flux_bgc.F90 diff --git a/cicecore/cicedynB/general/ice_forcing.F90 b/cicecore/cicedyn/general/ice_forcing.F90 similarity index 99% rename from cicecore/cicedynB/general/ice_forcing.F90 rename to cicecore/cicedyn/general/ice_forcing.F90 index ff79778c5..3a2d83530 100755 --- a/cicecore/cicedynB/general/ice_forcing.F90 +++ b/cicecore/cicedyn/general/ice_forcing.F90 @@ -5684,7 +5684,7 @@ subroutine wave_spec_data integer (kind=int_kind) :: & ncid , & ! netcdf file id - i, j, freq , & + i, j, freq , & ixm,ixx,ixp , & ! record numbers for neighboring months recnum , & ! record number maxrec , & ! maximum record number @@ -5785,7 +5785,7 @@ subroutine wave_spec_data call ice_read_nc_xyf(ncid,recnum,'efreq',wave_spectrum_data(:,:,:,2,:),debug_n_d, & field_loc=field_loc_center, & field_type=field_type_scalar) - call ice_close_nc(ncid) + call ice_close_nc(ncid) ! Interpolate diff --git a/cicecore/cicedynB/general/ice_forcing_bgc.F90 b/cicecore/cicedyn/general/ice_forcing_bgc.F90 similarity index 100% rename from cicecore/cicedynB/general/ice_forcing_bgc.F90 rename to cicecore/cicedyn/general/ice_forcing_bgc.F90 diff --git a/cicecore/cicedynB/general/ice_init.F90 b/cicecore/cicedyn/general/ice_init.F90 similarity index 99% rename from cicecore/cicedynB/general/ice_init.F90 rename to cicecore/cicedyn/general/ice_init.F90 index 45ae58d8b..2bfe0d8e5 100644 --- a/cicecore/cicedynB/general/ice_init.F90 +++ b/cicecore/cicedyn/general/ice_init.F90 @@ -2027,11 +2027,14 @@ subroutine input_data if (trim(wave_spec_type) == 'none') then tmpstr2 = ' : no wave data provided, no wave-ice interactions' elseif (trim(wave_spec_type) == 'profile') then - tmpstr2 = ' : use fixed dummy wave spectrum for testing, sea surface height generated using constant phase (1 iteration of wave fracture)' + tmpstr2 = ' : use fixed dummy wave spectrum for testing, sea surface height generated '// & + 'using constant phase (1 iteration of wave fracture)' elseif (trim(wave_spec_type) == 'constant') then - tmpstr2 = ' : wave spectrum data file provided, sea surface height generated using constant phase (1 iteration of wave fracture)' + tmpstr2 = ' : wave spectrum data file provided, sea surface height generated '// & + 'using constant phase (1 iteration of wave fracture)' elseif (trim(wave_spec_type) == 'random') then - tmpstr2 = ' : wave spectrum data file provided, sea surface height generated using random number (multiple iterations of wave fracture to convergence)' + tmpstr2 = ' : wave spectrum data file provided, sea surface height generated using '// & + 'random number (multiple iterations of wave fracture to convergence)' else tmpstr2 = ' : unknown value' endif diff --git a/cicecore/cicedynB/general/ice_state.F90 b/cicecore/cicedyn/general/ice_state.F90 similarity index 100% rename from cicecore/cicedynB/general/ice_state.F90 rename to cicecore/cicedyn/general/ice_state.F90 diff --git 
a/cicecore/cicedynB/general/ice_step_mod.F90 b/cicecore/cicedyn/general/ice_step_mod.F90 similarity index 100% rename from cicecore/cicedynB/general/ice_step_mod.F90 rename to cicecore/cicedyn/general/ice_step_mod.F90 diff --git a/cicecore/cicedynB/infrastructure/comm/mpi/ice_boundary.F90 b/cicecore/cicedyn/infrastructure/comm/mpi/ice_boundary.F90 similarity index 99% rename from cicecore/cicedynB/infrastructure/comm/mpi/ice_boundary.F90 rename to cicecore/cicedyn/infrastructure/comm/mpi/ice_boundary.F90 index 9fda67dad..68436cd0f 100644 --- a/cicecore/cicedynB/infrastructure/comm/mpi/ice_boundary.F90 +++ b/cicecore/cicedyn/infrastructure/comm/mpi/ice_boundary.F90 @@ -6915,9 +6915,6 @@ subroutine primary_grid_lengths_global_ext( & ! This subroutine adds ghost cells to global primary grid lengths array ! ARRAY_I and outputs result to array ARRAY_O -! Note duplicate implementation of this subroutine in: -! cicecore/cicedynB/infrastructure/comm/serial/ice_boundary.F90 - use ice_constants, only: c0 use ice_domain_size, only: nx_global, ny_global diff --git a/cicecore/cicedynB/infrastructure/comm/mpi/ice_broadcast.F90 b/cicecore/cicedyn/infrastructure/comm/mpi/ice_broadcast.F90 similarity index 100% rename from cicecore/cicedynB/infrastructure/comm/mpi/ice_broadcast.F90 rename to cicecore/cicedyn/infrastructure/comm/mpi/ice_broadcast.F90 diff --git a/cicecore/cicedynB/infrastructure/comm/mpi/ice_communicate.F90 b/cicecore/cicedyn/infrastructure/comm/mpi/ice_communicate.F90 similarity index 100% rename from cicecore/cicedynB/infrastructure/comm/mpi/ice_communicate.F90 rename to cicecore/cicedyn/infrastructure/comm/mpi/ice_communicate.F90 diff --git a/cicecore/cicedynB/infrastructure/comm/mpi/ice_exit.F90 b/cicecore/cicedyn/infrastructure/comm/mpi/ice_exit.F90 similarity index 100% rename from cicecore/cicedynB/infrastructure/comm/mpi/ice_exit.F90 rename to cicecore/cicedyn/infrastructure/comm/mpi/ice_exit.F90 diff --git a/cicecore/cicedynB/infrastructure/comm/mpi/ice_gather_scatter.F90 b/cicecore/cicedyn/infrastructure/comm/mpi/ice_gather_scatter.F90 similarity index 100% rename from cicecore/cicedynB/infrastructure/comm/mpi/ice_gather_scatter.F90 rename to cicecore/cicedyn/infrastructure/comm/mpi/ice_gather_scatter.F90 diff --git a/cicecore/cicedynB/infrastructure/comm/mpi/ice_global_reductions.F90 b/cicecore/cicedyn/infrastructure/comm/mpi/ice_global_reductions.F90 similarity index 100% rename from cicecore/cicedynB/infrastructure/comm/mpi/ice_global_reductions.F90 rename to cicecore/cicedyn/infrastructure/comm/mpi/ice_global_reductions.F90 diff --git a/cicecore/cicedynB/infrastructure/comm/mpi/ice_reprosum.F90 b/cicecore/cicedyn/infrastructure/comm/mpi/ice_reprosum.F90 similarity index 100% rename from cicecore/cicedynB/infrastructure/comm/mpi/ice_reprosum.F90 rename to cicecore/cicedyn/infrastructure/comm/mpi/ice_reprosum.F90 diff --git a/cicecore/cicedynB/infrastructure/comm/mpi/ice_timers.F90 b/cicecore/cicedyn/infrastructure/comm/mpi/ice_timers.F90 similarity index 100% rename from cicecore/cicedynB/infrastructure/comm/mpi/ice_timers.F90 rename to cicecore/cicedyn/infrastructure/comm/mpi/ice_timers.F90 diff --git a/cicecore/cicedynB/infrastructure/comm/serial/ice_boundary.F90 b/cicecore/cicedyn/infrastructure/comm/serial/ice_boundary.F90 similarity index 99% rename from cicecore/cicedynB/infrastructure/comm/serial/ice_boundary.F90 rename to cicecore/cicedyn/infrastructure/comm/serial/ice_boundary.F90 index f10a9f432..2b81c4441 100644 --- 
a/cicecore/cicedynB/infrastructure/comm/serial/ice_boundary.F90 +++ b/cicecore/cicedyn/infrastructure/comm/serial/ice_boundary.F90 @@ -4686,9 +4686,6 @@ subroutine primary_grid_lengths_global_ext( & ! This subroutine adds ghost cells to global primary grid lengths array ! ARRAY_I and outputs result to array ARRAY_O -! Note duplicate implementation of this subroutine in: -! cicecore/cicedynB/infrastructure/comm/mpi/ice_boundary.F90 - use ice_constants, only: c0 use ice_domain_size, only: nx_global, ny_global diff --git a/cicecore/cicedynB/infrastructure/comm/serial/ice_broadcast.F90 b/cicecore/cicedyn/infrastructure/comm/serial/ice_broadcast.F90 similarity index 100% rename from cicecore/cicedynB/infrastructure/comm/serial/ice_broadcast.F90 rename to cicecore/cicedyn/infrastructure/comm/serial/ice_broadcast.F90 diff --git a/cicecore/cicedynB/infrastructure/comm/serial/ice_communicate.F90 b/cicecore/cicedyn/infrastructure/comm/serial/ice_communicate.F90 similarity index 100% rename from cicecore/cicedynB/infrastructure/comm/serial/ice_communicate.F90 rename to cicecore/cicedyn/infrastructure/comm/serial/ice_communicate.F90 diff --git a/cicecore/cicedynB/infrastructure/comm/serial/ice_exit.F90 b/cicecore/cicedyn/infrastructure/comm/serial/ice_exit.F90 similarity index 100% rename from cicecore/cicedynB/infrastructure/comm/serial/ice_exit.F90 rename to cicecore/cicedyn/infrastructure/comm/serial/ice_exit.F90 diff --git a/cicecore/cicedynB/infrastructure/comm/serial/ice_gather_scatter.F90 b/cicecore/cicedyn/infrastructure/comm/serial/ice_gather_scatter.F90 similarity index 100% rename from cicecore/cicedynB/infrastructure/comm/serial/ice_gather_scatter.F90 rename to cicecore/cicedyn/infrastructure/comm/serial/ice_gather_scatter.F90 diff --git a/cicecore/cicedynB/infrastructure/comm/serial/ice_global_reductions.F90 b/cicecore/cicedyn/infrastructure/comm/serial/ice_global_reductions.F90 similarity index 100% rename from cicecore/cicedynB/infrastructure/comm/serial/ice_global_reductions.F90 rename to cicecore/cicedyn/infrastructure/comm/serial/ice_global_reductions.F90 diff --git a/cicecore/cicedynB/infrastructure/comm/serial/ice_reprosum.F90 b/cicecore/cicedyn/infrastructure/comm/serial/ice_reprosum.F90 similarity index 100% rename from cicecore/cicedynB/infrastructure/comm/serial/ice_reprosum.F90 rename to cicecore/cicedyn/infrastructure/comm/serial/ice_reprosum.F90 diff --git a/cicecore/cicedynB/infrastructure/comm/serial/ice_timers.F90 b/cicecore/cicedyn/infrastructure/comm/serial/ice_timers.F90 similarity index 100% rename from cicecore/cicedynB/infrastructure/comm/serial/ice_timers.F90 rename to cicecore/cicedyn/infrastructure/comm/serial/ice_timers.F90 diff --git a/cicecore/cicedynB/infrastructure/ice_blocks.F90 b/cicecore/cicedyn/infrastructure/ice_blocks.F90 similarity index 100% rename from cicecore/cicedynB/infrastructure/ice_blocks.F90 rename to cicecore/cicedyn/infrastructure/ice_blocks.F90 diff --git a/cicecore/cicedynB/infrastructure/ice_domain.F90 b/cicecore/cicedyn/infrastructure/ice_domain.F90 similarity index 100% rename from cicecore/cicedynB/infrastructure/ice_domain.F90 rename to cicecore/cicedyn/infrastructure/ice_domain.F90 diff --git a/cicecore/cicedynB/infrastructure/ice_grid.F90 b/cicecore/cicedyn/infrastructure/ice_grid.F90 similarity index 100% rename from cicecore/cicedynB/infrastructure/ice_grid.F90 rename to cicecore/cicedyn/infrastructure/ice_grid.F90 diff --git a/cicecore/cicedynB/infrastructure/ice_memusage.F90 b/cicecore/cicedyn/infrastructure/ice_memusage.F90 
similarity index 100% rename from cicecore/cicedynB/infrastructure/ice_memusage.F90 rename to cicecore/cicedyn/infrastructure/ice_memusage.F90 diff --git a/cicecore/cicedynB/infrastructure/ice_memusage_gptl.c b/cicecore/cicedyn/infrastructure/ice_memusage_gptl.c similarity index 98% rename from cicecore/cicedynB/infrastructure/ice_memusage_gptl.c rename to cicecore/cicedyn/infrastructure/ice_memusage_gptl.c index 309c8824b..32b31171d 100644 --- a/cicecore/cicedynB/infrastructure/ice_memusage_gptl.c +++ b/cicecore/cicedyn/infrastructure/ice_memusage_gptl.c @@ -196,7 +196,7 @@ int ice_memusage_gptl (int *size, int *rss, int *share, int *text, int *datastac */ ret = fscanf (fd, "%d %d %d %d %d %d %d", - size, rss, share, text, datastack, &dum, &dum); + size, rss, share, text, datastack, &dum, &dum); ret = fclose (fd); return 0; diff --git a/cicecore/cicedynB/infrastructure/ice_read_write.F90 b/cicecore/cicedyn/infrastructure/ice_read_write.F90 similarity index 100% rename from cicecore/cicedynB/infrastructure/ice_read_write.F90 rename to cicecore/cicedyn/infrastructure/ice_read_write.F90 diff --git a/cicecore/cicedynB/infrastructure/ice_restart_driver.F90 b/cicecore/cicedyn/infrastructure/ice_restart_driver.F90 similarity index 100% rename from cicecore/cicedynB/infrastructure/ice_restart_driver.F90 rename to cicecore/cicedyn/infrastructure/ice_restart_driver.F90 diff --git a/cicecore/cicedynB/infrastructure/ice_restoring.F90 b/cicecore/cicedyn/infrastructure/ice_restoring.F90 similarity index 100% rename from cicecore/cicedynB/infrastructure/ice_restoring.F90 rename to cicecore/cicedyn/infrastructure/ice_restoring.F90 diff --git a/cicecore/cicedynB/infrastructure/ice_shr_reprosum86.c b/cicecore/cicedyn/infrastructure/ice_shr_reprosum86.c similarity index 100% rename from cicecore/cicedynB/infrastructure/ice_shr_reprosum86.c rename to cicecore/cicedyn/infrastructure/ice_shr_reprosum86.c diff --git a/cicecore/cicedynB/infrastructure/io/io_binary/ice_history_write.F90 b/cicecore/cicedyn/infrastructure/io/io_binary/ice_history_write.F90 similarity index 100% rename from cicecore/cicedynB/infrastructure/io/io_binary/ice_history_write.F90 rename to cicecore/cicedyn/infrastructure/io/io_binary/ice_history_write.F90 diff --git a/cicecore/cicedynB/infrastructure/io/io_binary/ice_restart.F90 b/cicecore/cicedyn/infrastructure/io/io_binary/ice_restart.F90 similarity index 100% rename from cicecore/cicedynB/infrastructure/io/io_binary/ice_restart.F90 rename to cicecore/cicedyn/infrastructure/io/io_binary/ice_restart.F90 diff --git a/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_history_write.F90 b/cicecore/cicedyn/infrastructure/io/io_netcdf/ice_history_write.F90 similarity index 100% rename from cicecore/cicedynB/infrastructure/io/io_netcdf/ice_history_write.F90 rename to cicecore/cicedyn/infrastructure/io/io_netcdf/ice_history_write.F90 diff --git a/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_restart.F90 b/cicecore/cicedyn/infrastructure/io/io_netcdf/ice_restart.F90 similarity index 100% rename from cicecore/cicedynB/infrastructure/io/io_netcdf/ice_restart.F90 rename to cicecore/cicedyn/infrastructure/io/io_netcdf/ice_restart.F90 diff --git a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_history_write.F90 b/cicecore/cicedyn/infrastructure/io/io_pio2/ice_history_write.F90 similarity index 100% rename from cicecore/cicedynB/infrastructure/io/io_pio2/ice_history_write.F90 rename to cicecore/cicedyn/infrastructure/io/io_pio2/ice_history_write.F90 diff --git 
a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_pio.F90 b/cicecore/cicedyn/infrastructure/io/io_pio2/ice_pio.F90 similarity index 100% rename from cicecore/cicedynB/infrastructure/io/io_pio2/ice_pio.F90 rename to cicecore/cicedyn/infrastructure/io/io_pio2/ice_pio.F90 diff --git a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 b/cicecore/cicedyn/infrastructure/io/io_pio2/ice_restart.F90 similarity index 100% rename from cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 rename to cicecore/cicedyn/infrastructure/io/io_pio2/ice_restart.F90 diff --git a/cicecore/cicedynB b/cicecore/cicedynB new file mode 120000 index 000000000..70695ca4b --- /dev/null +++ b/cicecore/cicedynB @@ -0,0 +1 @@ +cicedyn \ No newline at end of file diff --git a/configuration/scripts/cice.build b/configuration/scripts/cice.build index d75d74253..66b7b1321 100755 --- a/configuration/scripts/cice.build +++ b/configuration/scripts/cice.build @@ -128,12 +128,12 @@ endif ### List of source code directories (in order of importance). cat >! Filepath << EOF ${ICE_SANDBOX}/cicecore/drivers/${ICE_DRVOPT} -${ICE_SANDBOX}/cicecore/cicedynB/dynamics -${ICE_SANDBOX}/cicecore/cicedynB/general -${ICE_SANDBOX}/cicecore/cicedynB/analysis -${ICE_SANDBOX}/cicecore/cicedynB/infrastructure -${ICE_SANDBOX}/cicecore/cicedynB/infrastructure/io/$IODIR -${ICE_SANDBOX}/cicecore/cicedynB/infrastructure/comm/${ICE_COMMDIR} +${ICE_SANDBOX}/cicecore/cicedyn/dynamics +${ICE_SANDBOX}/cicecore/cicedyn/general +${ICE_SANDBOX}/cicecore/cicedyn/analysis +${ICE_SANDBOX}/cicecore/cicedyn/infrastructure +${ICE_SANDBOX}/cicecore/cicedyn/infrastructure/io/$IODIR +${ICE_SANDBOX}/cicecore/cicedyn/infrastructure/comm/${ICE_COMMDIR} ${ICE_SANDBOX}/cicecore/shared ${ICE_SANDBOX}/icepack/columnphysics EOF diff --git a/configuration/scripts/options/set_nml.dyneap b/configuration/scripts/options/set_nml.dyneap index 0a5140ac7..6ebab625e 100644 --- a/configuration/scripts/options/set_nml.dyneap +++ b/configuration/scripts/options/set_nml.dyneap @@ -1,2 +1 @@ kdyn = 2 - diff --git a/configuration/scripts/tests/decomp_suite.ts b/configuration/scripts/tests/decomp_suite.ts index c39c3ddfe..8d47506d6 100644 --- a/configuration/scripts/tests/decomp_suite.ts +++ b/configuration/scripts/tests/decomp_suite.ts @@ -3,6 +3,8 @@ restart gx3 4x2x25x29x4 dslenderX2 restart gx1 64x1x16x16x10 dwghtfile restart gbox180 16x1x6x6x60 dspacecurve,debugblocks decomp gx3 4x2x25x29x5 none +decomp gx3 4x2x25x29x5 dynpicard,reprosum +decomp gx3 4x2x25x29x5 dyneap restart gx3 1x1x50x58x4 droundrobin,thread restart_gx3_4x2x25x29x4_dslenderX2 restart gx3 4x1x25x116x1 dslenderX1,thread restart_gx3_4x2x25x29x4_dslenderX2 restart gx3 6x2x4x29x18 dspacecurve restart_gx3_4x2x25x29x4_dslenderX2 diff --git a/configuration/scripts/tests/first_suite.ts b/configuration/scripts/tests/first_suite.ts index b42d917ea..d9db20f6d 100644 --- a/configuration/scripts/tests/first_suite.ts +++ b/configuration/scripts/tests/first_suite.ts @@ -4,3 +4,9 @@ restart gx3 4x2x25x29x4 dslenderX2 smoke gx3 4x2x25x29x4 debug,run2day,dslenderX2 smoke gx3 4x2x25x29x4 dslenderX2,diag1,reprosum,cmplog smoke gx3 1x2 run2day +smoke gx3 1x1x100x116x1 reprosum,run10day +smoke gx1 32x1x16x16x32 reprosum,run10day +smoke gx3 1x1x100x116x1 reprosum,run10day,gridcd +smoke gx1 32x1x16x16x32 reprosum,run10day,gridcd +smoke gx3 1x1x100x116x1 reprosum,run10day,gridc +smoke gx1 32x1x16x16x32 reprosum,run10day,gridc diff --git a/configuration/scripts/tests/gridsys_suite.ts b/configuration/scripts/tests/gridsys_suite.ts 
index d9752073f..faf01344a 100644 --- a/configuration/scripts/tests/gridsys_suite.ts +++ b/configuration/scripts/tests/gridsys_suite.ts @@ -1,4 +1,11 @@ # Test Grid PEs Sets BFB-compare +smoke gx3 1x1x100x116x1 reprosum,run10day +smoke gx1 32x1x16x16x32 reprosum,run10day +smoke gx3 1x1x100x116x1 reprosum,run10day,gridcd +smoke gx1 32x1x16x16x32 reprosum,run10day,gridcd +smoke gx3 1x1x100x116x1 reprosum,run10day,gridc +smoke gx1 32x1x16x16x32 reprosum,run10day,gridc + smoke gx3 8x2 diag1,run5day smoke gx3 8x4 diag1,run5day,debug restart gx3 4x2 debug,diag1 @@ -12,11 +19,9 @@ smoke gbox80 4x2 boxclosed,boxforcee,run1day smoke gbox80 4x1 boxclosed,boxforcene,run1day,kmtislands smoke gbox80 4x2 boxopen,kmtislands,boxforcee,run1day smoke gbox80 2x2 boxclosed,boxforcen,run1day,vargrid -smoke gx3 1x1x100x116x1 reprosum,run10day smoke gx3 1x1x25x29x16 reprosum,run10day,dwblockall smoke_gx3_1x1x100x116x1_reprosum_run10day smoke gx3 1x1x5x4x580 reprosum,run10day,dwblockall smoke_gx3_1x1x100x116x1_reprosum_run10day smoke gx3 1x1x5x4x580 reprosum,run10day smoke_gx3_1x1x100x116x1_reprosum_run10day -smoke gx1 32x1x16x16x32 reprosum,run10day smoke gx1 32x1x16x16x32 reprosum,run10day,cmplogrest,dwblockall smoke_gx1_32x1x16x16x32_reprosum_run10day smoke gx1 32x1x16x12x40 reprosum,run10day,cmplogrest,dwblockall smoke_gx1_32x1x16x16x32_reprosum_run10day smoke gx1 32x1x16x12x40 reprosum,run10day,cmplogrest smoke_gx1_32x1x16x16x32_reprosum_run10day @@ -34,11 +39,9 @@ smoke gbox80 4x2 boxclosed,boxforcee,run1day,gridcd smoke gbox80 4x1 boxclosed,boxforcene,run1day,kmtislands,gridcd smoke gbox80 4x2 boxopen,kmtislands,boxforcee,run1day,gridcd smoke gbox80 2x2 boxclosed,boxforcen,run1day,vargrid,gridcd -smoke gx3 1x1x100x116x1 reprosum,run10day,gridcd smoke gx3 1x1x25x29x16 reprosum,run10day,dwblockall,gridcd smoke_gx3_1x1x100x116x1_gridcd_reprosum_run10day smoke gx3 1x1x5x4x580 reprosum,run10day,dwblockall,gridcd smoke_gx3_1x1x100x116x1_gridcd_reprosum_run10day smoke gx3 1x1x5x4x580 reprosum,run10day,gridcd smoke_gx3_1x1x100x116x1_gridcd_reprosum_run10day -smoke gx1 32x1x16x16x32 reprosum,run10day,gridcd smoke gx1 32x1x16x16x32 reprosum,run10day,cmplogrest,dwblockall,gridcd smoke_gx1_32x1x16x16x32_gridcd_reprosum_run10day smoke gx1 32x1x16x12x40 reprosum,run10day,cmplogrest,dwblockall,gridcd smoke_gx1_32x1x16x16x32_gridcd_reprosum_run10day smoke gx1 32x1x16x12x40 reprosum,run10day,cmplogrest,gridcd smoke_gx1_32x1x16x16x32_gridcd_reprosum_run10day @@ -56,11 +59,9 @@ smoke gbox80 4x2 boxclosed,boxforcee,run1day,gridc smoke gbox80 4x1 boxclosed,boxforcene,run1day,kmtislands,gridc smoke gbox80 4x2 boxopen,kmtislands,boxforcee,run1day,gridc smoke gbox80 2x2 boxclosed,boxforcen,run1day,vargrid,gridc -smoke gx3 1x1x100x116x1 reprosum,run10day,gridc smoke gx3 1x1x25x29x16 reprosum,run10day,dwblockall,gridc smoke_gx3_1x1x100x116x1_gridc_reprosum_run10day smoke gx3 1x1x5x4x580 reprosum,run10day,dwblockall,gridc smoke_gx3_1x1x100x116x1_gridc_reprosum_run10day smoke gx3 1x1x5x4x580 reprosum,run10day,gridc smoke_gx3_1x1x100x116x1_gridc_reprosum_run10day -smoke gx1 32x1x16x16x32 reprosum,run10day,gridc smoke gx1 32x1x16x16x32 reprosum,run10day,cmplogrest,dwblockall,gridc smoke_gx1_32x1x16x16x32_gridc_reprosum_run10day smoke gx1 32x1x16x12x40 reprosum,run10day,cmplogrest,dwblockall,gridc smoke_gx1_32x1x16x16x32_gridc_reprosum_run10day smoke gx1 32x1x16x12x40 reprosum,run10day,cmplogrest,gridc smoke_gx1_32x1x16x16x32_gridc_reprosum_run10day diff --git a/configuration/scripts/tests/omp_suite.ts 
b/configuration/scripts/tests/omp_suite.ts index 686fa72db..c9bbae0a2 100644 --- a/configuration/scripts/tests/omp_suite.ts +++ b/configuration/scripts/tests/omp_suite.ts @@ -1,5 +1,7 @@ # Test Grid PEs Sets BFB-compare +#gridB + smoke gx3 8x4 diag1,reprosum,run10day smoke gx3 6x2 alt01,reprosum,run10day smoke gx3 8x2 alt02,reprosum,run10day @@ -25,6 +27,62 @@ smoke gbox128 14x2 boxrestore,reprosum,run10day smoke gbox80 4x5 box2001,reprosum,run10day smoke gbox80 11x3 boxslotcyl,reprosum,run10day +#gridC + +smoke gx3 8x4 diag1,reprosum,run10day,gridc +smoke gx3 6x2 alt01,reprosum,run10day,gridc +smoke gx3 8x2 alt02,reprosum,run10day,gridc +#smoke gx3 12x2 alt03,droundrobin,reprosum,run10day,gridc +smoke gx3 4x4 alt04,reprosum,run10day,gridc +smoke gx3 4x4 alt05,reprosum,run10day,gridc +smoke gx3 8x2 alt06,reprosum,run10day,gridc +smoke gx3 7x2 alt07,reprosum,run10day,gridc +smoke gx3 8x2 bgczm,reprosum,run10day,gridc +smoke gx1 15x2 reprosum,run10day,gridc +smoke gx1 15x2 seabedprob,reprosum,run10day,gridc +smoke gx3 14x2 fsd12,reprosum,run10day,gridc +smoke gx3 11x2 isotope,reprosum,run10day,gridc +smoke gx3 8x4 snwitdrdg,snwgrain,icdefault,reprosum,run10day,gridc +#smoke gx3 6x4 dynpicard,reprosum,run10day,gridc +smoke gx3 8x3 zsal,reprosum,run10day,gridc +smoke gx3 1x1x100x116x1 reprosum,run10day,gridc,thread + +smoke gbox128 8x2 reprosum,run10day,gridc +smoke gbox128 12x2 boxnodyn,reprosum,run10day,gridc +#smoke gbox128 9x2 boxadv,reprosum,run10day,gridc +smoke gbox128 14x2 boxrestore,reprosum,run10day,gridc +smoke gbox80 4x5 box2001,reprosum,run10day,gridc +smoke gbox80 11x3 boxslotcyl,reprosum,run10day,gridc + +#gridCD + +smoke gx3 8x4 diag1,reprosum,run10day,gridcd +smoke gx3 6x2 alt01,reprosum,run10day,gridcd +smoke gx3 8x2 alt02,reprosum,run10day,gridcd +#smoke gx3 12x2 alt03,droundrobin,reprosum,run10day,gridcd +smoke gx3 4x4 alt04,reprosum,run10day,gridcd +smoke gx3 4x4 alt05,reprosum,run10day,gridcd +smoke gx3 8x2 alt06,reprosum,run10day,gridcd +smoke gx3 7x2 alt07,reprosum,run10day,gridcd +smoke gx3 8x2 bgczm,reprosum,run10day,gridcd +smoke gx1 15x2 reprosum,run10day,gridcd +smoke gx1 15x2 seabedprob,reprosum,run10day,gridcd +smoke gx3 14x2 fsd12,reprosum,run10day,gridcd +smoke gx3 11x2 isotope,reprosum,run10day,gridcd +smoke gx3 8x4 snwitdrdg,snwgrain,icdefault,reprosum,run10day,gridcd +#smoke gx3 6x4 dynpicard,reprosum,run10day,gridcd +smoke gx3 8x3 zsal,reprosum,run10day,gridcd +smoke gx3 1x1x100x116x1 reprosum,run10day,gridcd,thread + +smoke gbox128 8x2 reprosum,run10day,gridcd +smoke gbox128 12x2 boxnodyn,reprosum,run10day,gridcd +#smoke gbox128 9x2 boxadv,reprosum,run10day,gridcd +smoke gbox128 14x2 boxrestore,reprosum,run10day,gridcd +smoke gbox80 4x5 box2001,reprosum,run10day,gridcd +smoke gbox80 11x3 boxslotcyl,reprosum,run10day,gridcd + +#gridB + smoke gx3 4x2 diag1,reprosum,run10day,cmplogrest smoke_gx3_8x4_diag1_reprosum_run10day smoke gx3 4x1 diag1,reprosum,run10day,cmplogrest,thread smoke_gx3_8x4_diag1_reprosum_run10day smoke gx3 8x1 alt01,reprosum,run10day,cmplogrest,thread smoke_gx3_6x2_alt01_reprosum_run10day @@ -54,31 +112,6 @@ smoke gbox80 8x1 boxslotcyl,reprosum,run10day,cmplogrest,thread #gridC -smoke gx3 8x4 diag1,reprosum,run10day,gridc -smoke gx3 6x2 alt01,reprosum,run10day,gridc -smoke gx3 8x2 alt02,reprosum,run10day,gridc -#smoke gx3 12x2 alt03,droundrobin,reprosum,run10day,gridc -smoke gx3 4x4 alt04,reprosum,run10day,gridc -smoke gx3 4x4 alt05,reprosum,run10day,gridc -smoke gx3 8x2 alt06,reprosum,run10day,gridc -smoke gx3 7x2 
alt07,reprosum,run10day,gridc -smoke gx3 8x2 bgczm,reprosum,run10day,gridc -smoke gx1 15x2 reprosum,run10day,gridc -smoke gx1 15x2 seabedprob,reprosum,run10day,gridc -smoke gx3 14x2 fsd12,reprosum,run10day,gridc -smoke gx3 11x2 isotope,reprosum,run10day,gridc -smoke gx3 8x4 snwitdrdg,snwgrain,icdefault,reprosum,run10day,gridc -#smoke gx3 6x4 dynpicard,reprosum,run10day,gridc -smoke gx3 8x3 zsal,reprosum,run10day,gridc -smoke gx3 1x1x100x116x1 reprosum,run10day,gridc,thread - -smoke gbox128 8x2 reprosum,run10day,gridc -smoke gbox128 12x2 boxnodyn,reprosum,run10day,gridc -#smoke gbox128 9x2 boxadv,reprosum,run10day,gridc -smoke gbox128 14x2 boxrestore,reprosum,run10day,gridc -smoke gbox80 4x5 box2001,reprosum,run10day,gridc -smoke gbox80 11x3 boxslotcyl,reprosum,run10day,gridc - smoke gx3 4x2 diag1,reprosum,run10day,cmplogrest,gridc smoke_gx3_8x4_diag1_gridc_reprosum_run10day smoke gx3 4x1 diag1,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_8x4_diag1_gridc_reprosum_run10day smoke gx3 8x1 alt01,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_6x2_alt01_gridc_reprosum_run10day @@ -108,31 +141,6 @@ smoke gbox80 8x1 boxslotcyl,reprosum,run10day,cmplogrest,thread #gridCD -smoke gx3 8x4 diag1,reprosum,run10day,gridcd -smoke gx3 6x2 alt01,reprosum,run10day,gridcd -smoke gx3 8x2 alt02,reprosum,run10day,gridcd -#smoke gx3 12x2 alt03,droundrobin,reprosum,run10day,gridcd -smoke gx3 4x4 alt04,reprosum,run10day,gridcd -smoke gx3 4x4 alt05,reprosum,run10day,gridcd -smoke gx3 8x2 alt06,reprosum,run10day,gridcd -smoke gx3 7x2 alt07,reprosum,run10day,gridcd -smoke gx3 8x2 bgczm,reprosum,run10day,gridcd -smoke gx1 15x2 reprosum,run10day,gridcd -smoke gx1 15x2 seabedprob,reprosum,run10day,gridcd -smoke gx3 14x2 fsd12,reprosum,run10day,gridcd -smoke gx3 11x2 isotope,reprosum,run10day,gridcd -smoke gx3 8x4 snwitdrdg,snwgrain,icdefault,reprosum,run10day,gridcd -#smoke gx3 6x4 dynpicard,reprosum,run10day,gridcd -smoke gx3 8x3 zsal,reprosum,run10day,gridcd -smoke gx3 1x1x100x116x1 reprosum,run10day,gridcd,thread - -smoke gbox128 8x2 reprosum,run10day,gridcd -smoke gbox128 12x2 boxnodyn,reprosum,run10day,gridcd -#smoke gbox128 9x2 boxadv,reprosum,run10day,gridcd -smoke gbox128 14x2 boxrestore,reprosum,run10day,gridcd -smoke gbox80 4x5 box2001,reprosum,run10day,gridcd -smoke gbox80 11x3 boxslotcyl,reprosum,run10day,gridcd - smoke gx3 4x2 diag1,reprosum,run10day,cmplogrest,gridcd smoke_gx3_8x4_diag1_gridcd_reprosum_run10day smoke gx3 4x1 diag1,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_8x4_diag1_gridcd_reprosum_run10day smoke gx3 8x1 alt01,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_6x2_alt01_gridcd_reprosum_run10day diff --git a/doc/source/developer_guide/dg_dynamics.rst b/doc/source/developer_guide/dg_dynamics.rst index 48dead1cb..1f1430e71 100644 --- a/doc/source/developer_guide/dg_dynamics.rst +++ b/doc/source/developer_guide/dg_dynamics.rst @@ -9,14 +9,14 @@ Dynamics The CICE **cicecore/** directory consists of the non icepack source code. Within that directory there are the following subdirectories -**cicecore/cicedynB/analysis** contains higher level history and diagnostic routines. +**cicecore/cicedyn/analysis** contains higher level history and diagnostic routines. -**cicecore/cicedynB/dynamics** contains all the dynamical evp, eap, and transport routines. +**cicecore/cicedyn/dynamics** contains all the dynamical evp, eap, and transport routines. 
-**cicecore/cicedynB/general** contains routines associated with forcing, flux calculation, +**cicecore/cicedyn/general** contains routines associated with forcing, flux calculation, initialization, and model timestepping. -**cicecore/cicedynB/infrastructure** contains most of the low-level infrastructure associated +**cicecore/cicedyn/infrastructure** contains most of the low-level infrastructure associated with communication (halo updates, gather, scatter, global sums, etc) and I/O reading and writing binary and netcdf files. @@ -29,7 +29,7 @@ coupling layers. Dynamical Solvers -------------------- -The dynamics solvers are found in **cicecore/cicedynB/dynamics/**. A couple of different solvers are +The dynamics solvers are found in **cicecore/cicedyn/dynamics/**. A couple of different solvers are available including EVP, EAP and VP. The dynamics solver is specified in namelist with the ``kdyn`` variable. ``kdyn=1`` is evp, ``kdyn=2`` is eap, ``kdyn=3`` is VP. @@ -41,7 +41,7 @@ with the tripole grid. Transport ----------------- -The transport (advection) methods are found in **cicecore/cicedynB/dynamics/**. Two methods are supported, +The transport (advection) methods are found in **cicecore/cicedyn/dynamics/**. Two methods are supported, upwind and remap. These are set in namelist via the ``advection`` variable. Transport can be disabled with the ``ktransport`` namelist variable. @@ -94,11 +94,11 @@ Two low-level communications packages, mpi and serial, are provided as part of C provides a middle layer between the model and the underlying libraries. Only the CICE mpi or serial directories are compiled with CICE, not both. -**cicedynB/infrastructure/comm/mpi/** +**cicedyn/infrastructure/comm/mpi/** is based on MPI and provides various methods to do halo updates, global sums, gather/scatter, broadcasts and similar using some fairly generic interfaces to isolate the MPI calls in the code. -**cicedynB/infrastructure/comm/serial/** supports the same interfaces, but operates +**cicedyn/infrastructure/comm/serial/** supports the same interfaces, but operates in shared memory mode with no MPI. The serial library will be used, by default in the CICE scripts, if the number of MPI tasks is set to 1. The serial library allows the model to be run on a single core or with OpenMP parallelism only without requiring an MPI library. @@ -112,15 +112,15 @@ Only one of the three IO directories can be built with CICE. The CICE scripts w by default, but other options can be selected by setting ``ICE_IOTYPE`` in **cice.settings** in the case. This has to be set before CICE is built. -**cicedynB/infrastructure/io/io_netcdf/** is the +**cicedyn/infrastructure/io/io_netcdf/** is the default for the standalone CICE model, and it supports writing history and restart files in netcdf format using standard netcdf calls. It does this by writing from and reading to the root task and gathering and scattering fields from the root task to support model parallelism. -**cicedynB/infrastructure/io/io_binary/** supports files in binary format using a gather/scatter +**cicedyn/infrastructure/io/io_binary/** supports files in binary format using a gather/scatter approach and reading to and writing from the root task. -**cicedynB/infrastructure/io/io_pio/** supports reading and writing through the pio interface. pio +**cicedyn/infrastructure/io/io_pio/** supports reading and writing through the pio interface.
pio is a parallel io library (https://github.com/NCAR/ParallelIO) that supports reading and writing of binary and netcdf files through various interfaces including netcdf and pnetcdf. pio is generally more parallel in memory even when using serial netcdf than the standard gather/scatter methods, diff --git a/doc/source/developer_guide/dg_forcing.rst b/doc/source/developer_guide/dg_forcing.rst index d3c406b43..0b90a9b2e 100644 --- a/doc/source/developer_guide/dg_forcing.rst +++ b/doc/source/developer_guide/dg_forcing.rst @@ -15,7 +15,7 @@ generally not been maintained by the Consortium and only a subset of the code is tested by the Consortium. The forcing implementation can be found in the file -**cicecore/cicedynB/general/ice_forcing.F90**. As noted above, only a subset of the +**cicecore/cicedyn/general/ice_forcing.F90**. As noted above, only a subset of the forcing modes are tested and supported. In many ways, the implementation is fairly primitive, in part due to historical reasons and in part because standalone runs are discouraged for evaluating complex science. In general, most implementations diff --git a/doc/source/user_guide/ug_case_settings.rst b/doc/source/user_guide/ug_case_settings.rst index a34c69822..bf227e145 100644 --- a/doc/source/user_guide/ug_case_settings.rst +++ b/doc/source/user_guide/ug_case_settings.rst @@ -847,13 +847,13 @@ icefields_nml There are several icefield namelist groups to control model history output. See the source code for a full list of supported output fields. -* ``icefields_nml`` is in **cicecore/cicedynB/analysis/ice_history_shared.F90** -* ``icefields_bgc_nml`` is in **cicecore/cicedynB/analysis/ice_history_bgc.F90** -* ``icefields_drag_nml`` is in **cicecore/cicedynB/analysis/ice_history_drag.F90** -* ``icefields_fsd_nml`` is in **cicecore/cicedynB/analysis/ice_history_fsd.F90** -* ``icefields_mechred_nml`` is in **cicecore/cicedynB/analysis/ice_history_mechred.F90** -* ``icefields_pond_nml`` is in **cicecore/cicedynB/analysis/ice_history_pond.F90** -* ``icefields_snow_nml`` is in **cicecore/cicedynB/analysis/ice_history_snow.F90** +* ``icefields_nml`` is in **cicecore/cicedyn/analysis/ice_history_shared.F90** +* ``icefields_bgc_nml`` is in **cicecore/cicedyn/analysis/ice_history_bgc.F90** +* ``icefields_drag_nml`` is in **cicecore/cicedyn/analysis/ice_history_drag.F90** +* ``icefields_fsd_nml`` is in **cicecore/cicedyn/analysis/ice_history_fsd.F90** +* ``icefields_mechred_nml`` is in **cicecore/cicedyn/analysis/ice_history_mechred.F90** +* ``icefields_pond_nml`` is in **cicecore/cicedyn/analysis/ice_history_pond.F90** +* ``icefields_snow_nml`` is in **cicecore/cicedyn/analysis/ice_history_snow.F90** ..
csv-table:: **icefields_nml namelist options** :header: "variable", "options/format", "description", "default value" diff --git a/doc/source/user_guide/ug_implementation.rst b/doc/source/user_guide/ug_implementation.rst index a7cc66948..5ed2092c0 100644 --- a/doc/source/user_guide/ug_implementation.rst +++ b/doc/source/user_guide/ug_implementation.rst @@ -47,7 +47,7 @@ as follows **cicecore/** CICE source code -**cicecore/cicedynB/** +**cicecore/cicedyn/** routines associated with the dynamics core **cicecore/drivers/** diff --git a/doc/source/user_guide/ug_testing.rst b/doc/source/user_guide/ug_testing.rst index 284de72f1..289f626a9 100644 --- a/doc/source/user_guide/ug_testing.rst +++ b/doc/source/user_guide/ug_testing.rst @@ -23,7 +23,8 @@ The testing scripts support several features - Ability to compare results to prior baselines to verify bit-for-bit (``--bcmp``) - Ability to define where baseline tests are stored (``--bdir``) - Ability to compare tests against each other (``--diff``) - - Ability to set account number (``--acct``), which is otherwise not set and may result in tests not being submitted + - Ability to set or override the batch account number (``--acct``) and queue name (``--queue``) + - Ability to control how test suites execute (``--setup-only``, ``--setup-build``, ``--setup-build-run``, ``--setup-build-submit``) .. _indtests: @@ -301,22 +302,6 @@ results.csh script in the testsuite.[testid]:: cd testsuite.[testid] ./results.csh -The script **create_fails.csh** will process the output from results.csh and generate a new -test suite file, **fails.ts**, from the failed tests. -**fails.ts** can then be edited and passed into ``cice.setup --suite fails.ts ...`` to rerun -subsets of failed tests to more efficiently move through the development, testing, and -validation process. However, a full test suite should be run on the final development -version of the code. - -To report the test results, as is required for Pull Requests to be accepted into -the master branch of the CICE Consortium code, see :ref:`testreporting`. - -If using the ``--tdir`` option, that directory must not exist before the script is run. The tdir directory will be -created by the script and it will be populated by all tests as well as scripts that support the -test suite:: - - ./cice.setup --suite base_suite --mach wolf --env gnu --testid myid --tdir /scratch/$user/testsuite.myid - Multiple suites are supported on the command line as comma separated arguments:: ./cice.setup --suite base_suite,decomp_suite --mach wolf --env gnu --testid myid @@ -329,9 +314,48 @@ The option settings defined at the command line have precedence over the test su values if there are conflicts. The predefined test suites are defined under **configuration/scripts/tests** and -the files defining the suites -have a suffix of .ts in that directory. The format for the test suite file -is relatively simple. +the files defining the suites have a suffix of .ts in that directory. Some of the +available test suites are + +``quick_suite`` + consists of a handful of basic CICE tests + +``base_suite`` + consists of a much larger suite of tests covering much of the CICE functionality + +``decomp_suite`` + checks that different decompositions and pe counts produce bit-for-bit results + +``omp_suite`` + checks that OpenMP single thread and multi-thread cases are bit-for-bit identical + +``io_suite`` + tests the various IO options including binary, netcdf, and pio.
PIO should be installed locally and accessible to the CICE build system to make full use of this suite. + +``perf_suite`` + runs a series of tests to evaluate model scaling and performance + +``reprosum_suite`` + verifies that CICE log files are bit-for-bit with different decompositions and pe counts when the bfbflag is set to reprosum + +``gridsys_suite`` + tests B, C, and CD grid_ice configurations + +``prod_suite`` + consists of a handful of tests running 5 to 10 model years and includes some QC testing. These tests will be relatively expensive and take more time compared to other suites. + +``unittest_suite`` + runs unit tests in the CICE repository + +``travis_suite`` + consists of a small suite of tests suitable for running on low pe counts. This is the suite used with Github Actions for CI in the workflow. + +``first_suite`` + this small suite of tests is redundant with tests in other suites. It runs several of the critical baseline tests that other tests compare to. It can improve testing turnaround if listed first in a series of test suites. + +When running multiple suites on the command line (e.g. ``--suite first_suite,base_suite,omp_suite``) the suites will be run in the order defined by the user and redundant tests across multiple suites will be created and executed only once. + +The format for the test suite file is relatively simple. It is a text file with white space delimited columns that define a handful of values in a specific order. The first column is the test name, the second the grid, the third the pe count, @@ -423,6 +447,22 @@ which means by default the test suite builds and submits the jobs. By defining By leveraging the **cice.setup** command line arguments ``--setup-only``, ``--setup-build``, and ``--setup-build-run`` as well as the environment variables SUITE_BUILD, SUITE_RUN, and SUITE_SUBMIT, users can run **cice.setup** and **suite.submit** in various combinations to quickly setup, setup and build, submit, resubmit, run interactively, or rebuild and resubmit full testsuites quickly and easily. See :ref:`examplesuites` for an example. +The script **create_fails.csh** will process the output from results.csh and generate a new +test suite file, **fails.ts**, from the failed tests. +**fails.ts** can then be edited and passed into ``cice.setup --suite fails.ts ...`` to rerun +subsets of failed tests to more efficiently move through the development, testing, and +validation process. However, a full test suite should be run on the final development +version of the code. + +To report the test results, as is required for Pull Requests to be accepted into +the master branch of the CICE Consortium code, see :ref:`testreporting`. + +If using the ``--tdir`` option, that directory must not exist before the script is run. The tdir directory will be +created by the script and it will be populated by all tests as well as scripts that support the +test suite:: + + ./cice.setup --suite base_suite --mach wolf --env gnu --testid myid --tdir /scratch/$user/testsuite.myid + .. _examplesuites: @@ -695,9 +735,12 @@ The following are brief descriptions of some of the current unit tests, both sets of software are tested independently and correctness is verified. - **calchk** is a unit test that exercises the CICE calendar over 100,000 years and verifies correctness. This test does not depend on the CICE initialization. + - **gridavgchk** is a unit test that exercises the CICE grid_average_X2Y methods and verifies results.
- **helloworld** is a simple test that writes out helloworld and uses no CICE infrastructure. This test exists to demonstrate how to build a unit test by specifying the object files directly in the Makefile + - **optargs** is a unit test that passes optional arguments down a calling tree and verifies + that the optional attribute is preserved correctly. - **sumchk** is a unit test that exercises the methods in ice_global_reductions.F90. This test requires that a CICE grid and decomposition be initialized, so CICE_InitMod.F90 is leveraged to initialize the model prior to running a suite of unit validation tests to verify correctness. diff --git a/doc/source/user_guide/ug_troubleshooting.rst b/doc/source/user_guide/ug_troubleshooting.rst index d20b14ffc..315b2f869 100644 --- a/doc/source/user_guide/ug_troubleshooting.rst +++ b/doc/source/user_guide/ug_troubleshooting.rst @@ -7,6 +7,16 @@ Troubleshooting Check the FAQ: https://github.com/CICE-Consortium/CICE/wiki +.. _dirsetup: + +Directory Structure +--------------------- + +In November 2022, the cicedynB directory was renamed to cicedyn. +A soft link was temporarily added to preserve the ability to use +cicedynB as a path when compiling CICE in other build systems. This +soft link will be removed in the future. + .. _setup: Initial setup @@ -221,6 +231,16 @@ be found in the `Icepack documentation `_. +VP dynamics results +---------------------------------------- + +The VP dynamics solver (`kdyn=3`) requires a global sum. This global sum +is computed by default via an efficient implementation that is not bit-for-bit +for different decompositions or pe counts. Bit-for-bit identical results +can be recovered for the VP dynamics solver by setting the namelist +`bfbflag = reprosum` or using the `-s reprosum` option when setting up a case. + + Proliferating subprocess parameterizations ------------------------------------------------------- From ed4855e6c2ac81c9119a7861c82b58b2022f4cb9 Mon Sep 17 00:00:00 2001 From: Tony Craig Date: Fri, 18 Nov 2022 21:10:22 -0800 Subject: [PATCH 40/57] Update Icepack (#794) --- icepack | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/icepack b/icepack index 18fc1c9b7..493373809 160000 --- a/icepack +++ b/icepack @@ -1 +1 @@ -Subproject commit 18fc1c9b79d81604eafdb1fac4ddd039b78ad390 +Subproject commit 493373809c85561f0c48f6255b713b7ec0642a8d From cb58257857d429c4c2dc4185d16c3f991e379271 Mon Sep 17 00:00:00 2001 From: Tony Craig Date: Sun, 20 Nov 2022 12:43:43 -0800 Subject: [PATCH 41/57] Trap solve_zsal = true at initialization (#795) * Trap solve_zsal = true at initialization - remove zsal tests - rearrange a few test suites * Update documentation for zsalinity * update test suite * Update Icepack to latest --- cicecore/shared/ice_init_column.F90 | 9 ++++++ configuration/scripts/tests/base_suite.ts | 2 -- configuration/scripts/tests/first_suite.ts | 21 +++++++++----- configuration/scripts/tests/omp_suite.ts | 8 ++---- configuration/scripts/tests/perf_suite.ts | 33 +++++++++++----------- doc/source/science_guide/sg_tracers.rst | 2 +- doc/source/user_guide/ug_case_settings.rst | 4 +-- icepack | 2 +- 8 files changed, 45 insertions(+), 36 deletions(-) diff --git a/cicecore/shared/ice_init_column.F90 b/cicecore/shared/ice_init_column.F90 index 06ab79cdb..0d06b0aac 100644 --- a/cicecore/shared/ice_init_column.F90 +++ b/cicecore/shared/ice_init_column.F90 @@ -1477,6 +1477,14 @@ subroutine input_zbgc restart_zsal = .false.
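! zsalinity is deprecated: restart_zsal is reset to .false. just above, and solve_zsal = .true. is trapped as an initialization error (abort_flag = 101) just below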
endif + if (solve_zsal) then + if (my_task == master_task) then + write(nu_diag,*) subname,' ERROR: solve_zsal=T deprecated' + endif + abort_flag = 101 + endif + +#ifdef UNDEPRECATE_ZSAL if (solve_zsal .and. nblyr < 1) then if (my_task == master_task) then write(nu_diag,*) subname,' ERROR: solve_zsal=T but 0 zsalinity tracers' @@ -1490,6 +1498,7 @@ subroutine input_zbgc endif abort_flag = 102 endif +#endif if (tr_brine .and. nblyr < 1 ) then if (my_task == master_task) then diff --git a/configuration/scripts/tests/base_suite.ts b/configuration/scripts/tests/base_suite.ts index 3007380ab..b659fcb19 100644 --- a/configuration/scripts/tests/base_suite.ts +++ b/configuration/scripts/tests/base_suite.ts @@ -70,8 +70,6 @@ restart gx3 4x4 histall,precision8,cdf64 smoke gx3 30x1 bgcz,histall smoke gx3 14x2 fsd12,histall smoke gx3 4x1 dynpicard -smoke gx3 8x2 diag24,run5day,zsal,debug -restart gx3 8x2 zsal restart gx3 8x2 gx3ncarbulk,debug restart gx3 4x4 gx3ncarbulk,diag1 smoke gx3 4x1 calcdragio diff --git a/configuration/scripts/tests/first_suite.ts b/configuration/scripts/tests/first_suite.ts index d9db20f6d..bef24d9eb 100644 --- a/configuration/scripts/tests/first_suite.ts +++ b/configuration/scripts/tests/first_suite.ts @@ -1,12 +1,19 @@ # Test Grid PEs Sets BFB-compare smoke gx3 8x2 diag1,run5day +# decomp_suite restart gx3 4x2x25x29x4 dslenderX2 smoke gx3 4x2x25x29x4 debug,run2day,dslenderX2 -smoke gx3 4x2x25x29x4 dslenderX2,diag1,reprosum,cmplog +# reprosum_suite +smoke gx3 4x2x25x29x4 dslenderX2,diag1,reprosum +# travis_suite smoke gx3 1x2 run2day -smoke gx3 1x1x100x116x1 reprosum,run10day -smoke gx1 32x1x16x16x32 reprosum,run10day -smoke gx3 1x1x100x116x1 reprosum,run10day,gridcd -smoke gx1 32x1x16x16x32 reprosum,run10day,gridcd -smoke gx3 1x1x100x116x1 reprosum,run10day,gridc -smoke gx1 32x1x16x16x32 reprosum,run10day,gridc +# gridsys_suite +smoke gx3 1x1x100x116x1 reprosum,run10day +smoke gx1 32x1x16x16x32 reprosum,run10day +smoke gx3 1x1x100x116x1 reprosum,run10day,gridcd +smoke gx1 32x1x16x16x32 reprosum,run10day,gridcd +smoke gx3 1x1x100x116x1 reprosum,run10day,gridc +smoke gx1 32x1x16x16x32 reprosum,run10day,gridc +# perf_suite +smoke gx1 32x1x16x16x15 run2day,droundrobin +smoke gx1 64x1x16x16x8 run2day,droundrobin,thread diff --git a/configuration/scripts/tests/omp_suite.ts b/configuration/scripts/tests/omp_suite.ts index c9bbae0a2..62630e874 100644 --- a/configuration/scripts/tests/omp_suite.ts +++ b/configuration/scripts/tests/omp_suite.ts @@ -17,7 +17,6 @@ smoke gx3 14x2 fsd12,reprosum,run10day smoke gx3 11x2 isotope,reprosum,run10day smoke gx3 8x4 snwitdrdg,snwgrain,icdefault,reprosum,run10day smoke gx3 6x4 dynpicard,reprosum,run10day -smoke gx3 8x3 zsal,reprosum,run10day smoke gx3 1x1x100x116x1 reprosum,run10day,thread smoke gbox128 8x2 reprosum,run10day @@ -44,7 +43,6 @@ smoke gx3 14x2 fsd12,reprosum,run10day,gridc smoke gx3 11x2 isotope,reprosum,run10day,gridc smoke gx3 8x4 snwitdrdg,snwgrain,icdefault,reprosum,run10day,gridc #smoke gx3 6x4 dynpicard,reprosum,run10day,gridc -smoke gx3 8x3 zsal,reprosum,run10day,gridc smoke gx3 1x1x100x116x1 reprosum,run10day,gridc,thread smoke gbox128 8x2 reprosum,run10day,gridc @@ -71,7 +69,6 @@ smoke gx3 14x2 fsd12,reprosum,run10day,gridcd smoke gx3 11x2 isotope,reprosum,run10day,gridcd smoke gx3 8x4 snwitdrdg,snwgrain,icdefault,reprosum,run10day,gridcd #smoke gx3 6x4 dynpicard,reprosum,run10day,gridcd -smoke gx3 8x3 zsal,reprosum,run10day,gridcd smoke gx3 1x1x100x116x1 reprosum,run10day,gridcd,thread smoke gbox128 8x2 
reprosum,run10day,gridcd @@ -81,6 +78,8 @@ smoke gbox128 14x2 boxrestore,reprosum,run10day,gridcd smoke gbox80 4x5 box2001,reprosum,run10day,gridcd smoke gbox80 11x3 boxslotcyl,reprosum,run10day,gridcd +sleep 180 + #gridB smoke gx3 4x2 diag1,reprosum,run10day,cmplogrest smoke_gx3_8x4_diag1_reprosum_run10day @@ -99,7 +98,6 @@ smoke gx3 8x1 fsd12,reprosum,run10day,cmplogrest,thread smoke gx3 8x1 isotope,reprosum,run10day,cmplogrest,thread smoke_gx3_11x2_isotope_reprosum_run10day smoke gx3 8x1 snwitdrdg,snwgrain,icdefault,reprosum,run10day,cmplogrest,thread smoke_gx3_8x4_icdefault_reprosum_run10day_snwgrain_snwitdrdg smoke gx3 8x1 dynpicard,reprosum,run10day,cmplogrest,thread smoke_gx3_6x4_dynpicard_reprosum_run10day -smoke gx3 8x1 zsal,reprosum,run10day,cmplogrest,thread smoke_gx3_8x3_reprosum_run10day_zsal smoke gx3 4x2x25x29x4 reprosum,run10day smoke_gx3_1x1x100x116x1_reprosum_run10day_thread smoke gx3 8x4x5x4x80 reprosum,run10day smoke_gx3_1x1x100x116x1_reprosum_run10day_thread @@ -128,7 +126,6 @@ smoke gx3 8x1 fsd12,reprosum,run10day,cmplogrest,thread,grid smoke gx3 8x1 isotope,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_11x2_gridc_isotope_reprosum_run10day smoke gx3 8x1 snwitdrdg,snwgrain,icdefault,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_8x4_gridc_icdefault_reprosum_run10day_snwgrain_snwitdrdg #smoke gx3 8x1 dynpicard,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_6x4_dynpicard_gridc_reprosum_run10day -smoke gx3 8x1 zsal,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_8x3_gridc_reprosum_run10day_zsal smoke gx3 4x2x25x29x4 reprosum,run10day,gridc smoke_gx3_1x1x100x116x1_gridc_reprosum_run10day_thread smoke gx3 8x4x5x4x80 reprosum,run10day,gridc smoke_gx3_1x1x100x116x1_gridc_reprosum_run10day_thread @@ -157,7 +154,6 @@ smoke gx3 8x1 fsd12,reprosum,run10day,cmplogrest,thread,grid smoke gx3 8x1 isotope,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_11x2_gridcd_isotope_reprosum_run10day smoke gx3 8x1 snwitdrdg,snwgrain,icdefault,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_8x4_gridcd_icdefault_reprosum_run10day_snwgrain_snwitdrdg #smoke gx3 8x1 dynpicard,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_6x4_dynpicard_gridcd_reprosum_run10day -smoke gx3 8x1 zsal,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_8x3_gridcd_reprosum_run10day_zsal smoke gx3 4x2x25x29x4 reprosum,run10day,gridcd smoke_gx3_1x1x100x116x1_gridcd_reprosum_run10day_thread smoke gx3 8x4x5x4x80 reprosum,run10day,gridcd smoke_gx3_1x1x100x116x1_gridcd_reprosum_run10day_thread diff --git a/configuration/scripts/tests/perf_suite.ts b/configuration/scripts/tests/perf_suite.ts index 9a17d8a55..a4d8ef588 100644 --- a/configuration/scripts/tests/perf_suite.ts +++ b/configuration/scripts/tests/perf_suite.ts @@ -1,25 +1,24 @@ # Test Grid PEs Sets BFB-compare -smoke gx1 1x1x320x384x1 run2day,droundrobin +smoke gx1 32x1x16x16x15 run2day,droundrobin smoke gx1 64x1x16x16x8 run2day,droundrobin,thread -sleep 180 # -smoke gx1 1x1x320x384x1 run2day,droundrobin -smoke gx1 1x1x160x192x4 run2day,droundrobin smoke_gx1_1x1x320x384x1_droundrobin_run2day -smoke gx1 1x1x80x96x16 run2day,droundrobin smoke_gx1_1x1x320x384x1_droundrobin_run2day -smoke gx1 1x1x40x48x64 run2day,droundrobin smoke_gx1_1x1x320x384x1_droundrobin_run2day -smoke gx1 1x1x20x24x256 run2day,droundrobin smoke_gx1_1x1x320x384x1_droundrobin_run2day +smoke gx1 1x1x320x384x1 run2day,droundrobin smoke_gx1_32x1x16x16x15_droundrobin_run2day +smoke gx1 1x1x160x192x4 run2day,droundrobin smoke_gx1_32x1x16x16x15_droundrobin_run2day +smoke 
gx1 1x1x80x96x16 run2day,droundrobin smoke_gx1_32x1x16x16x15_droundrobin_run2day +smoke gx1 1x1x40x48x64 run2day,droundrobin smoke_gx1_32x1x16x16x15_droundrobin_run2day +smoke gx1 1x1x20x24x256 run2day,droundrobin smoke_gx1_32x1x16x16x15_droundrobin_run2day # -smoke gx1 1x1x16x16x480 run2day,droundrobin smoke_gx1_1x1x320x384x1_droundrobin_run2day -smoke gx1 2x1x16x16x240 run2day,droundrobin smoke_gx1_1x1x320x384x1_droundrobin_run2day -smoke gx1 4x1x16x16x120 run2day,droundrobin smoke_gx1_1x1x320x384x1_droundrobin_run2day -smoke gx1 8x1x16x16x60 run2day,droundrobin smoke_gx1_1x1x320x384x1_droundrobin_run2day -smoke gx1 16x1x16x16x30 run2day,droundrobin smoke_gx1_1x1x320x384x1_droundrobin_run2day -smoke gx1 32x1x16x16x15 run2day,droundrobin smoke_gx1_1x1x320x384x1_droundrobin_run2day -smoke gx1 64x1x16x16x8 run2day,droundrobin smoke_gx1_1x1x320x384x1_droundrobin_run2day -smoke gx1 128x1x16x16x4 run2day,droundrobin smoke_gx1_1x1x320x384x1_droundrobin_run2day +smoke gx1 1x1x16x16x480 run2day,droundrobin smoke_gx1_32x1x16x16x15_droundrobin_run2day +smoke gx1 2x1x16x16x240 run2day,droundrobin smoke_gx1_32x1x16x16x15_droundrobin_run2day +smoke gx1 4x1x16x16x120 run2day,droundrobin smoke_gx1_32x1x16x16x15_droundrobin_run2day +smoke gx1 8x1x16x16x60 run2day,droundrobin smoke_gx1_32x1x16x16x15_droundrobin_run2day +smoke gx1 16x1x16x16x30 run2day,droundrobin smoke_gx1_32x1x16x16x15_droundrobin_run2day +#smoke gx1 32x1x16x16x15 run2day,droundrobin +smoke gx1 64x1x16x16x8 run2day,droundrobin smoke_gx1_32x1x16x16x15_droundrobin_run2day +smoke gx1 128x1x16x16x4 run2day,droundrobin smoke_gx1_32x1x16x16x15_droundrobin_run2day # -smoke gx1 64x1x16x16x8 run2day,droundrobin smoke_gx1_1x1x320x384x1_droundrobin_run2day -smoke gx1 64x1x16x16x8 run2day,droundrobin,thread +smoke gx1 64x1x16x16x8 run2day,droundrobin smoke_gx1_32x1x16x16x15_droundrobin_run2day +#smoke gx1 64x1x16x16x8 run2day,droundrobin,thread smoke gx1 32x2x16x16x16 run2day,droundrobin smoke_gx1_64x1x16x16x8_droundrobin_run2day_thread smoke gx1 16x4x16x16x32 run2day,droundrobin smoke_gx1_64x1x16x16x8_droundrobin_run2day_thread smoke gx1 8x8x16x16x64 run2day,droundrobin smoke_gx1_64x1x16x16x8_droundrobin_run2day_thread diff --git a/doc/source/science_guide/sg_tracers.rst b/doc/source/science_guide/sg_tracers.rst index b75edfb00..cbecb9310 100644 --- a/doc/source/science_guide/sg_tracers.rst +++ b/doc/source/science_guide/sg_tracers.rst @@ -92,7 +92,6 @@ is not in use. " ","nslyr","vsno","nt_rhos"," " " ","nslyr","vsno","nt_smice"," " " ","nslyr","vsno","nt_smliq"," " - "solve_zsal", "n_trzs", "fbri or (a,v)ice", "nt_bgc_S", " " "tr_bgc_N", "n_algae", "fbri or (a,v)ice", "nt_bgc_N", "nlt_bgc_N" "tr_bgc_Nit", " ", "fbri or (a,v)ice", "nt_bgc_Nit", "nlt_bgc_Nit" "tr_bgc_C", "n_doc", "fbri or (a,v)ice", "nt_bgc_DOC", "nlt_bgc_DOC" @@ -112,6 +111,7 @@ is not in use. " ", "1", "fbri", "nt_zbgc_frac", " " .. 
+ "solve_zsal", "n_trzs", "fbri or (a,v)ice", "nt_bgc_S", " " "tr_pond_cesm", "2", "aice", "nt_apnd", " " " ", " ", "apnd", "nt_vpnd", " " diff --git a/doc/source/user_guide/ug_case_settings.rst b/doc/source/user_guide/ug_case_settings.rst index bf227e145..74f8ab11f 100644 --- a/doc/source/user_guide/ug_case_settings.rst +++ b/doc/source/user_guide/ug_case_settings.rst @@ -804,14 +804,14 @@ zbgc_nml "``ratio_S2N_sp``", "real", "algal S to N in mol/mol small plankton", "0.03" "``restart_bgc``", "logical", "restart tracer values from file", "``.false.``" "``restart_hbrine``", "logical", "", "``.false.``" - "``restart_zsal``", "logical", "", "``.false.``" + "``restart_zsal``", "logical", "zsalinity DEPRECATED", "``.false.``" "``restore_bgc``", "logical", "restore bgc to data", "``.false.``" "``R_dFe2dust``", "real", "g/g :cite:`Tagliabue09`", "0.035" "``scale_bgc``", "logical", "", "``.false.``" "``silicatetype``", "real", "mobility type between stationary and mobile silicate", "-1.0" "``skl_bgc``", "logical", "biogeochemistry", "``.false.``" "``solve_zbgc``", "logical", "", "``.false.``" - "``solve_zsal``", "logical", "update salinity tracer profile", "``.false.``" + "``solve_zsal``", "logical", "zsalinity DEPRECATED, update salinity tracer profile", "``.false.``" "``tau_max``", "real", "long time mobile to stationary exchanges", "1.73e-5" "``tau_min``", "real", "rapid mobile to stationary exchanges", "5200." "``tr_bgc_Am``", "logical", "ammonium tracer", "``.false.``" diff --git a/icepack b/icepack index 493373809..cabfe0f11 160000 --- a/icepack +++ b/icepack @@ -1 +1 @@ -Subproject commit 493373809c85561f0c48f6255b713b7ec0642a8d +Subproject commit cabfe0f111b61057db16cd8891f83ea4ad447a8a From b16d7fdaa081ec231b166ffb443a99dfb754b42a Mon Sep 17 00:00:00 2001 From: Tony Craig Date: Fri, 2 Dec 2022 15:35:12 -0800 Subject: [PATCH 42/57] Update Icepack to #5999551 including snowbrinebugs (#797) * Update Icepack to #5999551 Update advection description in document Update f_bound setting in ice_history.F90 --- cicecore/cicedyn/analysis/ice_history.F90 | 4 ---- doc/source/science_guide/sg_horiztrans.rst | 9 ++++++--- icepack | 2 +- 3 files changed, 7 insertions(+), 8 deletions(-) diff --git a/cicecore/cicedyn/analysis/ice_history.F90 b/cicecore/cicedyn/analysis/ice_history.F90 index 2142310b9..70288fc6f 100644 --- a/cicecore/cicedyn/analysis/ice_history.F90 +++ b/cicecore/cicedyn/analysis/ice_history.F90 @@ -420,10 +420,6 @@ subroutine init_hist (dt) f_taubyE = f_tauby endif -#ifndef ncdf - f_bounds = .false. -#endif - ! write dimensions for 3D or 4D history variables ! note: list of variables checked here is incomplete if (f_aicen(1:1) /= 'x' .or. f_vicen(1:1) /= 'x' .or. & diff --git a/doc/source/science_guide/sg_horiztrans.rst b/doc/source/science_guide/sg_horiztrans.rst index f85f13ee5..d66046465 100644 --- a/doc/source/science_guide/sg_horiztrans.rst +++ b/doc/source/science_guide/sg_horiztrans.rst @@ -35,8 +35,11 @@ versions but have not yet been implemented. Two transport schemes are available: upwind and the incremental remapping scheme of :cite:`Dukowicz00` as modified for sea ice by -:cite:`Lipscomb04`. The upwind scheme is naturally suited for a C grid discretization. As such, the C grid velocity components (i.e. :math:`uvelE=u` at the E point and :math:`vvelN=v` at the N point) are directly passed to the upwind transport scheme.
On the other hand, if the B grid is used, :math:`uvel` and :math:`vvel` (respectively :math:`u` and :math:`v` at the U point) are interpolated to the E and N points such that the upwind advection can be performed. Conversely, as the remapping scheme was originally developed for B grid applications, :math:`uvel` and :math:`vvel` are directly used for the advection. If the remapping scheme is used for the C grid, :math:`uvelE` and :math:`vvelN` are first interpolated to the U points before performing the advection. +:cite:`Lipscomb04`. +- The upwind scheme uses velocity points at the East and North face (i.e. :math:`uvelE=u` at the E point and :math:`vvelN=v` at the N point) of a T gridcell. As such, the prognostic C grid velocity components (:math:`uvelE` and :math:`vvelN`) can be passed directly to the upwind transport scheme. If the upwind scheme is used with the B grid, the B grid velocities, :math:`uvelU` and :math:`vvelU` (respectively :math:`u` and :math:`v` at the U point) are interpolated to the E and N points first. (Note however that the upwind scheme does not transport all potentially available tracers.) + +- The remapping scheme uses :math:`uvelU` and :math:`vvelU` if l_fixed_area is false and :math:`uvelE` and :math:`vvelN` if l_fixed_area is true. l_fixed_area is hardcoded to false by default and further described below. As such, the B grid velocities (:math:`uvelU` and :math:`vvelU`) are used directly in the remapping scheme, while the C grid velocities (:math:`uvelE` and :math:`vvelN`) are interpolated to U points first. If l_fixed_area is changed to true, then the reverse is true. The C grid velocities are used directly and the B grid velocities are interpolated. The remapping scheme has several desirable features: @@ -464,14 +467,14 @@ In general, the fluxes in this expression are not equal to those implied by the above scheme for locating departure regions. For some applications it may be desirable to prescribe the divergence by prescribing the area of the departure region for each edge. This can be -done in CICE 4.0 by setting `l\_fixed\_area` = true in +done by setting `l\_fixed\_area` = true in **ice\_transport\_driver.F90** and passing the prescribed departure areas (`edgearea\_e` and `edgearea\_n`) into the remapping routine. An extra triangle is then constructed for each departure region to ensure that the total area is equal to the prescribed value. This idea was suggested and first implemented by Mats Bentsen of the Nansen Environmental and Remote Sensing Center (Norway), who applied an earlier version of the -CICE remapping scheme to an ocean model. The implementation in CICE v4.0 +CICE remapping scheme to an ocean model. The implementation in CICE is somewhat more general, allowing for departure regions lying on both sides of a cell edge. The extra triangle is constrained to lie in one but not both of the grid cells that share the edge. Since this option diff --git a/icepack b/icepack index cabfe0f11..599955139 160000 --- a/icepack +++ b/icepack @@ -1 +1 @@ -Subproject commit cabfe0f111b61057db16cd8891f83ea4ad447a8a +Subproject commit 5999551399b49587b78411c2b0c8c1b9473b1f9b From 4befa1e4e884899d5140a748fdf8fa997d04d08b Mon Sep 17 00:00:00 2001 From: "David A. 
Bailey" Date: Tue, 6 Dec 2022 21:03:10 -0700 Subject: [PATCH 43/57] Saltflux option for CICE (#799) * Bring Consortium main into CESM * Comment out OMP initialization * Update wave-ice coupling in NUOPC/CMEPS driver * Update saltflux option to be constant and use ice_ref_salinity * Fix a couple inconsistencies with CICE Consortium master * Fix some issues * Fix some merge issues * Fix some merge issues * Fix ACC directive * Bug fixes with eap initialization * Fix evp initialization * Add warning when ktherm=1 and saltflux_option not constant * Comment out Tair initialization * Add CESMCOUPLED around OMP initialization * CESM File unit handling * Change icepack hash for PR * Some cleanup things * Move nu_diag_set to CESMCOUPLED region * Put Tair initialization in a CESMCOUPLED * New saltflux tests * Update icepack for saltflux option * Update icepack --- cicecore/cicedyn/analysis/ice_diagnostics.F90 | 45 +++++++++++++++---- cicecore/cicedyn/analysis/ice_history.F90 | 17 +++++-- .../cicedyn/analysis/ice_history_shared.F90 | 2 +- cicecore/cicedyn/dynamics/ice_dyn_evp_1d.F90 | 2 +- cicecore/cicedyn/general/ice_init.F90 | 37 +++++++++++---- cicecore/cicedyn/infrastructure/ice_grid.F90 | 35 ++++++++------- .../infrastructure/io/io_pio2/ice_restart.F90 | 8 ---- cicecore/drivers/nuopc/cmeps/CICE_InitMod.F90 | 4 +- cicecore/shared/ice_fileunits.F90 | 13 +++++- configuration/scripts/ice_in | 2 + .../scripts/options/set_nml.saltflux | 2 + configuration/scripts/tests/base_suite.ts | 1 + doc/source/cice_index.rst | 3 +- doc/source/user_guide/ug_case_settings.rst | 3 ++ icepack | 2 +- 15 files changed, 126 insertions(+), 50 deletions(-) create mode 100644 configuration/scripts/options/set_nml.saltflux diff --git a/cicecore/cicedyn/analysis/ice_diagnostics.F90 b/cicecore/cicedyn/analysis/ice_diagnostics.F90 index 8879d6632..53631b2d4 100644 --- a/cicecore/cicedyn/analysis/ice_diagnostics.F90 +++ b/cicecore/cicedyn/analysis/ice_diagnostics.F90 @@ -87,6 +87,8 @@ module ice_diagnostics totms , & ! total ice/snow water mass (sh) totmin , & ! total ice water mass (nh) totmis , & ! total ice water mass (sh) + totsn , & ! total salt mass (nh) + totss , & ! total salt mass (sh) toten , & ! total ice/snow energy (J) totes ! total ice/snow energy (J) @@ -154,7 +156,7 @@ subroutine runtime_diags (dt) rhofresh, lfresh, lvap, ice_ref_salinity, Tffresh character (len=char_len) :: & - snwredist + snwredist, saltflux_option ! hemispheric state quantities real (kind=dbl_kind) :: & @@ -162,6 +164,8 @@ subroutine runtime_diags (dt) umaxs, hmaxs, shmaxs, areas, snwmxs, extents, shmaxst, & etotn, mtotn, micen, msnwn, pmaxn, ketotn, & etots, mtots, mices, msnws, pmaxs, ketots, & + stotn, & + stots, & urmsn, albtotn, arean_alb, mpndn, ptotn, spondn, & urmss, albtots, areas_alb, mpnds, ptots, sponds @@ -226,7 +230,7 @@ subroutine runtime_diags (dt) awtvdr_out=awtvdr, awtidr_out=awtidr, awtvdf_out=awtvdf, awtidf_out=awtidf, & rhofresh_out=rhofresh, lfresh_out=lfresh, lvap_out=lvap, & ice_ref_salinity_out=ice_ref_salinity,snwredist_out=snwredist, & - snwgrain_out=snwgrain) + snwgrain_out=snwgrain, saltflux_option_out=saltflux_option) call icepack_warnings_flush(nu_diag) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) @@ -512,6 +516,15 @@ subroutine runtime_diags (dt) etots = global_sum(work1, distrb_info, & field_loc_center, tareas) + ! 
total salt volume + call total_salt (work2) + + stotn = global_sum(work2, distrb_info, & + field_loc_center, tarean) + stots = global_sum(work2, distrb_info, & + field_loc_center, tareas) + + !----------------------------------------------------------------- ! various fluxes !----------------------------------------------------------------- @@ -785,12 +798,22 @@ subroutine runtime_diags (dt) swerrs = (fswnets - fswdns) / (fswnets - c1) ! salt mass - msltn = micen*ice_ref_salinity*p001 - mslts = mices*ice_ref_salinity*p001 + if (saltflux_option == 'prognostic') then + ! compute the total salt mass + msltn = stotn*rhoi*p001 + mslts = stots*rhoi*p001 + + ! change in salt mass + delmsltn = rhoi*(stotn-totsn)*p001 + delmslts = rhoi*(stots-totss)*p001 + else + msltn = micen*ice_ref_salinity*p001 + mslts = mices*ice_ref_salinity*p001 - ! change in salt mass - delmsltn = delmxn*ice_ref_salinity*p001 - delmslts = delmxs*ice_ref_salinity*p001 + ! change in salt mass + delmsltn = delmxn*ice_ref_salinity*p001 + delmslts = delmxs*ice_ref_salinity*p001 + endif ! salt error serrn = (sfsaltn + delmsltn) / (msltn + c1) @@ -1275,7 +1298,7 @@ subroutine init_mass_diags rhoi, rhos, rhofresh real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & - work1 + work1, work2 character(len=*), parameter :: subname = '(init_mass_diags)' @@ -1310,6 +1333,12 @@ subroutine init_mass_diags toten = global_sum(work1, distrb_info, field_loc_center, tarean) totes = global_sum(work1, distrb_info, field_loc_center, tareas) + ! north/south salt + call total_salt (work2) + totsn = global_sum(work2, distrb_info, field_loc_center, tarean) + totss = global_sum(work2, distrb_info, field_loc_center, tareas) + + if (print_points) then do n = 1, npnt if (my_task == pmloc(n)) then diff --git a/cicecore/cicedyn/analysis/ice_history.F90 b/cicecore/cicedyn/analysis/ice_history.F90 index 70288fc6f..9ba5cf4d4 100644 --- a/cicecore/cicedyn/analysis/ice_history.F90 +++ b/cicecore/cicedyn/analysis/ice_history.F90 @@ -2164,12 +2164,13 @@ subroutine accum_hist (dt) real (kind=dbl_kind) :: awtvdr, awtidr, awtvdf, awtidf, puny, secday, rad_to_deg real (kind=dbl_kind) :: Tffresh, rhoi, rhos, rhow, ice_ref_salinity - real (kind=dbl_kind) :: rho_ice, rho_ocn, Tice, Sbr, phi, rhob, dfresh, dfsalt + real (kind=dbl_kind) :: rho_ice, rho_ocn, Tice, Sbr, phi, rhob, dfresh, dfsalt, sicen logical (kind=log_kind) :: formdrag, skl_bgc logical (kind=log_kind) :: tr_pond, tr_aero, tr_brine, tr_snow integer (kind=int_kind) :: ktherm integer (kind=int_kind) :: nt_sice, nt_qice, nt_qsno, nt_iage, nt_FY, nt_Tsfc, & nt_alvl, nt_vlvl + character (len=char_len) :: saltflux_option type (block) :: & this_block ! 
block information for current block @@ -2181,6 +2182,7 @@ subroutine accum_hist (dt) call icepack_query_parameters(Tffresh_out=Tffresh, rhoi_out=rhoi, rhos_out=rhos, & rhow_out=rhow, ice_ref_salinity_out=ice_ref_salinity) call icepack_query_parameters(formdrag_out=formdrag, skl_bgc_out=skl_bgc, ktherm_out=ktherm) + call icepack_query_parameters(saltflux_option_out=saltflux_option) call icepack_query_tracer_flags(tr_pond_out=tr_pond, tr_aero_out=tr_aero, & tr_brine_out=tr_brine, tr_snow_out=tr_snow) call icepack_query_tracer_indices(nt_sice_out=nt_sice, nt_qice_out=nt_qice, & @@ -2265,7 +2267,7 @@ subroutine accum_hist (dt) !--------------------------------------------------------------- !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block, & - !$OMP k,n,qn,ns,sn,rho_ocn,rho_ice,Tice,Sbr,phi,rhob,dfresh,dfsalt, & + !$OMP k,n,qn,ns,sn,rho_ocn,rho_ice,Tice,Sbr,phi,rhob,dfresh,dfsalt,sicen, & !$OMP worka,workb,worka3,Tinz4d,Sinz4d,Tsnz4d) do iblk = 1, nblocks @@ -3224,7 +3226,16 @@ subroutine accum_hist (dt) dfresh = -rhoi*frazil(i,j,iblk)/dt endif endif - dfsalt = ice_ref_salinity*p001*dfresh + if (saltflux_option == 'prognostic') then + sicen = c0 + do k = 1, nzilyr + sicen = sicen + trcr(i,j,nt_sice+k-1,iblk)*vice(i,j,iblk) & + / real(nzilyr,kind=dbl_kind) + enddo + dfsalt = sicen*p001*dfresh + else + dfsalt = ice_ref_salinity*p001*dfresh + endif worka(i,j) = aice(i,j,iblk)*(fsalt(i,j,iblk)+dfsalt) endif enddo diff --git a/cicecore/cicedyn/analysis/ice_history_shared.F90 b/cicecore/cicedyn/analysis/ice_history_shared.F90 index d9c62edde..70aa5e14c 100644 --- a/cicecore/cicedyn/analysis/ice_history_shared.F90 +++ b/cicecore/cicedyn/analysis/ice_history_shared.F90 @@ -40,7 +40,7 @@ module ice_history_shared logical (kind=log_kind), public :: & hist_avg ! if true, write averaged data instead of snapshots - character (len=char_len), public :: & + character (len=char_len_long), public :: & history_file , & ! output file for history incond_file ! 
output file for snapshot initial conditions diff --git a/cicecore/cicedyn/dynamics/ice_dyn_evp_1d.F90 b/cicecore/cicedyn/dynamics/ice_dyn_evp_1d.F90 index e874611bd..b7daab0a0 100644 --- a/cicecore/cicedyn/dynamics/ice_dyn_evp_1d.F90 +++ b/cicecore/cicedyn/dynamics/ice_dyn_evp_1d.F90 @@ -889,7 +889,7 @@ subroutine evp1d_halo_update(NAVEL_len, lb, ub, uvel, vvel, & #ifdef _OPENACC !$acc parallel & - !$acc present(uvel, vvel) & + !$acc present(uvel, vvel) !$acc loop do iw = 1, NAVEL_len if (halo_parent(iw) == 0) cycle diff --git a/cicecore/cicedyn/general/ice_init.F90 b/cicecore/cicedyn/general/ice_init.F90 index 2bfe0d8e5..2bebd32f6 100644 --- a/cicecore/cicedyn/general/ice_init.F90 +++ b/cicecore/cicedyn/general/ice_init.F90 @@ -17,11 +17,11 @@ module ice_init use ice_constants, only: c0, c1, c2, c3, c5, c12, p2, p3, p5, p75, p166, & cm_to_m use ice_exit, only: abort_ice - use ice_fileunits, only: nu_nml, nu_diag, nu_diag_set, nml_filename, diag_type, & + use ice_fileunits, only: nu_nml, nu_diag, nml_filename, diag_type, & ice_stdout, get_fileunit, release_fileunit, bfbflag, flush_fileunit, & ice_IOUnitsMinUnit, ice_IOUnitsMaxUnit #ifdef CESMCOUPLED - use ice_fileunits, only: inst_suffix + use ice_fileunits, only: inst_suffix, nu_diag_set #endif use icepack_intfc, only: icepack_warnings_flush, icepack_warnings_aborted use icepack_intfc, only: icepack_aggregate @@ -151,7 +151,7 @@ subroutine input_data kitd, kcatbound, ktransport character (len=char_len) :: shortwave, albedo_type, conduct, fbot_xfer_type, & - tfrz_option, frzpnd, atmbndy, wave_spec_type, snwredist, snw_aging_table, & + tfrz_option, saltflux_option, frzpnd, atmbndy, wave_spec_type, snwredist, snw_aging_table, & capping_method logical (kind=log_kind) :: calc_Tsfc, formdrag, highfreq, calc_strair, wave_spec, & @@ -163,7 +163,7 @@ subroutine input_data integer (kind=int_kind) :: numin, numax ! unit number limits integer (kind=int_kind) :: rplvl, rptopo - real (kind=dbl_kind) :: Cf, ksno, puny + real (kind=dbl_kind) :: Cf, ksno, puny, ice_ref_salinity character (len=char_len) :: abort_list character (len=128) :: tmpstr2 @@ -260,6 +260,7 @@ subroutine input_data highfreq, natmiter, atmiter_conv, calc_dragio, & ustar_min, emissivity, iceruf, iceruf_ocn, & fbot_xfer_type, update_ocn_f, l_mpond_fresh, tfrz_option, & + saltflux_option,ice_ref_salinity, & oceanmixed_ice, restore_ice, restore_ocn, trestore, & precip_units, default_season, wave_spec_type,nfreq, & atm_data_type, ocn_data_type, bgc_data_type, fe_data_type, & @@ -497,6 +498,8 @@ subroutine input_data precip_units = 'mks' ! 'mm_per_month' or ! 'mm_per_sec' = 'mks' = kg/m^2 s tfrz_option = 'mushy' ! freezing temp formulation + saltflux_option = 'constant' ! saltflux calculation + ice_ref_salinity = 4.0_dbl_kind ! Ice reference salinity for coupling oceanmixed_ice = .false. ! if true, use internal ocean mixed layer wave_spec_type = 'none' ! type of wave spectrum forcing nfreq = 25 ! number of wave frequencies @@ -758,8 +761,8 @@ subroutine input_data ! each task gets unique ice log filename when if test is true, for debugging if (1 == 0) then call get_fileUnit(nu_diag) - write(tmpstr,'(a,i4.4)') "ice.log.task_",my_task - open(nu_diag,file=tmpstr) + write(tmpstr2,'(a,i4.4)') "ice.log.task_",my_task + open(nu_diag,file=tmpstr2) endif end if if (trim(ice_ic) /= 'default' .and. 
& @@ -979,6 +982,8 @@ subroutine input_data call broadcast_scalar(wave_spec_file, master_task) call broadcast_scalar(nfreq, master_task) call broadcast_scalar(tfrz_option, master_task) + call broadcast_scalar(saltflux_option, master_task) + call broadcast_scalar(ice_ref_salinity, master_task) call broadcast_scalar(ocn_data_format, master_task) call broadcast_scalar(bgc_data_type, master_task) call broadcast_scalar(fe_data_type, master_task) @@ -1414,6 +1419,12 @@ subroutine input_data write(nu_diag,*) subname//' WARNING: For consistency, set tfrz_option = mushy' endif endif + if (ktherm == 1 .and. trim(saltflux_option) /= 'constant') then + if (my_task == master_task) then + write(nu_diag,*) subname//' WARNING: ktherm = 1 and saltflux_option = ',trim(saltflux_option) + write(nu_diag,*) subname//' WARNING: For consistency, set saltflux_option = constant' + endif + endif !tcraig if (ktherm == 1 .and. .not.sw_redist) then if (my_task == master_task) then @@ -1974,6 +1985,10 @@ subroutine input_data write(nu_diag,*) ' WARNING: will impact ocean forcing interaction' write(nu_diag,*) ' WARNING: coupled forcing will be modified by mixed layer routine' endif + write(nu_diag,1030) ' saltflux_option = ', trim(saltflux_option) + if (trim(saltflux_option) == 'constant') then + write(nu_diag,1002) ' ice_ref_salinity = ',ice_ref_salinity + endif if (trim(tfrz_option) == 'minus1p8') then tmpstr2 = ' : constant ocean freezing temperature (-1.8C)' elseif (trim(tfrz_option) == 'linear_salt') then @@ -2378,6 +2393,7 @@ subroutine input_data wave_spec_type_in = wave_spec_type, & wave_spec_in=wave_spec, nfreq_in=nfreq, & tfrz_option_in=tfrz_option, kalg_in=kalg, fbot_xfer_type_in=fbot_xfer_type, & + saltflux_option_in=saltflux_option, ice_ref_salinity_in=ice_ref_salinity, & Pstar_in=Pstar, Cstar_in=Cstar, iceruf_in=iceruf, iceruf_ocn_in=iceruf_ocn, calc_dragio_in=calc_dragio, & windmin_in=windmin, drhosdwind_in=drhosdwind, & rsnw_fall_in=rsnw_fall, rsnw_tmax_in=rsnw_tmax, rhosnew_in=rhosnew, & @@ -2793,7 +2809,7 @@ subroutine set_state_var (nx_block, ny_block, & indxi, indxj ! compressed indices for cells with aicen > puny real (kind=dbl_kind) :: & - Tsfc, sum, hbar, abar, puny, rhos, Lfresh, rad_to_deg, rsnw_fall, dist_ratio + Tsfc, sum, hbar, abar, puny, rhos, Lfresh, rad_to_deg, rsnw_fall, dist_ratio, Tffresh real (kind=dbl_kind), dimension(ncat) :: & ainit, hinit ! initial area, thickness @@ -2835,7 +2851,7 @@ subroutine set_state_var (nx_block, ny_block, & nt_smice_out=nt_smice, nt_smliq_out=nt_smliq, & nt_rhos_out=nt_rhos, nt_rsnw_out=nt_rsnw) call icepack_query_parameters(rhos_out=rhos, Lfresh_out=Lfresh, puny_out=puny, & - rad_to_deg_out=rad_to_deg, rsnw_fall_out=rsnw_fall) + rad_to_deg_out=rad_to_deg, rsnw_fall_out=rsnw_fall, Tffresh_out=Tffresh) call icepack_query_parameters(secday_out=secday, pi_out=pi) call icepack_warnings_flush(nu_diag) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & @@ -3073,7 +3089,12 @@ subroutine set_state_var (nx_block, ny_block, & do i = ilo, ihi if (tmask(i,j)) then ! place ice in high latitudes where ocean sfc is cold +#ifdef CESMCOUPLED + ! Option to use Tair instead. + if ( (Tair (i,j) <= Tffresh) .and. & +#else if ( (sst (i,j) <= Tf(i,j)+p2) .and. & +#endif (TLAT(i,j) < edge_init_sh/rad_to_deg .or. 
& TLAT(i,j) > edge_init_nh/rad_to_deg) ) then icells = icells + 1 diff --git a/cicecore/cicedyn/infrastructure/ice_grid.F90 b/cicecore/cicedyn/infrastructure/ice_grid.F90 index dfccdd413..d193eca02 100644 --- a/cicecore/cicedyn/infrastructure/ice_grid.F90 +++ b/cicecore/cicedyn/infrastructure/ice_grid.F90 @@ -507,23 +507,26 @@ subroutine init_grid2 ! Diagnose OpenMP thread schedule, force order in output !----------------------------------------------------------------- +! This code does not work in CESM. Needs to be investigated further. +#ifndef CESMCOUPLED #if defined (_OPENMP) - !$OMP PARALLEL DO ORDERED PRIVATE(iblk) SCHEDULE(runtime) - do iblk = 1, nblocks - if (my_task == master_task) then - !$OMP ORDERED - if (iblk == 1) then - call omp_get_schedule(ompsk,ompcs) - write(nu_diag,*) '' - write(nu_diag,*) subname,' OpenMP runtime thread schedule:' - write(nu_diag,*) subname,' omp schedule = ',ompsk,ompcs - endif - write(nu_diag,*) subname,' block, thread = ',iblk,OMP_GET_THREAD_NUM() - call flush_fileunit(nu_diag) - !$OMP END ORDERED - endif - enddo - !$OMP END PARALLEL DO + !$OMP PARALLEL DO ORDERED PRIVATE(iblk) SCHEDULE(runtime) + do iblk = 1, nblocks + if (my_task == master_task) then + !$OMP ORDERED + if (iblk == 1) then + call omp_get_schedule(ompsk,ompcs) +! write(nu_diag,*) '' + write(nu_diag,*) subname,' OpenMP runtime thread schedule:' + write(nu_diag,*) subname,' omp schedule = ',ompsk,ompcs + endif + write(nu_diag,*) subname,' block, thread = ',iblk,OMP_GET_THREAD_NUM() + call flush_fileunit(nu_diag) + !$OMP END ORDERED + endif + enddo + !$OMP END PARALLEL DO +#endif #endif !----------------------------------------------------------------- diff --git a/cicecore/cicedyn/infrastructure/io/io_pio2/ice_restart.F90 b/cicecore/cicedyn/infrastructure/io/io_pio2/ice_restart.F90 index 10fcf8b81..7019f7128 100644 --- a/cicecore/cicedyn/infrastructure/io/io_pio2/ice_restart.F90 +++ b/cicecore/cicedyn/infrastructure/io/io_pio2/ice_restart.F90 @@ -749,10 +749,6 @@ subroutine read_restart_field(nu,nrec,work,atype,vname,ndim3,diag, & ! if (ndim3 == ncat .and. ncat>1) then if (ndim3 == ncat .and. ndims == 3) then call pio_read_darray(File, vardesc, iodesc3d_ncat, work, status) -!#ifndef CESM1_PIO -!! This only works for PIO2 -! where (work == PIO_FILL_DOUBLE) work = c0 -!#endif if (present(field_loc)) then do n=1,ndim3 call ice_HaloUpdate (work(:,:,n,:), halo_info, & @@ -762,10 +758,6 @@ subroutine read_restart_field(nu,nrec,work,atype,vname,ndim3,diag, & ! elseif (ndim3 == 1) then elseif (ndim3 == 1 .and. ndims == 2) then call pio_read_darray(File, vardesc, iodesc2d, work, status) -!#ifndef CESM1_PIO -!! This only works for PIO2 -! where (work == PIO_FILL_DOUBLE) work = c0 -!#endif if (present(field_loc)) then call ice_HaloUpdate (work(:,:,1,:), halo_info, & field_loc, field_type) diff --git a/cicecore/drivers/nuopc/cmeps/CICE_InitMod.F90 b/cicecore/drivers/nuopc/cmeps/CICE_InitMod.F90 index 091a948bb..0ba672f3d 100644 --- a/cicecore/drivers/nuopc/cmeps/CICE_InitMod.F90 +++ b/cicecore/drivers/nuopc/cmeps/CICE_InitMod.F90 @@ -107,8 +107,8 @@ subroutine cice_init2() call init_hist (dt) ! initialize output history file if (kdyn == 1) then - call init_evp ! allocate dyn_evp arrays - if (kdyn == 2) then + call init_evp ! define evp dynamics parameters, variables + elseif (kdyn == 2) then call init_eap ! define eap dynamics parameters, variables else if (kdyn == 3) then call init_vp ! 
define vp dynamics parameters, variables diff --git a/cicecore/shared/ice_fileunits.F90 b/cicecore/shared/ice_fileunits.F90 index 1854dda64..c8ca3a937 100644 --- a/cicecore/shared/ice_fileunits.F90 +++ b/cicecore/shared/ice_fileunits.F90 @@ -80,8 +80,10 @@ module ice_fileunits integer (kind=int_kind), public :: & nu_diag = ice_stdout ! diagnostics output file, unit number may be overwritten +#ifdef CESMCOUPLED logical (kind=log_kind), public :: & nu_diag_set = .false. ! flag to indicate whether nu_diag is already set +#endif integer (kind=int_kind), public :: & ice_IOUnitsMinUnit = 11, & ! do not use unit numbers below @@ -116,7 +118,11 @@ subroutine init_fileunits ice_IOUnitsInUse(ice_stdout) = .true. ! reserve unit 6 ice_IOUnitsInUse(ice_stderr) = .true. if (nu_diag >= 1 .and. nu_diag <= ice_IOUnitsMaxUnit) & - ice_IOUnitsInUse(nu_diag) = .true. ! reserve unit nu_diag + ice_IOUnitsInUse(nu_diag) = .true. ! reserve unit nu_diag +#ifdef CESMCOUPLED + ! CESM can have negative unit numbers. + if (nu_diag < 0) nu_diag_set = .true. +#endif call get_fileunit(nu_grid) call get_fileunit(nu_kmt) @@ -239,7 +245,12 @@ subroutine release_all_fileunits call release_fileunit(nu_rst_pointer) call release_fileunit(nu_history) call release_fileunit(nu_hdr) +#ifdef CESMCOUPLED + ! CESM can have negative unit numbers + if (nu_diag > 0 .and. nu_diag /= ice_stdout) call release_fileunit(nu_diag) +#else if (nu_diag /= ice_stdout) call release_fileunit(nu_diag) +#endif end subroutine release_all_fileunits diff --git a/configuration/scripts/ice_in b/configuration/scripts/ice_in index 8262f34ec..32db0270b 100644 --- a/configuration/scripts/ice_in +++ b/configuration/scripts/ice_in @@ -254,6 +254,8 @@ update_ocn_f = .false. l_mpond_fresh = .false. tfrz_option = 'mushy' + saltflux_option = 'constant' + ice_ref_salinity = 4.0 oceanmixed_ice = .true. wave_spec_type = 'none' wave_spec_file = 'unknown_wave_spec_file' diff --git a/configuration/scripts/options/set_nml.saltflux b/configuration/scripts/options/set_nml.saltflux new file mode 100644 index 000000000..d50ddc4e3 --- /dev/null +++ b/configuration/scripts/options/set_nml.saltflux @@ -0,0 +1,2 @@ + ktherm = 2 + saltflux_option = 'prognostic' diff --git a/configuration/scripts/tests/base_suite.ts b/configuration/scripts/tests/base_suite.ts index b659fcb19..8685ab9a8 100644 --- a/configuration/scripts/tests/base_suite.ts +++ b/configuration/scripts/tests/base_suite.ts @@ -22,6 +22,7 @@ restart gx3 4x4 alt04 restart gx3 4x4 alt05 restart gx3 8x2 alt06 restart gx3 8x3 alt07 +restart gx3 8x3 saltflux restart gx3 18x2 debug,maskhalo restart gx3 6x2 alt01,debug,short restart gx3 8x2 alt02,debug,short diff --git a/doc/source/cice_index.rst b/doc/source/cice_index.rst index a8b9d08f1..0e9d21517 100644 --- a/doc/source/cice_index.rst +++ b/doc/source/cice_index.rst @@ -350,7 +350,7 @@ either Celsius or Kelvin units). Deprecated parameters are listed at the end. "ice_ic", "choice of initial conditions (see :ref:`tab-ic`)", "" "ice_stdout", "unit number for standard output", "" "ice_stderr", "unit number for standard error output", "" - "ice_ref_salinity", "reference salinity for ice–ocean exchanges", "4. 
ppt" + "ice_ref_salinity", "reference salinity for ice–ocean exchanges", "" "icells", "number of grid cells with specified property (for vectorization)", "" "iceruf", "ice surface roughness at atmosphere interface", "5.\ :math:`\times`\ 10\ :math:`^{-4}` m" "iceruf_ocn", "under-ice roughness (at ocean interface)", "0.03 m" @@ -677,6 +677,7 @@ either Celsius or Kelvin units). Deprecated parameters are listed at the end. "Tf", "freezing temperature", "C" "Tffresh", "freezing temp of fresh ice", "273.15 K" "tfrz_option", "form of ocean freezing temperature", "" + "saltflux_option", "form of coupled salt flux ", "" "thinS", "minimum ice thickness for brine tracer", "" "timer_stats", "logical to turn on extra timer statistics", ".false." "timesecs", "total elapsed time in seconds", "s" diff --git a/doc/source/user_guide/ug_case_settings.rst b/doc/source/user_guide/ug_case_settings.rst index 74f8ab11f..587adcd56 100644 --- a/doc/source/user_guide/ug_case_settings.rst +++ b/doc/source/user_guide/ug_case_settings.rst @@ -644,6 +644,7 @@ forcing_nml "", "``eastblock``", "ice block covering about 25 percent of domain at the east edge of the domain", "" "", "``latsst``", "ice dependent on latitude and ocean temperature", "" "", "``uniform``", "ice defined at all grid points", "" + "``ice_ref_salinity``", "real", "sea ice salinity for coupling fluxes (ppt)", "4.0" "``iceruf``", "real", "ice surface roughness at atmosphere interface in meters", "0.0005" "``l_mpond_fresh``", "``.false.``", "release pond water immediately to ocean", "``.false.``" "", "``true``", "retain (topo) pond water until ponds drain", "" @@ -666,6 +667,8 @@ forcing_nml "``restore_ocn``", "logical", "restore sst to data", "``.false.``" "``restore_ice``", "logical", "restore ice state along lateral boundaries", "``.false.``" "``rotate_wind``", "logical", "rotate wind from east/north to computation grid", "``.true.``" + "``saltflux_option``", "``constant``", "computed using ice_ref_salinity", "``constant``" + "", "``prognostic``", "computed using prognostic salinity", "" "``tfrz_option``", "``linear_salt``", "linear function of salinity (ktherm=1)", "``mushy``" "", "``minus1p8``", "constant ocean freezing temperature (:math:`-1.8^{\circ} C`)", "" "", "``mushy``", "matches mushy-layer thermo (ktherm=2)", "" diff --git a/icepack b/icepack index 599955139..8637a2d28 160000 --- a/icepack +++ b/icepack @@ -1 +1 @@ -Subproject commit 5999551399b49587b78411c2b0c8c1b9473b1f9b +Subproject commit 8637a2d287f078c267f7912753919e54f188c434 From 48cf07a336210dcd810574a1f64130555afeb66e Mon Sep 17 00:00:00 2001 From: Tony Craig Date: Tue, 6 Dec 2022 20:04:02 -0800 Subject: [PATCH 44/57] Update stress12T calculation for C-grid (#802) * Update stress12T calculation for C-grid stress12T was zero. Compute stress12T in subroutine stressC_T after computing estimate of shearT by averaging shearU. minor updates to some indentation and variable names. 
* Update calc of stress12T and shearT for C-grid for performance, changes answers --- cicecore/cicedyn/dynamics/ice_dyn_evp.F90 | 89 +++++++++++++---------- 1 file changed, 52 insertions(+), 37 deletions(-) diff --git a/cicecore/cicedyn/dynamics/ice_dyn_evp.F90 b/cicecore/cicedyn/dynamics/ice_dyn_evp.F90 index 69305e131..cf111cccf 100644 --- a/cicecore/cicedyn/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedyn/dynamics/ice_dyn_evp.F90 @@ -945,7 +945,8 @@ subroutine evp (dt) uarea (:,:,iblk), DminTarea (:,:,iblk), & strength (:,:,iblk), shearU (:,:,iblk), & zetax2T (:,:,iblk), etax2T (:,:,iblk), & - stresspT (:,:,iblk), stressmT (:,:,iblk)) + stresspT (:,:,iblk), stressmT (:,:,iblk), & + stress12T (:,:,iblk)) enddo !$OMP END PARALLEL DO @@ -1730,7 +1731,8 @@ subroutine stressC_T (nx_block, ny_block , & uarea , DminTarea , & strength , shearU , & zetax2T , etax2T , & - stressp , stressm ) + stresspT , stressmT , & + stress12T) use ice_dyn_shared, only: strain_rates_T, capping, & visc_replpress, e_factor @@ -1744,24 +1746,25 @@ subroutine stressC_T (nx_block, ny_block , & indxTj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - uvelE , & ! x-component of velocity (m/s) at the E point - vvelE , & ! y-component of velocity (m/s) at the E point - uvelN , & ! x-component of velocity (m/s) at the N point - vvelN , & ! y-component of velocity (m/s) at the N point - dxN , & ! width of N-cell through the middle (m) - dyE , & ! height of E-cell through the middle (m) - dxT , & ! width of T-cell through the middle (m) - dyT , & ! height of T-cell through the middle (m) - strength , & ! ice strength (N/m) - shearU , & ! shearU local for this routine - uarea , & ! area of u cell - DminTarea ! deltaminEVP*tarea + uvelE , & ! x-component of velocity (m/s) at the E point + vvelE , & ! y-component of velocity (m/s) at the E point + uvelN , & ! x-component of velocity (m/s) at the N point + vvelN , & ! y-component of velocity (m/s) at the N point + dxN , & ! width of N-cell through the middle (m) + dyE , & ! height of E-cell through the middle (m) + dxT , & ! width of T-cell through the middle (m) + dyT , & ! height of T-cell through the middle (m) + strength , & ! ice strength (N/m) + shearU , & ! shearU local for this routine + uarea , & ! area of u cell + DminTarea ! deltaminEVP*tarea real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & - zetax2T , & ! zetax2 = 2*zeta (bulk viscosity) - etax2T , & ! etax2 = 2*eta (shear viscosity) - stressp , & ! sigma11+sigma22 - stressm ! sigma11-sigma22 + zetax2T , & ! zetax2 = 2*zeta (bulk viscosity) + etax2T , & ! etax2 = 2*eta (shear viscosity) + stresspT , & ! sigma11+sigma22 + stressmT , & ! sigma11-sigma22 + stress12T ! sigma12 ! local variables @@ -1769,12 +1772,14 @@ subroutine stressC_T (nx_block, ny_block , & i, j, ij real (kind=dbl_kind), dimension (nx_block,ny_block) :: & - divT , & ! divergence at T point - tensionT ! tension at T point + divT , & ! divergence at T point + tensionT ! tension at T point real (kind=dbl_kind) :: & shearTsqr , & ! strain rates squared at T point + shearT , & ! strain rate at T point DeltaT , & ! delt at T point + uareaavgr , & ! 1 / uarea avg rep_prsT ! replacement pressure at T point character(len=*), parameter :: subname = '(stressC_T)' @@ -1801,11 +1806,19 @@ subroutine stressC_T (nx_block, ny_block , & ! 
U point values (Bouillon et al., 2013, Kimmritz et al., 2016 !----------------------------------------------------------------- + uareaavgr = c1/(uarea(i,j)+uarea(i,j-1)+uarea(i-1,j-1)+uarea(i-1,j)) + shearTsqr = (shearU(i ,j )**2 * uarea(i ,j ) & + shearU(i ,j-1)**2 * uarea(i ,j-1) & + shearU(i-1,j-1)**2 * uarea(i-1,j-1) & + shearU(i-1,j )**2 * uarea(i-1,j )) & - / (uarea(i,j)+uarea(i,j-1)+uarea(i-1,j-1)+uarea(i-1,j)) + * uareaavgr + + shearT = (shearU(i ,j ) * uarea(i ,j ) & + + shearU(i ,j-1) * uarea(i ,j-1) & + + shearU(i-1,j-1) * uarea(i-1,j-1) & + + shearU(i-1,j ) * uarea(i-1,j )) & + * uareaavgr DeltaT = sqrt(divT(i,j)**2 + e_factor*(tensionT(i,j)**2 + shearTsqr)) @@ -1822,11 +1835,14 @@ subroutine stressC_T (nx_block, ny_block , & ! NOTE: for comp. efficiency 2 x zeta and 2 x eta are used in the code - stressp(i,j) = (stressp(i,j)*(c1-arlx1i*revp) & - + arlx1i*(zetax2T(i,j)*divT(i,j) - rep_prsT)) * denom1 + stresspT(i,j) = (stresspT (i,j)*(c1-arlx1i*revp) & + + arlx1i*(zetax2T(i,j)*divT(i,j) - rep_prsT)) * denom1 - stressm(i,j) = (stressm(i,j)*(c1-arlx1i*revp) & - + arlx1i*etax2T(i,j)*tensionT(i,j)) * denom1 + stressmT(i,j) = (stressmT (i,j)*(c1-arlx1i*revp) & + + arlx1i*etax2T(i,j)*tensionT(i,j)) * denom1 + + stress12T(i,j) = (stress12T(i,j)*(c1-arlx1i*revp) & + + arlx1i*p5*etax2T(i,j)*shearT ) * denom1 enddo ! ij @@ -1851,7 +1867,7 @@ subroutine stressC_U (nx_block , ny_block ,& uarea , & etax2U , deltaU ,& strengthU, shearU ,& - stress12) + stress12U) use ice_dyn_shared, only: visc_replpress, & visc_method, deltaminEVP, capping @@ -1872,7 +1888,7 @@ subroutine stressC_U (nx_block , ny_block ,& strengthU ! ice strength at the U point real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & - stress12 ! sigma12 + stress12U ! sigma12 ! local variables @@ -1891,15 +1907,15 @@ subroutine stressC_U (nx_block , ny_block ,& ! viscosities and replacement pressure at U point ! avg_zeta: Bouillon et al. 2013, C1 method of Kimmritz et al. 2016 ! avg_strength: C2 method of Kimmritz et al. 2016 - ! if outside do and stress12 equation repeated in each loop for performance + ! if outside do and stress12U equation repeated in each loop for performance !----------------------------------------------------------------- if (visc_method == 'avg_zeta') then do ij = 1, icellU i = indxUi(ij) j = indxUj(ij) - stress12(i,j) = (stress12(i,j)*(c1-arlx1i*revp) & - + arlx1i*p5*etax2U(i,j)*shearU(i,j)) * denom1 + stress12U(i,j) = (stress12U(i,j)*(c1-arlx1i*revp) & + + arlx1i*p5*etax2U(i,j)*shearU(i,j)) * denom1 enddo elseif (visc_method == 'avg_strength') then @@ -1911,8 +1927,8 @@ subroutine stressC_U (nx_block , ny_block ,& ! minimal extra calculations here even though it seems like there is call visc_replpress (strengthU(i,j), DminUarea, deltaU(i,j), & lzetax2U , letax2U , lrep_prsU , capping) - stress12(i,j) = (stress12(i,j)*(c1-arlx1i*revp) & - + arlx1i*p5*letax2U*shearU(i,j)) * denom1 + stress12U(i,j) = (stress12U(i,j)*(c1-arlx1i*revp) & + + arlx1i*p5*letax2U*shearU(i,j)) * denom1 enddo endif @@ -1976,7 +1992,7 @@ subroutine stressCD_T (nx_block, ny_block , & real (kind=dbl_kind), dimension (nx_block,ny_block) :: & divT , & ! divergence at T point tensionT , & ! tension at T point - shearT , & ! sheat at T point + shearT , & ! shear at T point DeltaT ! 
delt at T point real (kind=dbl_kind) :: & @@ -1991,7 +2007,7 @@ subroutine stressCD_T (nx_block, ny_block , & call strain_rates_T (nx_block , ny_block , & icellT , & - indxTi (:), indxTj (:) , & + indxTi(:) , indxTj (:) , & uvelE (:,:), vvelE (:,:), & uvelN (:,:), vvelN (:,:), & dxN (:,:), dyE (:,:), & @@ -2046,8 +2062,7 @@ subroutine stressCD_U (nx_block, ny_block, & stresspU , stressmU, & stress12U) - use ice_dyn_shared, only: strain_rates_U, & - visc_replpress, & + use ice_dyn_shared, only: visc_replpress, & visc_method, deltaminEVP, capping integer (kind=int_kind), intent(in) :: & @@ -2059,7 +2074,7 @@ subroutine stressCD_U (nx_block, ny_block, & indxUj ! compressed index in j-direction real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - uarea , & ! area of U-cell (m^2) + uarea , & ! area of U-cell (m^2) zetax2U , & ! 2*zeta at U point etax2U , & ! 2*eta at U point strengthU, & ! ice strength at U point From f8132941081d565739d764bb38c6beb26cade22c Mon Sep 17 00:00:00 2001 From: Tony Craig Date: Wed, 7 Dec 2022 15:00:52 -0800 Subject: [PATCH 45/57] Update version to CICE 6.4.1 (#803) Update Icepack to latest release version Remove trailing blank space --- cicecore/cicedyn/dynamics/ice_dyn_shared.F90 | 4 +- cicecore/cicedyn/dynamics/ice_dyn_vp.F90 | 6 +- cicecore/cicedyn/general/ice_forcing.F90 | 12 +-- cicecore/cicedyn/general/ice_init.F90 | 2 +- cicecore/cicedyn/infrastructure/ice_grid.F90 | 76 +++++++++---------- .../drivers/direct/hadgem3/CICE_InitMod.F90 | 2 +- .../direct/nemo_concepts/CICE_InitMod.F90 | 2 +- cicecore/drivers/mct/cesm1/CICE_InitMod.F90 | 2 +- .../drivers/nuopc/cmeps/ice_comp_nuopc.F90 | 2 +- cicecore/drivers/nuopc/dmi/CICE_InitMod.F90 | 2 +- .../drivers/standalone/cice/CICE_InitMod.F90 | 2 +- .../unittest/gridavgchk/CICE_InitMod.F90 | 2 +- .../drivers/unittest/sumchk/CICE_InitMod.F90 | 2 +- cicecore/version.txt | 2 +- doc/source/conf.py | 4 +- icepack | 2 +- 16 files changed, 62 insertions(+), 62 deletions(-) diff --git a/cicecore/cicedyn/dynamics/ice_dyn_shared.F90 b/cicecore/cicedyn/dynamics/ice_dyn_shared.F90 index 5e2757b93..a12e6fddd 100644 --- a/cicecore/cicedyn/dynamics/ice_dyn_shared.F90 +++ b/cicecore/cicedyn/dynamics/ice_dyn_shared.F90 @@ -181,7 +181,7 @@ subroutine alloc_dyn_shared iceTmask (nx_block,ny_block,max_blocks), & ! T mask for dynamics iceUmask (nx_block,ny_block,max_blocks), & ! U mask for dynamics fcor_blk (nx_block,ny_block,max_blocks), & ! Coriolis - DminTarea (nx_block,ny_block,max_blocks), & ! + DminTarea (nx_block,ny_block,max_blocks), & ! stat=ierr) if (ierr/=0) call abort_ice(subname//': Out of memory') @@ -240,7 +240,7 @@ subroutine init_dyn_shared (dt) call set_evp_parameters (dt) ! allocate dyn shared (init_uvel,init_vvel) - call alloc_dyn_shared + call alloc_dyn_shared ! Set halo_dynbundle, this is empirical at this point, could become namelist halo_dynbundle = .true. nprocs = get_num_procs() diff --git a/cicecore/cicedyn/dynamics/ice_dyn_vp.F90 b/cicecore/cicedyn/dynamics/ice_dyn_vp.F90 index 5a01f4308..3915004b4 100644 --- a/cicecore/cicedyn/dynamics/ice_dyn_vp.F90 +++ b/cicecore/cicedyn/dynamics/ice_dyn_vp.F90 @@ -88,7 +88,7 @@ module ice_dyn_vp reltol_andacc ! relative tolerance for Anderson acceleration character (len=char_len), public :: & - precond , & ! preconditioner for fgmres: 'ident' (identity), 'diag' (diagonal), + precond , & ! preconditioner for fgmres: 'ident' (identity), 'diag' (diagonal), ! 'pgmres' (Jacobi-preconditioned GMRES) algo_nonlin , & ! 
nonlinear algorithm: 'picard' (Picard iteration), 'anderson' (Anderson acceleration) ortho_type ! type of orthogonalization for FGMRES ('cgs' or 'mgs') @@ -3344,7 +3344,7 @@ subroutine pgmres (zetax2 , etax2 , & ! Update workspace with boundary values ! NOTE: skipped for efficiency since this is just a preconditioner - ! unless bfbflag is active + ! unless bfbflag is active if (bfbflag /= 'off') then call stack_fields(workspace_x, workspace_y, fld2) call ice_timer_start(timer_bound) @@ -3565,7 +3565,7 @@ subroutine precondition(zetax2 , etax2, & type (ice_halo), intent(in) :: & halo_info_mask ! ghost cell update info for masked halo - + real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), intent(in) :: & vx , & ! input vector (x components) vy ! input vector (y components) diff --git a/cicecore/cicedyn/general/ice_forcing.F90 b/cicecore/cicedyn/general/ice_forcing.F90 index 3a2d83530..541efb282 100755 --- a/cicecore/cicedyn/general/ice_forcing.F90 +++ b/cicecore/cicedyn/general/ice_forcing.F90 @@ -118,7 +118,7 @@ module ice_forcing real (kind=dbl_kind), dimension(:,:,:,:,:), allocatable :: & wave_spectrum_data ! field values at 2 temporal data points - + character(char_len), public :: & atm_data_format, & ! 'bin'=binary or 'nc'=netcdf ocn_data_format, & ! 'bin'=binary or 'nc'=netcdf @@ -5650,7 +5650,7 @@ subroutine get_wave_spec file=__FILE__, line=__LINE__) else #ifdef USE_NETCDF - call wave_spec_data + call wave_spec_data #else write (nu_diag,*) "wave spectrum file not available, requires cpp USE_NETCDF" write (nu_diag,*) "wave spectrum file not available, using default profile" @@ -5682,7 +5682,7 @@ subroutine wave_spec_data use ice_grid, only: hm, tlon, tlat, tmask, umask use ice_calendar, only: days_per_year, use_leap_years - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & ncid , & ! netcdf file id i, j, freq , & ixm,ixx,ixp , & ! record numbers for neighboring months @@ -5710,7 +5710,7 @@ subroutine wave_spec_data wave_spectrum_profile ! wave spectrum character(len=64) :: fieldname !netcdf field name - character(char_len_long) :: spec_file + character(char_len_long) :: spec_file character(char_len) :: wave_spec_type logical (kind=log_kind) :: wave_spec character(len=*), parameter :: subname = '(wave_spec_data)' @@ -5736,7 +5736,7 @@ subroutine wave_spec_data yr = fyear ! current year !------------------------------------------------------------------- ! 6-hourly data - ! + ! ! Assume that the 6-hourly value is located at the end of the ! 6-hour period. This is the convention for NCEP reanalysis data. ! E.g. record 1 gives conditions at 6 am GMT on 1 January. @@ -5787,7 +5787,7 @@ subroutine wave_spec_data field_type=field_type_scalar) call ice_close_nc(ncid) - + ! Interpolate call interpolate_wavespec_data (wave_spectrum_data, wave_spectrum) diff --git a/cicecore/cicedyn/general/ice_init.F90 b/cicecore/cicedyn/general/ice_init.F90 index 2bebd32f6..03ebc0174 100644 --- a/cicecore/cicedyn/general/ice_init.F90 +++ b/cicecore/cicedyn/general/ice_init.F90 @@ -1543,7 +1543,7 @@ subroutine input_data write(nu_diag,*) subname//' WARNING: tr_fsd=T but wave_spec=F - not recommended' endif end if - + ! compute grid locations for thermo, u and v fields grid_ice_thrm = 'T' diff --git a/cicecore/cicedyn/infrastructure/ice_grid.F90 b/cicecore/cicedyn/infrastructure/ice_grid.F90 index d193eca02..d6c612f00 100644 --- a/cicecore/cicedyn/infrastructure/ice_grid.F90 +++ b/cicecore/cicedyn/infrastructure/ice_grid.F90 @@ -1396,15 +1396,15 @@ subroutine rectgrid ! 
original rectgrid defines latlon first call rectgrid_scale_dxdy else - ! rectgrid no grid spacing. + ! rectgrid no grid spacing. ! original method with addition to use namelist lat/lon reference - + if (my_task == master_task) then work_g1 = c0 length = dxrect*cm_to_m/radius*rad_to_deg - + work_g1(1,:) = lonrefrect ! reference lon from namelist - + do j = 1, ny_global do i = 2, nx_global work_g1(i,j) = work_g1(i-1,j) + length ! ULON @@ -1416,13 +1416,13 @@ subroutine rectgrid field_loc_NEcorner, field_type_scalar) call ice_HaloExtrapolate(ULON, distrb_info, & ew_boundary_type, ns_boundary_type) - + if (my_task == master_task) then work_g1 = c0 length = dyrect*cm_to_m/radius*rad_to_deg - + work_g1(:,1) = latrefrect ! reference latitude from namelist - + do i = 1, nx_global do j = 2, ny_global work_g1(i,j) = work_g1(i,j-1) + length ! ULAT @@ -1535,32 +1535,32 @@ subroutine rectgrid end subroutine rectgrid !======================================================================= - + subroutine rectgrid_scale_dxdy - + ! generate a variable spaced rectangluar grid. ! extend spacing from center of grid outward. use ice_constants, only: c0, c1, c2, radius, cm_to_m, & field_loc_center, field_loc_NEcorner, field_type_scalar - + integer (kind=int_kind) :: & i, j, iblk, & imid, jmid, & center1, center2 ! array centers for expanding dx, dy - + real (kind=dbl_kind) :: & length, & rad_to_deg real (kind=dbl_kind), dimension(:,:), allocatable :: & work_g1 - + character(len=*), parameter :: subname = '(rectgrid_scale_dxdy)' - + call icepack_query_parameters(rad_to_deg_out=rad_to_deg) allocate(work_g1(nx_global,ny_global)) - + ! determine dx spacing ! strategy: initialize with dxrect. ! if want to scale the grid, work from center outwards, @@ -1568,51 +1568,51 @@ subroutine rectgrid_scale_dxdy ! this assumes dx varies in x direction only. ! (i.e, dx is the same across same y location) if (my_task == master_task) then - + ! initialize with initial dxrect work_g1(:,:) = dxrect - + ! check if nx is even or odd ! if even, middle 2 columns are center ! of odd, middle 1 column is center if (mod(nx_global,2) == 0) then ! nx_global is even - + ! with even number of x locatons, ! the center two y columns are center center1 = nx_global/2 ! integer math center2 = center1 + 1 ! integer math - + else ! nx_global = odd ! only one center index. set center2=center1 center1 = ceiling(real(nx_global/2),int_kind) center2 = center1 endif - + ! note loop over only half the x grid points (center1)-1 ! working from the center outward. do j = 1, ny_global do i = 1, center1-1 ! work from center1 to left work_g1(center1-i,j) = dxscale*work_g1(center1-i+1,j) - + ! work from center2 to right work_g1(center2+i,j) = dxscale*work_g1(center2+i-1,j) enddo ! i enddo ! j - + endif ! my_task == master_task - - + + ! note work_g1 is converted to meters in primary_grid_lengths_HTN call primary_grid_lengths_HTN(work_g1) ! dxU, dxT, dxN, dxE - + ! make ULON array if (my_task == master_task) then - + ! make first column reference lon in radians. ! the remaining work_g1 is still dx in meters work_g1(1,:) = lonrefrect/rad_to_deg ! radians - + ! loop over remaining points and add spacing to successive ! x locations do j = 1, ny_global @@ -1626,7 +1626,7 @@ subroutine rectgrid_scale_dxdy field_loc_NEcorner, field_type_scalar) call ice_HaloExtrapolate(ULON, distrb_info, & ew_boundary_type, ns_boundary_type) - + ! determine dy spacing ! strategy: initialize with dyrect. ! 
if want to scale the grid, work from center outwards, @@ -1634,7 +1634,7 @@ subroutine rectgrid_scale_dxdy ! this assumes dy varies in y direction only. ! (i.e, dy is the same across same x location) if (my_task == master_task) then - + ! initialize with initial dxrect work_g1(:,:) = dyrect @@ -1642,25 +1642,25 @@ subroutine rectgrid_scale_dxdy ! if even, middle 2 rows are center ! of odd, middle 1 row is center if (mod(ny_global,2) == 0) then ! ny_global is even - + ! with even number of x locatons, ! the center two y columns are center center1 = ny_global/2 ! integer math center2 = center1 + 1 ! integer math - + else ! ny_global = odd ! only one center index. set center2=center1 center1 = ceiling(real(ny_global/2),int_kind) center2 = center1 endif - + ! note loop over only half the y grid points (center1)-1 ! working from the center outward. do i = 1, nx_global do j = 1, center1-1 ! work from center1 to bottom work_g1(i,center1-j) = dyscale*work_g1(i,center1-j+1) - + ! work from center2 to top work_g1(i,center2+j) = dyscale*work_g1(i,center2+j-1) enddo ! i @@ -1668,15 +1668,15 @@ subroutine rectgrid_scale_dxdy endif ! mytask == master_task ! note work_g1 is converted to meters primary_grid_lengths_HTE call primary_grid_lengths_HTE(work_g1) ! dyU, dyT, dyN, dyE - + ! make ULAT array if (my_task == master_task) then - + ! make first row reference lat in radians. ! the remaining work_g1 is still dy in meters work_g1(:,1) = latrefrect/rad_to_deg ! radians - - + + ! loop over remaining points and add spacing to successive ! x locations do j = 2, ny_global ! start from j=2. j=1 is latrefrect @@ -1690,10 +1690,10 @@ subroutine rectgrid_scale_dxdy field_loc_NEcorner, field_type_scalar) call ice_HaloExtrapolate(ULAT, distrb_info, & ew_boundary_type, ns_boundary_type) - + deallocate(work_g1) - + end subroutine rectgrid_scale_dxdy !======================================================================= diff --git a/cicecore/drivers/direct/hadgem3/CICE_InitMod.F90 b/cicecore/drivers/direct/hadgem3/CICE_InitMod.F90 index cd27f296e..85050d8c9 100644 --- a/cicecore/drivers/direct/hadgem3/CICE_InitMod.F90 +++ b/cicecore/drivers/direct/hadgem3/CICE_InitMod.F90 @@ -122,7 +122,7 @@ subroutine cice_init call init_calendar ! initialize some calendar stuff call init_hist (dt) ! initialize output history file - if (kdyn == 1) then + if (kdyn == 1) then call init_evp else if (kdyn == 2) then call init_eap ! define eap dynamics parameters, variables diff --git a/cicecore/drivers/direct/nemo_concepts/CICE_InitMod.F90 b/cicecore/drivers/direct/nemo_concepts/CICE_InitMod.F90 index cd27f296e..85050d8c9 100644 --- a/cicecore/drivers/direct/nemo_concepts/CICE_InitMod.F90 +++ b/cicecore/drivers/direct/nemo_concepts/CICE_InitMod.F90 @@ -122,7 +122,7 @@ subroutine cice_init call init_calendar ! initialize some calendar stuff call init_hist (dt) ! initialize output history file - if (kdyn == 1) then + if (kdyn == 1) then call init_evp else if (kdyn == 2) then call init_eap ! define eap dynamics parameters, variables diff --git a/cicecore/drivers/mct/cesm1/CICE_InitMod.F90 b/cicecore/drivers/mct/cesm1/CICE_InitMod.F90 index 5ee070673..5efa18a28 100644 --- a/cicecore/drivers/mct/cesm1/CICE_InitMod.F90 +++ b/cicecore/drivers/mct/cesm1/CICE_InitMod.F90 @@ -135,7 +135,7 @@ subroutine cice_init(mpicom_ice) call init_calendar ! initialize some calendar stuff call init_hist (dt) ! initialize output history file - if (kdyn == 1) then + if (kdyn == 1) then call init_evp else if (kdyn == 2) then call init_eap ! 
define eap dynamics parameters, variables diff --git a/cicecore/drivers/nuopc/cmeps/ice_comp_nuopc.F90 b/cicecore/drivers/nuopc/cmeps/ice_comp_nuopc.F90 index afdee5590..5f2995132 100644 --- a/cicecore/drivers/nuopc/cmeps/ice_comp_nuopc.F90 +++ b/cicecore/drivers/nuopc/cmeps/ice_comp_nuopc.F90 @@ -576,7 +576,7 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) call t_startf ('cice_init1') call cice_init1 call t_stopf ('cice_init1') - + !----------------------------------------------------------------- ! Advertise fields !----------------------------------------------------------------- diff --git a/cicecore/drivers/nuopc/dmi/CICE_InitMod.F90 b/cicecore/drivers/nuopc/dmi/CICE_InitMod.F90 index 02356e2ba..dc83c7703 100644 --- a/cicecore/drivers/nuopc/dmi/CICE_InitMod.F90 +++ b/cicecore/drivers/nuopc/dmi/CICE_InitMod.F90 @@ -144,7 +144,7 @@ subroutine cice_init(mpi_comm) call init_calendar ! initialize some calendar stuff call init_hist (dt) ! initialize output history file - if (kdyn == 1) then + if (kdyn == 1) then call init_evp else if (kdyn == 2) then call init_eap ! define eap dynamics parameters, variables diff --git a/cicecore/drivers/standalone/cice/CICE_InitMod.F90 b/cicecore/drivers/standalone/cice/CICE_InitMod.F90 index c91dae4b4..8de05a121 100644 --- a/cicecore/drivers/standalone/cice/CICE_InitMod.F90 +++ b/cicecore/drivers/standalone/cice/CICE_InitMod.F90 @@ -132,7 +132,7 @@ subroutine cice_init call init_calendar ! initialize some calendar stuff call init_hist (dt) ! initialize output history file - if (kdyn == 1) then + if (kdyn == 1) then call init_evp else if (kdyn == 2) then call init_eap ! define eap dynamics parameters, variables diff --git a/cicecore/drivers/unittest/gridavgchk/CICE_InitMod.F90 b/cicecore/drivers/unittest/gridavgchk/CICE_InitMod.F90 index c65f04150..9ed1c5cbc 100644 --- a/cicecore/drivers/unittest/gridavgchk/CICE_InitMod.F90 +++ b/cicecore/drivers/unittest/gridavgchk/CICE_InitMod.F90 @@ -123,7 +123,7 @@ subroutine cice_init call init_calendar ! initialize some calendar stuff call init_hist (dt) ! initialize output history file - if (kdyn == 1) then + if (kdyn == 1) then call init_evp else if (kdyn == 2) then call init_eap ! define eap dynamics parameters, variables diff --git a/cicecore/drivers/unittest/sumchk/CICE_InitMod.F90 b/cicecore/drivers/unittest/sumchk/CICE_InitMod.F90 index f0877d502..8a5070d25 100644 --- a/cicecore/drivers/unittest/sumchk/CICE_InitMod.F90 +++ b/cicecore/drivers/unittest/sumchk/CICE_InitMod.F90 @@ -124,7 +124,7 @@ subroutine cice_init call init_calendar ! initialize some calendar stuff call init_hist (dt) ! initialize output history file - if (kdyn == 1) then + if (kdyn == 1) then call init_evp else if (kdyn == 2) then call init_eap ! define eap dynamics parameters, variables diff --git a/cicecore/version.txt b/cicecore/version.txt index 154cda3d7..953395fa1 100644 --- a/cicecore/version.txt +++ b/cicecore/version.txt @@ -1 +1 @@ -CICE 6.4.0 +CICE 6.4.1 diff --git a/doc/source/conf.py b/doc/source/conf.py index a1b2871ae..88b98bc09 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -62,9 +62,9 @@ # built documents. # # The short X.Y version. -version = u'6.4.0' +version = u'6.4.1' # The full version, including alpha/beta/rc tags. -version = u'6.4.0' +version = u'6.4.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. 
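One piece of the reindented code above that is easy to misread is the grid stretching in rectgrid_scale_dxdy: spacing starts uniform at dxrect, and then each column, marching outward from the one or two center columns, is dxscale times its inward neighbor, so dx grows geometrically toward the boundaries (dy is treated the same way with dyrect and dyscale). A minimal stand-alone sketch of the dx recursion for the even-nx case, with toy values for nx_global, dxrect, and dxscale (the odd case simply sets center2 = center1, as in the code above):

   program dxscale_sketch
      implicit none
      integer, parameter :: dbl_kind = selected_real_kind(13)
      integer, parameter :: nx_global = 8                          ! toy size, even
      real (kind=dbl_kind), parameter :: dxrect  = 16.0e5_dbl_kind ! toy base spacing (cm)
      real (kind=dbl_kind), parameter :: dxscale = 1.1_dbl_kind    ! toy stretch factor
      real (kind=dbl_kind) :: dx(nx_global)
      integer :: i, center1, center2

      dx = dxrect              ! start uniform, as with plain dxrect

      center1 = nx_global/2    ! even nx: two center columns
      center2 = center1 + 1    ! (odd nx collapses center2 onto center1)

      ! march outward from the center, stretching each new column by dxscale
      do i = 1, center1-1
         dx(center1-i) = dxscale*dx(center1-i+1)   ! leftward from center1
         dx(center2+i) = dxscale*dx(center2+i-1)   ! rightward from center2
      enddo

      print '(a,8f12.1)', ' dx (cm) =', dx
   end program dxscale_sketch

With these toy values the two center columns stay at 16 km and the edge columns grow to about 21.3 km; rectgrid_scale_dxdy then accumulates the spacings (converted via cm_to_m and divided by radius) to place the stretched ULON and ULAT values.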
diff --git a/icepack b/icepack index 8637a2d28..8f96707a9 160000 --- a/icepack +++ b/icepack @@ -1 +1 @@ -Subproject commit 8637a2d287f078c267f7912753919e54f188c434 +Subproject commit 8f96707a90132ca119d81ed84e5a62ca0ff3ed96 From eebb35054a77b6062eaa12c2de39626ccdbeb8c1 Mon Sep 17 00:00:00 2001 From: daveh150 Date: Fri, 16 Dec 2022 16:48:36 -0600 Subject: [PATCH 46/57] Adding method to check namelist in any order, tested with NAG Fortran. (#801) * Adding method to check namelist in any order. Use subroutine in ice_namelist_mod.F90 to search for namelist in ice_in. * Moved goto_nml subroutine to ice_fileunits.F90. Removed ice_namelist_mod.F90 * Cleanup indentations with tmpstr2 use * Cleanup spacing and indentation * For namelist check, remove extra continuation after making ice_abort string. Co-authored-by: Tony Craig --- cicecore/cicedyn/analysis/ice_history.F90 | 32 ++- cicecore/cicedyn/analysis/ice_history_bgc.F90 | 31 ++- .../cicedyn/analysis/ice_history_drag.F90 | 30 ++- cicecore/cicedyn/analysis/ice_history_fsd.F90 | 30 ++- .../cicedyn/analysis/ice_history_mechred.F90 | 30 ++- .../cicedyn/analysis/ice_history_pond.F90 | 36 ++- .../cicedyn/analysis/ice_history_snow.F90 | 34 ++- cicecore/cicedyn/general/ice_init.F90 | 222 +++++++++++++----- .../cicedyn/infrastructure/ice_domain.F90 | 34 ++- cicecore/shared/ice_fileunits.F90 | 53 ++++- 10 files changed, 410 insertions(+), 122 deletions(-) diff --git a/cicecore/cicedyn/analysis/ice_history.F90 b/cicecore/cicedyn/analysis/ice_history.F90 index 9ba5cf4d4..f19158f6a 100644 --- a/cicecore/cicedyn/analysis/ice_history.F90 +++ b/cicecore/cicedyn/analysis/ice_history.F90 @@ -81,6 +81,7 @@ subroutine init_hist (dt) use ice_history_fsd, only: init_hist_fsd_2D, init_hist_fsd_3Df, & init_hist_fsd_4Df, f_afsd, f_afsdn use ice_restart_shared, only: restart + use ice_fileunits, only: goto_nml real (kind=dbl_kind), intent(in) :: & dt ! time step @@ -104,7 +105,9 @@ subroutine init_hist (dt) cstr_got, cstr_gou, cstr_gov ! mask area name for t, u, v ocn grid (go) character (len=25) :: & gridstr2D, gridstr ! temporary string names - character(len=char_len) :: description + character(len=char_len) :: description + character(len=char_len_long) :: tmpstr2 ! for namelist check + character(len=char_len) :: nml_name ! text namelist name character(len=*), parameter :: subname = '(init_hist)' @@ -228,24 +231,39 @@ subroutine init_hist (dt) file=__FILE__, line=__LINE__) if (my_task == master_task) then - write(nu_diag,*) subname,' Reading icefields_nml' + nml_name = 'icefields_nml' + write(nu_diag,*) subname,' Reading ', trim(nml_name) + ! open file call get_fileunit(nu_nml) open (nu_nml, file=trim(nml_filename), status='old',iostat=nml_error) if (nml_error /= 0) then - call abort_ice(subname//'ERROR: icefields_nml open file '// & + call abort_ice(subname//'ERROR: '//trim(nml_name)//' open file '// & trim(nml_filename), & file=__FILE__, line=__LINE__) endif + ! seek to this namelist + call goto_nml(nu_nml,trim(nml_name),nml_error) + if (nml_error /= 0) then + call abort_ice(subname//'ERROR: searching for '// trim(nml_name), & + file=__FILE__, line=__LINE__) + endif + + ! read namelist nml_error = 1 do while (nml_error > 0) read(nu_nml, nml=icefields_nml,iostat=nml_error) + ! check if error + if (nml_error /= 0) then + ! 
backspace and re-read erroneous line + backspace(nu_nml) + read(nu_nml,fmt='(A)') tmpstr2 + call abort_ice(subname//'ERROR: ' // trim(nml_name) // ' reading ' // & + trim(tmpstr2), file=__FILE__, line=__LINE__) + endif end do - if (nml_error /= 0) then - call abort_ice(subname//'ERROR: icefields_nml reading ', & - file=__FILE__, line=__LINE__) - endif + close(nu_nml) call release_fileunit(nu_nml) endif diff --git a/cicecore/cicedyn/analysis/ice_history_bgc.F90 b/cicecore/cicedyn/analysis/ice_history_bgc.F90 index 003e76120..6974a087b 100644 --- a/cicecore/cicedyn/analysis/ice_history_bgc.F90 +++ b/cicecore/cicedyn/analysis/ice_history_bgc.F90 @@ -271,6 +271,7 @@ subroutine init_hist_bgc_2D use ice_communicate, only: my_task, master_task use ice_history_shared, only: tstr2D, tcstr, define_hist_field, & f_fsalt, f_fsalt_ai, f_sice + use ice_fileunits, only: goto_nml integer (kind=int_kind) :: n, ns integer (kind=int_kind) :: nml_error ! namelist i/o error flag @@ -283,6 +284,9 @@ subroutine init_hist_bgc_2D tr_bgc_DON, tr_bgc_Fe, tr_bgc_hum, & skl_bgc, solve_zsal, z_tracers + character(len=char_len) :: nml_name ! for namelist check + character(len=char_len_long) :: tmpstr2 ! for namelist check + character(len=*), parameter :: subname = '(init_hist_bgc_2D)' call icepack_query_parameters(skl_bgc_out=skl_bgc, & @@ -305,24 +309,39 @@ subroutine init_hist_bgc_2D !----------------------------------------------------------------- if (my_task == master_task) then - write(nu_diag,*) subname,' Reading icefields_bgc_nml' + nml_name = 'icefields_bgc_nml' + write(nu_diag,*) subname,' Reading ', trim(nml_name) + ! check if can open file call get_fileunit(nu_nml) open (nu_nml, file=trim(nml_filename), status='old',iostat=nml_error) if (nml_error /= 0) then - call abort_ice(subname//'ERROR: icefields_bgc_nml open file '// & + call abort_ice(subname//'ERROR: '//trim(nml_name)//' open file '// & trim(nml_filename), & file=__FILE__, line=__LINE__) endif + ! seek to namelist in file + call goto_nml(nu_nml,trim(nml_name),nml_error) + if (nml_error /= 0) then + call abort_ice(subname//'ERROR: searching for '// trim(nml_name), & + file=__FILE__, line=__LINE__) + endif + + ! read namelist nml_error = 1 do while (nml_error > 0) read(nu_nml, nml=icefields_bgc_nml,iostat=nml_error) + ! check if error + if (nml_error /= 0) then + ! backspace and re-read erroneous line + backspace(nu_nml) + read(nu_nml,fmt='(A)') tmpstr2 + call abort_ice(subname//'ERROR: ' // trim(nml_name) // ' reading ' // & + trim(tmpstr2), file=__FILE__, line=__LINE__) + endif end do - if (nml_error /= 0) then - call abort_ice(subname//'ERROR: icefields_bgc_nml reading ', & - file=__FILE__, line=__LINE__) - endif + close(nu_nml) call release_fileunit(nu_nml) endif diff --git a/cicecore/cicedyn/analysis/ice_history_drag.F90 b/cicecore/cicedyn/analysis/ice_history_drag.F90 index fba19b364..dd9e3cb59 100644 --- a/cicecore/cicedyn/analysis/ice_history_drag.F90 +++ b/cicecore/cicedyn/analysis/ice_history_drag.F90 @@ -64,10 +64,13 @@ subroutine init_hist_drag_2D use ice_calendar, only: nstreams use ice_communicate, only: my_task, master_task use ice_history_shared, only: tstr2D, tcstr, define_hist_field + use ice_fileunits, only: goto_nml integer (kind=int_kind) :: ns integer (kind=int_kind) :: nml_error ! namelist i/o error flag logical (kind=log_kind) :: formdrag + character(len=char_len_long) :: tmpstr2 ! for namelist check + character(len=char_len) :: nml_name ! 
for namelist check character(len=*), parameter :: subname = '(init_hist_drag_2D)' @@ -81,24 +84,39 @@ subroutine init_hist_drag_2D !----------------------------------------------------------------- if (my_task == master_task) then - write(nu_diag,*) subname,' Reading icefields_drag_nml' + nml_name = 'icefields_drag_nml' + write(nu_diag,*) subname,' Reading ', trim(nml_name) + ! open namelist file call get_fileunit(nu_nml) open (nu_nml, file=trim(nml_filename), status='old',iostat=nml_error) if (nml_error /= 0) then - call abort_ice(subname//'ERROR: icefields_drag_nml open file '// & + call abort_ice(subname//'ERROR: '//trim(nml_name)//' open file '// & trim(nml_filename), & file=__FILE__, line=__LINE__) endif + ! go to this namelist + call goto_nml(nu_nml,trim(nml_name),nml_error) + if (nml_error /= 0) then + call abort_ice(subname//'ERROR: searching for '// trim(nml_name), & + file=__FILE__, line=__LINE__) + endif + + ! read namelist nml_error = 1 do while (nml_error > 0) read(nu_nml, nml=icefields_drag_nml,iostat=nml_error) + ! check if error + if (nml_error /= 0) then + ! backspace and re-read erroneous line + backspace(nu_nml) + read(nu_nml,fmt='(A)') tmpstr2 + call abort_ice(subname//'ERROR: ' // trim(nml_name) // ' reading ' // & + trim(tmpstr2), file=__FILE__, line=__LINE__) + endif end do - if (nml_error /= 0) then - call abort_ice(subname//'ERROR: icefields_drag_nml reading ', & - file=__FILE__, line=__LINE__) - endif + close(nu_nml) call release_fileunit(nu_nml) endif diff --git a/cicecore/cicedyn/analysis/ice_history_fsd.F90 b/cicecore/cicedyn/analysis/ice_history_fsd.F90 index b52db4e05..610f56608 100644 --- a/cicecore/cicedyn/analysis/ice_history_fsd.F90 +++ b/cicecore/cicedyn/analysis/ice_history_fsd.F90 @@ -76,10 +76,13 @@ subroutine init_hist_fsd_2D use ice_calendar, only: nstreams use ice_communicate, only: my_task, master_task use ice_history_shared, only: tstr2D, tcstr, define_hist_field + use ice_fileunits, only: goto_nml integer (kind=int_kind) :: ns integer (kind=int_kind) :: nml_error ! namelist i/o error flag logical (kind=log_kind) :: tr_fsd, wave_spec + character (len=char_len_long) :: tmpstr2 ! test namelist + character(len=char_len) :: nml_name ! text namelist name character(len=*), parameter :: subname = '(init_hist_fsd_2D)' @@ -96,24 +99,39 @@ subroutine init_hist_fsd_2D !----------------------------------------------------------------- if (my_task == master_task) then - write(nu_diag,*) subname,' Reading icefields_fsd_nml' + nml_name = 'icefields_fsd_nml' + write(nu_diag,*) subname,' Reading ', trim(nml_name) + ! open namelist file call get_fileunit(nu_nml) open (nu_nml, file=trim(nml_filename), status='old',iostat=nml_error) if (nml_error /= 0) then - call abort_ice(subname//'ERROR: icefields_fsd_nml open file '// & + call abort_ice(subname//'ERROR: '//trim(nml_name)//' open file '// & trim(nml_filename), & file=__FILE__, line=__LINE__) endif + ! goto this namelist + call goto_nml(nu_nml,trim(nml_name),nml_error) + if (nml_error /= 0) then + call abort_ice(subname//'ERROR: searching for '// trim(nml_name), & + file=__FILE__, line=__LINE__) + endif + + ! read namelist nml_error = 1 do while (nml_error > 0) read(nu_nml, nml=icefields_fsd_nml,iostat=nml_error) + ! check if error + if (nml_error /= 0) then + ! 
backspace and re-read erroneous line + backspace(nu_nml) + read(nu_nml,fmt='(A)') tmpstr2 + call abort_ice(subname//'ERROR: ' // trim(nml_name) // ' reading ' // & + trim(tmpstr2), file=__FILE__, line=__LINE__) + endif end do - if (nml_error /= 0) then - call abort_ice(subname//'ERROR: icefields_fsd_nml reading ', & - file=__FILE__, line=__LINE__) - endif + close(nu_nml) call release_fileunit(nu_nml) endif diff --git a/cicecore/cicedyn/analysis/ice_history_mechred.F90 b/cicecore/cicedyn/analysis/ice_history_mechred.F90 index 98c58bc39..e0d15fcf2 100644 --- a/cicecore/cicedyn/analysis/ice_history_mechred.F90 +++ b/cicecore/cicedyn/analysis/ice_history_mechred.F90 @@ -84,11 +84,14 @@ subroutine init_hist_mechred_2D use ice_calendar, only: nstreams, histfreq use ice_communicate, only: my_task, master_task use ice_history_shared, only: tstr2D, tcstr, define_hist_field + use ice_fileunits, only: goto_nml integer (kind=int_kind) :: ns integer (kind=int_kind) :: nml_error ! namelist i/o error flag real (kind=dbl_kind) :: secday logical (kind=log_kind) :: tr_lvl + character(len=char_len_long) :: tmpstr2 ! for namelist check + character(len=char_len) :: nml_name ! for namelist check character(len=*), parameter :: subname = '(init_hist_mechred_2D)' @@ -103,24 +106,39 @@ subroutine init_hist_mechred_2D !----------------------------------------------------------------- if (my_task == master_task) then - write(nu_diag,*) subname,' Reading icefields_mechred_nml' + nml_name = 'icefields_mechred_nml' + write(nu_diag,*) subname,' Reading ', trim(nml_name) + ! open namelist file call get_fileunit(nu_nml) open (nu_nml, file=trim(nml_filename), status='old',iostat=nml_error) if (nml_error /= 0) then - call abort_ice(subname//'ERROR: icefields_mechred_nml open file '// & + call abort_ice(subname//'ERROR: '//trim(nml_name)//' open file '// & trim(nml_filename), & file=__FILE__, line=__LINE__) endif + ! goto this namelist in file + call goto_nml(nu_nml,trim(nml_name),nml_error) + if (nml_error /= 0) then + call abort_ice(subname//'ERROR: searching for '// trim(nml_name), & + file=__FILE__, line=__LINE__) + endif + + ! read namelist nml_error = 1 do while (nml_error > 0) read(nu_nml, nml=icefields_mechred_nml,iostat=nml_error) + ! check if error + if (nml_error /= 0) then + ! backspace and re-read erroneous line + backspace(nu_nml) + read(nu_nml,fmt='(A)') tmpstr2 + call abort_ice(subname//'ERROR: ' // trim(nml_name) // ' reading ' // & + trim(tmpstr2), file=__FILE__, line=__LINE__) + endif end do - if (nml_error /= 0) then - call abort_ice(subname//'ERROR: icefields_mechred_nml reading ', & - file=__FILE__, line=__LINE__) - endif + close(nu_nml) call release_fileunit(nu_nml) endif diff --git a/cicecore/cicedyn/analysis/ice_history_pond.F90 b/cicecore/cicedyn/analysis/ice_history_pond.F90 index 8818ff94e..d209e6db6 100644 --- a/cicecore/cicedyn/analysis/ice_history_pond.F90 +++ b/cicecore/cicedyn/analysis/ice_history_pond.F90 @@ -69,10 +69,13 @@ subroutine init_hist_pond_2D use ice_calendar, only: nstreams, histfreq use ice_communicate, only: my_task, master_task use ice_history_shared, only: tstr2D, tcstr, define_hist_field + use ice_fileunits, only: goto_nml integer (kind=int_kind) :: ns integer (kind=int_kind) :: nml_error ! namelist i/o error flag logical (kind=log_kind) :: tr_pond + character(len=char_len_long) :: tmpstr2 ! for namelist check + character(len=char_len) :: nml_name ! 
text namelist name character(len=*), parameter :: subname = '(init_hist_pond_2D)' @@ -86,24 +89,39 @@ subroutine init_hist_pond_2D !----------------------------------------------------------------- if (my_task == master_task) then - write(nu_diag,*) subname,' Reading icefields_pond_nml' + nml_name = 'icefields_pond_nml' + write(nu_diag,*) subname,' Reading ', trim(nml_name) + ! open namelist file call get_fileunit(nu_nml) open (nu_nml, file=trim(nml_filename), status='old',iostat=nml_error) if (nml_error /= 0) then - call abort_ice(subname//'ERROR: icefields_pond_nml open file '// & - trim(nml_filename), & - file=__FILE__, line=__LINE__) + call abort_ice(subname//'ERROR: '//trim(nml_name)//' open file '// & + trim(nml_filename), & + file=__FILE__, line=__LINE__) endif - + + ! goto this namelist in file + call goto_nml(nu_nml,trim(nml_name),nml_error) + if (nml_error /= 0) then + call abort_ice(subname//'ERROR: searching for '// trim(nml_name), & + file=__FILE__, line=__LINE__) + endif + + ! read namelist nml_error = 1 do while (nml_error > 0) read(nu_nml, nml=icefields_pond_nml,iostat=nml_error) + ! check if error + if (nml_error /= 0) then + ! backspace and re-read erroneous line + backspace(nu_nml) + read(nu_nml,fmt='(A)') tmpstr2 + call abort_ice(subname//'ERROR: ' // trim(nml_name) // ' reading ' // & + trim(tmpstr2), file=__FILE__, line=__LINE__) + endif end do - if (nml_error /= 0) then - call abort_ice(subname//'ERROR: icefields_pond_nml reading ', & - file=__FILE__, line=__LINE__) - endif + close(nu_nml) call release_fileunit(nu_nml) endif diff --git a/cicecore/cicedyn/analysis/ice_history_snow.F90 b/cicecore/cicedyn/analysis/ice_history_snow.F90 index 0ec4144bf..62e65b5a3 100644 --- a/cicecore/cicedyn/analysis/ice_history_snow.F90 +++ b/cicecore/cicedyn/analysis/ice_history_snow.F90 @@ -69,6 +69,7 @@ subroutine init_hist_snow_2D (dt) use ice_history_shared, only: tstr2D, tcstr, define_hist_field use ice_fileunits, only: nu_nml, nml_filename, & get_fileunit, release_fileunit + use ice_fileunits, only: goto_nml real (kind=dbl_kind), intent(in) :: & dt ! time step @@ -76,7 +77,10 @@ subroutine init_hist_snow_2D (dt) integer (kind=int_kind) :: ns integer (kind=int_kind) :: nml_error ! namelist i/o error flag real (kind=dbl_kind) :: rhofresh, secday - logical (kind=log_kind) :: tr_snow + logical (kind=log_kind) :: tr_snow + character(len=char_len_long) :: tmpstr2 ! for namelist check + character(len=char_len) :: nml_name ! for namelist check + character(len=*), parameter :: subname = '(init_hist_snow_2D)' call icepack_query_tracer_flags(tr_snow_out=tr_snow) @@ -92,26 +96,42 @@ subroutine init_hist_snow_2D (dt) !----------------------------------------------------------------- if (my_task == master_task) then - write(nu_diag,*) subname,' Reading icefields_snow_nml' + nml_name = 'icefields_snow_nml' + write(nu_diag,*) subname,' Reading ', trim(nml_name) + ! open namelist file call get_fileunit(nu_nml) open (nu_nml, file=trim(nml_filename), status='old',iostat=nml_error) if (nml_error /= 0) then - call abort_ice(subname//'ERROR: icefields_snow_nml open file '// & + call abort_ice(subname//'ERROR: '//trim(nml_name)//' open file '// & trim(nml_filename), & file=__FILE__, line=__LINE__) endif + ! goto namelist in file + call goto_nml(nu_nml,trim(nml_name),nml_error) + if (nml_error /= 0) then + call abort_ice(subname//'ERROR: searching for '// trim(nml_name), & + file=__FILE__, line=__LINE__) + endif + + ! 
read namelist nml_error = 1 do while (nml_error > 0) read(nu_nml, nml=icefields_snow_nml,iostat=nml_error) + ! check if error + if (nml_error /= 0) then + ! backspace and re-read erroneous line + backspace(nu_nml) + read(nu_nml,fmt='(A)') tmpstr2 + call abort_ice(subname//'ERROR: ' // trim(nml_name) // ' reading ' // & + trim(tmpstr2), file=__FILE__, line=__LINE__) + endif end do - if (nml_error /= 0) then - call abort_ice(subname//'ERROR: icefields_snow_nml reading ', & - file=__FILE__, line=__LINE__) - endif + close(nu_nml) call release_fileunit(nu_nml) + endif else ! .not. tr_snow diff --git a/cicecore/cicedyn/general/ice_init.F90 b/cicecore/cicedyn/general/ice_init.F90 index 03ebc0174..1d16d1ac2 100644 --- a/cicecore/cicedyn/general/ice_init.F90 +++ b/cicecore/cicedyn/general/ice_init.F90 @@ -124,6 +124,8 @@ subroutine input_data use ice_restoring, only: restore_ice use ice_timers, only: timer_stats use ice_memusage, only: memory_stats + use ice_fileunits, only: goto_nml + #ifdef CESMCOUPLED use shr_file_mod, only: shr_file_setIO #endif @@ -163,9 +165,11 @@ subroutine input_data integer (kind=int_kind) :: numin, numax ! unit number limits integer (kind=int_kind) :: rplvl, rptopo - real (kind=dbl_kind) :: Cf, ksno, puny, ice_ref_salinity + real (kind=dbl_kind) :: Cf, ksno, puny, ice_ref_salinity + character (len=char_len) :: abort_list - character (len=128) :: tmpstr2 + character (len=char_len) :: nml_name ! namelist name + character (len=char_len_long) :: tmpstr2 character(len=*), parameter :: subname='(input_data)' @@ -587,6 +591,7 @@ subroutine input_data if (my_task == master_task) then + ! open namelist file call get_fileunit(nu_nml) open (nu_nml, file=trim(nml_filename), status='old',iostat=nml_error) if (nml_error /= 0) then @@ -595,141 +600,228 @@ subroutine input_data file=__FILE__, line=__LINE__) endif - write(nu_diag,*) subname,' Reading setup_nml' - rewind(unit=nu_nml, iostat=nml_error) + ! read setup_nml + nml_name = 'setup_nml' + write(nu_diag,*) subname,' Reading ', trim(nml_name) + ! goto namelist in file + call goto_nml(nu_nml,trim(nml_name),nml_error) if (nml_error /= 0) then - call abort_ice(subname//'ERROR: setup_nml rewind ', & + call abort_ice(subname//'ERROR: searching for '// trim(nml_name), & file=__FILE__, line=__LINE__) endif + + ! read namelist nml_error = 1 do while (nml_error > 0) read(nu_nml, nml=setup_nml,iostat=nml_error) + ! check if error + if (nml_error /= 0) then + ! backspace and re-read erroneous line + backspace(nu_nml) + read(nu_nml,fmt='(A)') tmpstr2 + call abort_ice(subname//'ERROR: '//trim(nml_name)//' reading '// & + trim(tmpstr2), file=__FILE__, line=__LINE__) + endif end do - if (nml_error /= 0) then - call abort_ice(subname//'ERROR: setup_nml reading ', & - file=__FILE__, line=__LINE__) - endif - write(nu_diag,*) subname,' Reading grid_nml' - rewind(unit=nu_nml, iostat=nml_error) + ! read grid_nml + nml_name = 'grid_nml' + write(nu_diag,*) subname,' Reading ', trim(nml_name) + ! goto namelist in file + call goto_nml(nu_nml,trim(nml_name),nml_error) if (nml_error /= 0) then - call abort_ice(subname//'ERROR: grid_nml rewind ', & + call abort_ice(subname//'ERROR: searching for '// trim(nml_name), & file=__FILE__, line=__LINE__) endif + + ! read namelist nml_error = 1 do while (nml_error > 0) read(nu_nml, nml=grid_nml,iostat=nml_error) + ! check if error + if (nml_error /= 0) then + ! 
backspace and re-read erroneous line + backspace(nu_nml) + read(nu_nml,fmt='(A)') tmpstr2 + call abort_ice(subname//'ERROR: ' //trim(nml_name)//' reading '// & + trim(tmpstr2), file=__FILE__, line=__LINE__) + endif end do - if (nml_error /= 0) then - call abort_ice(subname//'ERROR: grid_nml reading ', & - file=__FILE__, line=__LINE__) - endif - write(nu_diag,*) subname,' Reading tracer_nml' - rewind(unit=nu_nml, iostat=nml_error) + ! read tracer_nml + nml_name = 'tracer_nml' + write(nu_diag,*) subname,' Reading ', trim(nml_name) + ! goto namelist in file + call goto_nml(nu_nml,trim(nml_name),nml_error) if (nml_error /= 0) then - call abort_ice(subname//'ERROR: tracer_nml rewind ', & + call abort_ice(subname//'ERROR: searching for '// trim(nml_name), & file=__FILE__, line=__LINE__) endif + + ! read namelist nml_error = 1 do while (nml_error > 0) read(nu_nml, nml=tracer_nml,iostat=nml_error) + ! check if error + if (nml_error /= 0) then + ! backspace and re-read erroneous line + backspace(nu_nml) + read(nu_nml,fmt='(A)') tmpstr2 + call abort_ice(subname//'ERROR: ' //trim(nml_name)//' reading '// & + trim(tmpstr2), file=__FILE__, line=__LINE__) + endif end do - if (nml_error /= 0) then - call abort_ice(subname//'ERROR: tracer_nml reading ', & - file=__FILE__, line=__LINE__) - endif - write(nu_diag,*) subname,' Reading thermo_nml' - rewind(unit=nu_nml, iostat=nml_error) + ! read thermo_nml + nml_name = 'thermo_nml' + write(nu_diag,*) subname,' Reading ', trim(nml_name) + ! goto namelist in file + call goto_nml(nu_nml,trim(nml_name),nml_error) if (nml_error /= 0) then - call abort_ice(subname//'ERROR: thermo_nml rewind ', & + call abort_ice(subname//'ERROR: searching for '// trim(nml_name), & file=__FILE__, line=__LINE__) endif + + ! read namelist nml_error = 1 do while (nml_error > 0) read(nu_nml, nml=thermo_nml,iostat=nml_error) + ! check if error + if (nml_error /= 0) then + ! backspace and re-read erroneous line + backspace(nu_nml) + read(nu_nml,fmt='(A)') tmpstr2 + call abort_ice(subname//'ERROR: '//trim(nml_name)//' reading '// & + trim(tmpstr2), file=__FILE__, line=__LINE__) + endif end do - if (nml_error /= 0) then - call abort_ice(subname//'ERROR: thermo_nml reading ', & - file=__FILE__, line=__LINE__) - endif - write(nu_diag,*) subname,' Reading dynamics_nml' - rewind(unit=nu_nml, iostat=nml_error) + ! read dynamics_nml + nml_name = 'dynamics_nml' + write(nu_diag,*) subname,' Reading ', trim(nml_name) + + ! goto namelist in file + call goto_nml(nu_nml,trim(nml_name),nml_error) if (nml_error /= 0) then - call abort_ice(subname//'ERROR: dynamics_nml rewind ', & + call abort_ice(subname//'ERROR: searching for '// trim(nml_name), & file=__FILE__, line=__LINE__) endif + + ! read namelist nml_error = 1 do while (nml_error > 0) read(nu_nml, nml=dynamics_nml,iostat=nml_error) + ! check if error + if (nml_error /= 0) then + ! backspace and re-read erroneous line + backspace(nu_nml) + read(nu_nml,fmt='(A)') tmpstr2 + call abort_ice(subname//'ERROR: '//trim(nml_name)//' reading '// & + trim(tmpstr2), file=__FILE__, line=__LINE__) + endif end do - if (nml_error /= 0) then - call abort_ice(subname//'ERROR: dynamics_nml reading ', & - file=__FILE__, line=__LINE__) - endif - write(nu_diag,*) subname,' Reading shortwave_nml' - rewind(unit=nu_nml, iostat=nml_error) + ! read shortwave_nml + nml_name = 'shortwave_nml' + write(nu_diag,*) subname,' Reading ', trim(nml_name) + + ! 
goto namelist in file + call goto_nml(nu_nml,trim(nml_name),nml_error) if (nml_error /= 0) then - call abort_ice(subname//'ERROR: shortwave_nml rewind ', & + call abort_ice(subname//'ERROR: searching for '// trim(nml_name), & file=__FILE__, line=__LINE__) endif + + ! read namelist nml_error = 1 do while (nml_error > 0) read(nu_nml, nml=shortwave_nml,iostat=nml_error) + ! check if error + if (nml_error /= 0) then + ! backspace and re-read erroneous line + backspace(nu_nml) + read(nu_nml,fmt='(A)') tmpstr2 + call abort_ice(subname//'ERROR: '//trim(nml_name)//' reading '//& + trim(tmpstr2), file=__FILE__, line=__LINE__) + endif end do - if (nml_error /= 0) then - call abort_ice(subname//'ERROR: shortwave_nml reading ', & - file=__FILE__, line=__LINE__) - endif - write(nu_diag,*) subname,' Reading ponds_nml' - rewind(unit=nu_nml, iostat=nml_error) + ! read ponds_nml + nml_name = 'ponds_nml' + write(nu_diag,*) subname,' Reading ', trim(nml_name) + + ! goto namelist in file + call goto_nml(nu_nml,trim(nml_name),nml_error) if (nml_error /= 0) then - call abort_ice(subname//'ERROR: ponds_nml rewind ', & + call abort_ice(subname//'ERROR: searching for '// trim(nml_name), & file=__FILE__, line=__LINE__) endif + + ! read namelist nml_error = 1 do while (nml_error > 0) read(nu_nml, nml=ponds_nml,iostat=nml_error) + ! check if error + if (nml_error /= 0) then + ! backspace and re-read erroneous line + backspace(nu_nml) + read(nu_nml,fmt='(A)') tmpstr2 + call abort_ice(subname//'ERROR: '//trim(nml_name)//' reading '// & + trim(tmpstr2), file=__FILE__, line=__LINE__) + endif end do - if (nml_error /= 0) then - call abort_ice(subname//'ERROR: ponds_nml reading ', & - file=__FILE__, line=__LINE__) - endif - write(nu_diag,*) subname,' Reading snow_nml' - rewind(unit=nu_nml, iostat=nml_error) + ! read snow_nml + nml_name = 'snow_nml' + write(nu_diag,*) subname,' Reading ', trim(nml_name) + + ! goto namelist in file + call goto_nml(nu_nml,trim(nml_name),nml_error) if (nml_error /= 0) then - call abort_ice(subname//'ERROR: snow_nml rewind ', & + call abort_ice(subname//'ERROR: searching for '// trim(nml_name), & file=__FILE__, line=__LINE__) endif + + ! read namelist nml_error = 1 do while (nml_error > 0) read(nu_nml, nml=snow_nml,iostat=nml_error) + ! check if error + if (nml_error /= 0) then + ! backspace and re-read erroneous line + backspace(nu_nml) + read(nu_nml,fmt='(A)') tmpstr2 + call abort_ice(subname//'ERROR: '//trim(nml_name)//' reading '// & + trim(tmpstr2), file=__FILE__, line=__LINE__) + endif end do - if (nml_error /= 0) then - call abort_ice(subname//'ERROR: snow_nml reading ', & - file=__FILE__, line=__LINE__) - endif - write(nu_diag,*) subname,' Reading forcing_nml' - rewind(unit=nu_nml, iostat=nml_error) + ! read forcing_nml + nml_name = 'forcing_nml' + write(nu_diag,*) subname,' Reading ', trim(nml_name) + + ! goto namelist in file + call goto_nml(nu_nml,trim(nml_name),nml_error) if (nml_error /= 0) then - call abort_ice(subname//'ERROR: forcing_nml rewind ', & + call abort_ice(subname//'ERROR: searching for '// trim(nml_name), & file=__FILE__, line=__LINE__) endif + + ! read namelist nml_error = 1 do while (nml_error > 0) read(nu_nml, nml=forcing_nml,iostat=nml_error) + ! check if error + if (nml_error /= 0) then + ! 
backspace and re-read erroneous line + backspace(nu_nml) + read(nu_nml,fmt='(A)') tmpstr2 + call abort_ice(subname//'ERROR: '// trim(nml_name)//' reading '// & + trim(tmpstr2), file=__FILE__, line=__LINE__) + endif end do - if (nml_error /= 0) then - call abort_ice(subname//'ERROR: forcing_nml reading ', & - file=__FILE__, line=__LINE__) - endif + ! done reading namelist. close(nu_nml) call release_fileunit(nu_nml) endif diff --git a/cicecore/cicedyn/infrastructure/ice_domain.F90 b/cicecore/cicedyn/infrastructure/ice_domain.F90 index 10254aa93..ff1fac723 100644 --- a/cicecore/cicedyn/infrastructure/ice_domain.F90 +++ b/cicecore/cicedyn/infrastructure/ice_domain.F90 @@ -104,7 +104,7 @@ subroutine init_domain_blocks use ice_distribution, only: processor_shape use ice_domain_size, only: ncat, nilyr, nslyr, max_blocks, & nx_global, ny_global, block_size_x, block_size_y - + use ice_fileunits, only: goto_nml !---------------------------------------------------------------------- ! ! local variables ! !---------------------------------------------------------------------- @@ -114,6 +114,9 @@ subroutine init_domain_blocks integer (int_kind) :: & nml_error ! namelist read error flag + character(len=char_len) :: nml_name ! text namelist name + character(len=char_len_long) :: tmpstr2 ! for namelist check + character(len=*), parameter :: subname = '(init_domain_blocks)' !---------------------------------------------------------------------- @@ -167,26 +170,39 @@ subroutine init_domain_blocks landblockelim = .true. ! on by default if (my_task == master_task) then - write(nu_diag,*) subname,' Reading domain_nml' - + nml_name = 'domain_nml' + write(nu_diag,*) subname,' Reading ', trim(nml_name) + call get_fileunit(nu_nml) open (nu_nml, file=trim(nml_filename), status='old',iostat=nml_error) if (nml_error /= 0) then call abort_ice(subname//'ERROR: domain_nml open file '// & - trim(nml_filename), & - file=__FILE__, line=__LINE__) + trim(nml_filename), & + file=__FILE__, line=__LINE__) endif + call goto_nml(nu_nml,trim(nml_name),nml_error) + if (nml_error /= 0) then + call abort_ice(subname//'ERROR: searching for '// trim(nml_name), & + file=__FILE__, line=__LINE__) + endif + nml_error = 1 do while (nml_error > 0) read(nu_nml, nml=domain_nml,iostat=nml_error) + ! check if error + if (nml_error /= 0) then + ! backspace and re-read erroneous line + backspace(nu_nml) + read(nu_nml,fmt='(A)') tmpstr2 + call abort_ice(subname//'ERROR: ' // trim(nml_name) // ' reading ' // & + trim(tmpstr2), file=__FILE__, line=__LINE__) + endif end do - if (nml_error /= 0) then - call abort_ice(subname//'ERROR: domain_nml reading ', & - file=__FILE__, line=__LINE__) - endif + close(nu_nml) call release_fileunit(nu_nml) + endif call broadcast_scalar(nprocs, master_task) diff --git a/cicecore/shared/ice_fileunits.F90 b/cicecore/shared/ice_fileunits.F90 index c8ca3a937..7e425e5e7 100644 --- a/cicecore/shared/ice_fileunits.F90 +++ b/cicecore/shared/ice_fileunits.F90 @@ -28,7 +28,8 @@ module ice_fileunits implicit none private public :: init_fileunits, get_fileunit, flush_fileunit, & - release_fileunit, release_all_fileunits + release_fileunit, release_all_fileunits, & + goto_nml character (len=char_len), public :: & diag_type ! 'stdout' or 'file' @@ -322,6 +323,56 @@ subroutine flush_fileunit(iunit) end subroutine flush_fileunit +!======================================================================= + +!======================================================================= + + subroutine goto_nml(iunit, nml, status) + ! Search for namelist group within ice_in file. ! 
for compilers that do not allow optional namelists + + ! passed variables + integer(kind=int_kind), intent(in) :: & + iunit ! namelist file unit + + character(len=*), intent(in) :: & + nml ! namelist to search for + + integer(kind=int_kind), intent(out) :: & + status ! status of subroutine + + ! local variables + character(len=char_len) :: & + file_str, & ! string in file + nml_str ! namelist string to test + + integer(kind=int_kind) :: & + i, n ! dummy integers + + + ! rewind file + rewind(iunit) + + ! define test string with ampersand + nml_str = '&' // trim(adjustl(nml)) + + ! search for the record containing the namelist group we're looking for + do + read(iunit, '(a)', iostat=status) file_str + if (status /= 0) then + exit ! e.g. end of file + else + if (index(adjustl(file_str), nml_str) == 1) then + exit ! i.e. found record we're looking for + end if + end if + end do + + ! backspace to namelist name in file + backspace(iunit) + + end subroutine goto_nml + !======================================================================= end module ice_fileunits
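Every namelist group above now goes through the same open / goto_nml / guarded-read sequence. A minimal sketch of that pattern, using the names from the diffs ('my_nml' stands in for any of the groups handled in this patch):

   call get_fileunit(nu_nml)
   open (nu_nml, file=trim(nml_filename), status='old', iostat=nml_error)
   if (nml_error /= 0) then
      call abort_ice(subname//'ERROR: my_nml open file '//trim(nml_filename), &
         file=__FILE__, line=__LINE__)
   endif

   call goto_nml(nu_nml, 'my_nml', nml_error)   ! rewind and position at '&my_nml'
   if (nml_error /= 0) then
      call abort_ice(subname//'ERROR: searching for my_nml', &
         file=__FILE__, line=__LINE__)
   endif

   nml_error = 1
   do while (nml_error > 0)
      read(nu_nml, nml=my_nml, iostat=nml_error)
      if (nml_error /= 0) then
         backspace(nu_nml)                 ! step back to the record that failed
         read(nu_nml, fmt='(A)') tmpstr2   ! re-read it as plain text
         call abort_ice(subname//'ERROR: my_nml reading '//trim(tmpstr2), &
            file=__FILE__, line=__LINE__)
      endif
   end do

   close(nu_nml)
   call release_fileunit(nu_nml)

Two details are worth noting: goto_nml rewinds and scans for the '&'-prefixed group name itself because some compilers abort on a namelist read that would have to skip over other groups in the file, and the backspace/re-read pair turns an otherwise opaque iostat failure into an abort message that quotes the offending input line.

From 0bf0fdcabd00df95d1028a66f71784b0b68f1178 Mon Sep 17 00:00:00 2001 From: "David A. Bailey" Date: Fri, 16 Dec 2022 17:43:19 -0700 Subject: [PATCH 47/57] Fix OMP setup and 'setmask' (#806) * Fix OMP setup * Update meshgrid --- cicecore/cicedyn/general/ice_init.F90 | 3 +-- cicecore/cicedyn/infrastructure/ice_grid.F90 | 4 +--- cicecore/drivers/nuopc/cmeps/ice_comp_nuopc.F90 | 4 ++-- cicecore/drivers/nuopc/cmeps/ice_import_export.F90 | 4 ++-- 4 files changed, 6 insertions(+), 9 deletions(-) diff --git a/cicecore/cicedyn/general/ice_init.F90 b/cicecore/cicedyn/general/ice_init.F90 index 1d16d1ac2..d56ad002e 100644 --- a/cicecore/cicedyn/general/ice_init.F90 +++ b/cicecore/cicedyn/general/ice_init.F90 @@ -2422,8 +2422,7 @@ subroutine input_data grid_type /= 'rectangular' .and. & grid_type /= 'cpom_grid' .and. & grid_type /= 'regional' .and. & - grid_type /= 'latlon' .and. & - grid_type /= 'setmask' ) then + grid_type /= 'latlon') then if (my_task == master_task) write(nu_diag,*) subname//' ERROR: unknown grid_type=',trim(grid_type) abort_list = trim(abort_list)//":20" endif diff --git a/cicecore/cicedyn/infrastructure/ice_grid.F90 b/cicecore/cicedyn/infrastructure/ice_grid.F90 index d6c612f00..b775c21f2 100644 --- a/cicecore/cicedyn/infrastructure/ice_grid.F90 +++ b/cicecore/cicedyn/infrastructure/ice_grid.F90 @@ -508,7 +508,6 @@ subroutine init_grid2 !----------------------------------------------------------------- ! This code does not work in CESM. Needs to be investigated further. 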
-#ifndef CESMCOUPLED #if defined (_OPENMP) !$OMP PARALLEL DO ORDERED PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks @@ -521,12 +520,11 @@ subroutine init_grid2 write(nu_diag,*) subname,' omp schedule = ',ompsk,ompcs endif write(nu_diag,*) subname,' block, thread = ',iblk,OMP_GET_THREAD_NUM() - call flush_fileunit(nu_diag) !$OMP END ORDERED endif enddo !$OMP END PARALLEL DO -#endif + call flush_fileunit(nu_diag) #endif !----------------------------------------------------------------- diff --git a/cicecore/drivers/nuopc/cmeps/ice_comp_nuopc.F90 b/cicecore/drivers/nuopc/cmeps/ice_comp_nuopc.F90 index 5f2995132..b94fcff05 100644 --- a/cicecore/drivers/nuopc/cmeps/ice_comp_nuopc.F90 +++ b/cicecore/drivers/nuopc/cmeps/ice_comp_nuopc.F90 @@ -21,7 +21,7 @@ module ice_comp_nuopc use ice_import_export , only : ice_import, ice_export, ice_advertise_fields, ice_realize_fields use ice_domain_size , only : nx_global, ny_global - use ice_grid , only : grid_type, init_grid2 + use ice_grid , only : grid_format, init_grid2 use ice_communicate , only : init_communicate, my_task, master_task, mpi_comm_ice use ice_calendar , only : force_restart_now, write_ic, init_calendar use ice_calendar , only : idate, mday, mmonth, myear, year_init @@ -684,7 +684,7 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) if (ChkErr(rc,__LINE__,u_FILE_u)) return ! Initialize the cice mesh and the cice mask - if (trim(grid_type) == 'setmask') then + if (trim(grid_format) == 'meshnc') then ! In this case cap code determines the mask file call ice_mesh_setmask_from_maskfile(ice_maskfile, ice_mesh, rc=rc) if (ChkErr(rc,__LINE__,u_FILE_u)) return diff --git a/cicecore/drivers/nuopc/cmeps/ice_import_export.F90 b/cicecore/drivers/nuopc/cmeps/ice_import_export.F90 index e4db010de..60059e39a 100644 --- a/cicecore/drivers/nuopc/cmeps/ice_import_export.F90 +++ b/cicecore/drivers/nuopc/cmeps/ice_import_export.F90 @@ -27,7 +27,7 @@ module ice_import_export use ice_arrays_column , only : floe_rad_c, wave_spectrum use ice_state , only : vice, vsno, aice, aicen_init, trcr, trcrn use ice_grid , only : tlon, tlat, tarea, tmask, anglet, hm - use ice_grid , only : grid_type + use ice_grid , only : grid_format use ice_mesh_mod , only : ocn_gridcell_frac use ice_boundary , only : ice_HaloUpdate use ice_fileunits , only : nu_diag, flush_fileunit @@ -1059,7 +1059,7 @@ subroutine ice_export( exportState, rc ) call state_setexport(exportState, 'ice_fraction', input=ailohi, rc=rc) if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (trim(grid_type) == 'setmask') then + if (trim(grid_format) == 'meshnc') then call state_setexport(exportState, 'ice_mask', input=ocn_gridcell_frac, rc=rc) if (ChkErr(rc,__LINE__,u_FILE_u)) return else From ed472ab7daa2b4ddd50f79300c22cc4cdc11f13c Mon Sep 17 00:00:00 2001 From: "denise.worthen" Date: Mon, 9 Jan 2023 12:53:05 +0000 Subject: [PATCH 48/57] fix for nudiag_set allow nudiag_set to be available outside of cesm; may prefer to fix in coupling interface --- cicecore/shared/ice_fileunits.F90 | 2 -- 1 file changed, 2 deletions(-) diff --git a/cicecore/shared/ice_fileunits.F90 b/cicecore/shared/ice_fileunits.F90 index 7e425e5e7..72a40f513 100644 --- a/cicecore/shared/ice_fileunits.F90 +++ b/cicecore/shared/ice_fileunits.F90 @@ -81,10 +81,8 @@ module ice_fileunits integer (kind=int_kind), public :: & nu_diag = ice_stdout ! diagnostics output file, unit number may be overwritten -#ifdef CESMCOUPLED logical (kind=log_kind), public :: & nu_diag_set = .false. ! 
flag to indicate whether nu_diag is already set -#endif integer (kind=int_kind), public :: & ice_IOUnitsMinUnit = 11, & ! do not use unit numbers below From 28fdbebf4a3b98485c1e977344fd402f29f6a0b6 Mon Sep 17 00:00:00 2001 From: JFLemieux73 <31927797+JFLemieux73@users.noreply.github.com> Date: Thu, 19 Jan 2023 14:59:26 -0500 Subject: [PATCH 49/57] Fix for rare instability in (probabilistic) seabed stress (#810) * Modified doc for Dupont et al ref * Minor modif to v_i calculation in seabed_stress_factor_prob to prevent rare instability --- cicecore/cicedyn/dynamics/ice_dyn_shared.F90 | 11 ++++++----- cicecore/cicedyn/general/ice_init.F90 | 2 +- doc/source/master_list.bib | 10 ++++++++++ doc/source/science_guide/sg_dynamics.rst | 2 +- doc/source/user_guide/ug_case_settings.rst | 2 +- 5 files changed, 19 insertions(+), 8 deletions(-) diff --git a/cicecore/cicedyn/dynamics/ice_dyn_shared.F90 b/cicecore/cicedyn/dynamics/ice_dyn_shared.F90 index a12e6fddd..50f1aae6e 100644 --- a/cicecore/cicedyn/dynamics/ice_dyn_shared.F90 +++ b/cicecore/cicedyn/dynamics/ice_dyn_shared.F90 @@ -59,7 +59,7 @@ module ice_dyn_shared yield_curve , & ! 'ellipse' ('teardrop' needs further testing) visc_method , & ! method for viscosity calc at U points (C, CD grids) seabed_stress_method ! method for seabed stress calculation - ! LKD: Lemieux et al. 2015, probabilistic: Dupont et al. in prep. + ! LKD: Lemieux et al. 2015, probabilistic: Dupont et al. 2022 real (kind=dbl_kind), parameter, public :: & u0 = 5e-5_dbl_kind, & ! residual velocity for seabed stress (m/s) @@ -1347,8 +1347,9 @@ end subroutine seabed_stress_factor_LKD ! a normal distribution with sigma_b = 2.5d0. An improvement would ! be to provide the distribution based on high resolution data. ! -! Dupont, F. Dumont, D., Lemieux, J.F., Dumas-Lefebvre, E., Caya, A. -! in prep. +! Dupont, F., D. Dumont, J.F. Lemieux, E. Dumas-Lefebvre, A. Caya (2022). +! A probabilistic seabed-ice keel interaction model, The Cryosphere, 16, +! 1963-1977. ! ! authors: D. Dumont, J.F. Lemieux, E. Dumas-Lefebvre, F. Dupont ! @@ -1481,13 +1482,13 @@ subroutine seabed_stress_factor_prob (nx_block, ny_block, & do n =1, ncat v_i = v_i + vcat(n)**2 / (max(acat(n), puny)) enddo - v_i = v_i - m_i**2 + v_i = max((v_i - m_i**2), puny) mu_i = log(m_i/sqrt(c1 + v_i/m_i**2)) ! parameters for the log-normal sigma_i = sqrt(log(c1 + v_i/m_i**2)) ! max thickness associated with percentile of log-normal PDF - ! x_kmax=x997 was obtained from an optimization procedure (Dupont et al.) + ! x_kmax=x997 was obtained from an optimization procedure (Dupont et al. 2022) x_kmax = exp(mu_i + sqrt(c2*sigma_i)*1.9430d0) diff --git a/cicecore/cicedyn/general/ice_init.F90 b/cicecore/cicedyn/general/ice_init.F90 index d56ad002e..4c8fb1fee 100644 --- a/cicecore/cicedyn/general/ice_init.F90 +++ b/cicecore/cicedyn/general/ice_init.F90 @@ -392,7 +392,7 @@ subroutine input_data dyscale = 1.0_dbl_kind ! user defined rectgrid y-grid scale factor (e.g., 1.02) close_boundaries = .false. ! true = set land on edges of grid seabed_stress= .false. ! if true, seabed stress for landfast is on - seabed_stress_method = 'LKD'! LKD = Lemieux et al 2015, probabilistic = Dupont et al. in prep + seabed_stress_method = 'LKD'! LKD = Lemieux et al 2015, probabilistic = Dupont et al. 2022 k1 = 7.5_dbl_kind ! 1st free parameter for landfast parameterization k2 = 15.0_dbl_kind ! 2nd free parameter (N/m^3) for landfast parametrization alphab = 20.0_dbl_kind ! 
alphab=Cb factor in Lemieux et al 2015 diff --git a/doc/source/master_list.bib b/doc/source/master_list.bib index a2da9b9f8..9e387efb9 100644 --- a/doc/source/master_list.bib +++ b/doc/source/master_list.bib @@ -1082,6 +1082,16 @@ @article{Bouchat22 year = {2022} } +@Article{Dupont22, + author = {F. Dupont and D. Dumont and J.F. Lemieux and E. Dumas-Lefebvre and A. Caya}, + title = "{A probabilistic seabed-ice keel interaction model}", + journal = TC, + year = {2022}, + volume = {16}, + pages = {1963-1977}, + url = {https://doi.org/10.5194/tc-16-1963-2022} +} + @Article{Tsujino18, author = "H. Tsujino and S. Urakawa and R.J. Small and W.M. Kim and S.G. Yeager and et al.", title = "{JRA‐55 based surface dataset for driving ocean–sea‐ice models (JRA55‐do)}", diff --git a/doc/source/science_guide/sg_dynamics.rst b/doc/source/science_guide/sg_dynamics.rst index e6b918538..6b269d453 100644 --- a/doc/source/science_guide/sg_dynamics.rst +++ b/doc/source/science_guide/sg_dynamics.rst @@ -382,7 +382,7 @@ The value of :math:`k_1` can be changed at runtime using the namelist variable ` This more sophisticated grounding parameterization computes the seabed stress based on the probability of contact between the ice thickness distribution -(ITD) and the seabed. Multi-thickness category models such as CICE typically use a +(ITD) and the seabed :cite:`Dupont22`. Multi-thickness category models such as CICE typically use a few thickness categories (5-10). This crude representation of the ITD does not resolve the tail of the ITD, which is crucial for grounding events. diff --git a/doc/source/user_guide/ug_case_settings.rst b/doc/source/user_guide/ug_case_settings.rst index 587adcd56..9906fba87 100644 --- a/doc/source/user_guide/ug_case_settings.rst +++ b/doc/source/user_guide/ug_case_settings.rst @@ -485,7 +485,7 @@ dynamics_nml "``revised_evp``", "logical", "use revised EVP formulation", "``.false.``" "``seabed_stress``", "logical", "use seabed stress parameterization for landfast ice", "``.false.``" "``seabed_stress_method``", "``LKD``", "linear keel draft method :cite:`Lemieux16`", "``LKD``" - "", "``probabilistic``", "probability of contact method (Dupont et al., in prep)", "" + "", "``probabilistic``", "probability of contact method :cite:`Dupont22`", "" "``ssh_stress``", "``coupled``", "computed from coupled sea surface height gradient", "``geostrophic``" "", "``geostropic``", "computed from ocean velocity", "" "``threshold_hw``", "real", "Max water depth for grounding (see :cite:`Amundrud04`)", "30."
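For reference, the moment matching behind that log-normal fit (a sketch, in the notation of seabed_stress_factor_prob above: m_i is the mean thickness, v_i the ITD variance):

.. math::
   \sigma_i = \sqrt{\ln\left(1 + v_i/m_i^2\right)}, \qquad
   \mu_i = \ln\left(m_i/\sqrt{1 + v_i/m_i^2}\right)

Both logarithms require v_i > 0. Analytically v_i = sum_n vcat(n)**2/acat(n) - m_i**2 is non-negative, but in finite precision it can land at or just below zero when the ITD collapses toward a single category; clamping it at puny, as the hunk above does, is what removes the rare instability.

From b946a95ea46096d739b4b0d725d0eab81a53fbee Mon Sep 17 00:00:00 2001 From: "David A. Bailey" Date: Tue, 31 Jan 2023 20:17:26 -0700 Subject: [PATCH 50/57] Some small CESM updates. (#812) * Fix OMP setup * Update meshgrid * Small updates for CESM * Add change for UFS --- cicecore/cicedyn/infrastructure/ice_grid.F90 | 1 - cicecore/cicedyn/infrastructure/io/io_pio2/ice_restart.F90 | 6 ++++++ cicecore/shared/ice_fileunits.F90 | 2 -- 3 files changed, 6 insertions(+), 3 deletions(-) diff --git a/cicecore/cicedyn/infrastructure/ice_grid.F90 b/cicecore/cicedyn/infrastructure/ice_grid.F90 index b775c21f2..0d56d3400 100644 --- a/cicecore/cicedyn/infrastructure/ice_grid.F90 +++ b/cicecore/cicedyn/infrastructure/ice_grid.F90 @@ -507,7 +507,6 @@ subroutine init_grid2 ! Diagnose OpenMP thread schedule, force order in output !----------------------------------------------------------------- -! This code does not work in CESM. Needs to be investigated further. 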
#if defined (_OPENMP) !$OMP PARALLEL DO ORDERED PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks diff --git a/cicecore/cicedyn/infrastructure/io/io_pio2/ice_restart.F90 b/cicecore/cicedyn/infrastructure/io/io_pio2/ice_restart.F90 index 7019f7128..cdfbac87a 100644 --- a/cicecore/cicedyn/infrastructure/io/io_pio2/ice_restart.F90 +++ b/cicecore/cicedyn/infrastructure/io/io_pio2/ice_restart.F90 @@ -749,6 +749,9 @@ subroutine read_restart_field(nu,nrec,work,atype,vname,ndim3,diag, & ! if (ndim3 == ncat .and. ncat>1) then if (ndim3 == ncat .and. ndims == 3) then call pio_read_darray(File, vardesc, iodesc3d_ncat, work, status) +#ifdef CESMCOUPLED + where (work == PIO_FILL_DOUBLE) work = c0 +#endif if (present(field_loc)) then do n=1,ndim3 call ice_HaloUpdate (work(:,:,n,:), halo_info, & @@ -758,6 +761,9 @@ subroutine read_restart_field(nu,nrec,work,atype,vname,ndim3,diag, & ! elseif (ndim3 == 1) then elseif (ndim3 == 1 .and. ndims == 2) then call pio_read_darray(File, vardesc, iodesc2d, work, status) +#ifdef CESMCOUPLED + where (work == PIO_FILL_DOUBLE) work = c0 +#endif if (present(field_loc)) then call ice_HaloUpdate (work(:,:,1,:), halo_info, & field_loc, field_type) diff --git a/cicecore/shared/ice_fileunits.F90 b/cicecore/shared/ice_fileunits.F90 index 7e425e5e7..72a40f513 100644 --- a/cicecore/shared/ice_fileunits.F90 +++ b/cicecore/shared/ice_fileunits.F90 @@ -81,10 +81,8 @@ module ice_fileunits integer (kind=int_kind), public :: & nu_diag = ice_stdout ! diagnostics output file, unit number may be overwritten -#ifdef CESMCOUPLED logical (kind=log_kind), public :: & nu_diag_set = .false. ! flag to indicate whether nu_diag is already set -#endif integer (kind=int_kind), public :: & ice_IOUnitsMinUnit = 11, & ! do not use unit numbers below From d73bb8b064e062217afc54e33faa9a0247be50e3 Mon Sep 17 00:00:00 2001 From: "David A. 
Bailey" Date: Thu, 2 Mar 2023 15:15:46 -0700 Subject: [PATCH 51/57] Add time_period_freq (#816) * Add time_period_freq to history file metadata --- .../io/io_netcdf/ice_history_write.F90 | 22 ++++++++++++++++++- .../io/io_pio2/ice_history_write.F90 | 18 +++++++++++++++ 2 files changed, 39 insertions(+), 1 deletion(-) diff --git a/cicecore/cicedyn/infrastructure/io/io_netcdf/ice_history_write.F90 b/cicecore/cicedyn/infrastructure/io/io_netcdf/ice_history_write.F90 index 019ab8ce9..d85ec5e3c 100644 --- a/cicecore/cicedyn/infrastructure/io/io_netcdf/ice_history_write.F90 +++ b/cicecore/cicedyn/infrastructure/io/io_netcdf/ice_history_write.F90 @@ -48,7 +48,7 @@ subroutine ice_write_hist (ns) use ice_blocks, only: nx_block, ny_block use ice_broadcast, only: broadcast_scalar use ice_calendar, only: msec, timesecs, idate, idate0, write_ic, & - histfreq, days_per_year, use_leap_years, dayyr, & + histfreq, histfreq_n, days_per_year, use_leap_years, dayyr, & hh_init, mm_init, ss_init use ice_communicate, only: my_task, master_task use ice_domain, only: distrb_info @@ -86,6 +86,7 @@ subroutine ice_write_hist (ns) integer (kind=int_kind), dimension(6) :: dimidex real (kind=dbl_kind) :: ltime2 character (char_len) :: title + character (char_len) :: time_period_freq = 'none' character (char_len_long) :: ncfile(max_nstrm) real (kind=dbl_kind) :: secday, rad_to_deg @@ -682,6 +683,25 @@ subroutine ice_write_hist (ns) if (status /= nf90_noerr) call abort_ice(subname// & 'ERROR: global attribute date2') + select case (histfreq(ns)) + case ("y", "Y") + write(time_period_freq,'(a,i0)') 'year_',histfreq_n(ns) + case ("m", "M") + write(time_period_freq,'(a,i0)') 'month_',histfreq_n(ns) + case ("d", "D") + write(time_period_freq,'(a,i0)') 'day_',histfreq_n(ns) + case ("h", "H") + write(time_period_freq,'(a,i0)') 'hour_',histfreq_n(ns) + case ("1") + write(time_period_freq,'(a,i0)') 'step_',histfreq_n(ns) + end select + + if (.not.write_ic .and. trim(time_period_freq) /= 'none') then + status = nf90_put_att(ncid,nf90_global,'time_period_freq',trim(time_period_freq)) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: global attribute time_period_freq') + endif + title = 'CF-1.0' status = & nf90_put_att(ncid,nf90_global,'conventions',title) diff --git a/cicecore/cicedyn/infrastructure/io/io_pio2/ice_history_write.F90 b/cicecore/cicedyn/infrastructure/io/io_pio2/ice_history_write.F90 index 6407d8c76..a697a98d5 100644 --- a/cicecore/cicedyn/infrastructure/io/io_pio2/ice_history_write.F90 +++ b/cicecore/cicedyn/infrastructure/io/io_pio2/ice_history_write.F90 @@ -76,6 +76,7 @@ subroutine ice_write_hist (ns) integer (kind=int_kind), dimension(6) :: dimidex real (kind= dbl_kind) :: ltime2 character (char_len) :: title + character (char_len) :: time_period_freq = 'none' character (char_len_long) :: ncfile(max_nstrm) integer (kind=int_kind) :: iotype @@ -649,6 +650,23 @@ subroutine ice_write_hist (ns) write(title,'(a,i6)') 'seconds elapsed into model date: ',msec status = pio_put_att(File,pio_global,'comment3',trim(title)) + select case (histfreq(ns)) + case ("y", "Y") + write(time_period_freq,'(a,i0)') 'year_',histfreq_n(ns) + case ("m", "M") + write(time_period_freq,'(a,i0)') 'month_',histfreq_n(ns) + case ("d", "D") + write(time_period_freq,'(a,i0)') 'day_',histfreq_n(ns) + case ("h", "H") + write(time_period_freq,'(a,i0)') 'hour_',histfreq_n(ns) + case ("1") + write(time_period_freq,'(a,i0)') 'step_',histfreq_n(ns) + end select + + if (.not.write_ic .and. 
trim(time_period_freq) /= 'none') then + status = pio_put_att(File,pio_global,'time_period_freq',trim(time_period_freq)) + endif + title = 'CF-1.0' status = & pio_put_att(File,pio_global,'conventions',trim(title))
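For illustration (hypothetical stream settings, not from the patch): with histfreq='m' and histfreq_n=1, the select case above produces time_period_freq = 'month_1', which ncdump would list among the global attributes roughly as

   :time_period_freq = "month_1" ;

From 0fcc140c1958eea95e6432371c179cb9cfb74429 Mon Sep 17 00:00:00 2001 From: Philippe Blain Date: Wed, 8 Mar 2023 18:58:28 -0500 Subject: [PATCH 52/57] doc, ice_history: correct units for 'sigP' and improve doc for stress variables (#817) * doc/source/conf.py: adjust for sphinxcontrib.bibtex 2.0 The sphinxcontrib.bibtex Sphinx extension used for the bibliography now wants the bibliography file to be configured in the Sphinx configuration file (conf.py) instead of in the source file where the bibliography is included. This is new in sphinxcontrib.bibtex 2.0 [1], so let's do that. Keeping the filename also in zreferences.rst does not hurt and lets us stay compatible with earlier versions of sphinxcontrib.bibtex, so let's keep it there also. [1] https://sphinxcontrib-bibtex.readthedocs.io/en/latest/changes.html#id5 * ice_history: correct units for 'sigP' The internal ice pressure 'sigP' is in units of N/m, as can be seen in ice_dyn_shared::principal_stress. However, the corresponding history variable is wrongly defined in ice_history::init_hist with unit '1' (dimensionless). This means the wrong unit is written to the NetCDF history output. This dates back to the introduction of that variable in 6ed2359 (Added pressure, modified norm of principal stresses and made small modifs to basal stress following Till's comments, 2018-03-02). Fix the unit. While at it, add an entry for 'sigP' in the index, from which this variable is missing. Reported-by: Frederic Dupont Reported-by: Jean-Francois Lemieux * doc: clarify stress variables Try to make the doc a little less confusing by cross-referencing the code variables used for stress computations with the corresponding variables in the science guide a little bit more, and vice-versa: - mention the doc variables sigma_1, sigma_2 in the index entries for stressp, stressm - mention the code variables stressp, stressm when the doc variables sigma_1, sigma_2 are introduced - introduce new doc variables sigma_n,1 and sigma_n,2 to denote the normalized principal stresses, and add the equation for those. This allows mentioning that they are normalized by the ice strength, which was not mentioned elsewhere. - mention these new doc variables in the index entry for sig1, sig2 - refer to the normal stress sigma_11, sigma_22 by their variable names when mentioning them in the sentence that introduces the ice pressure - mention the code variables sig1, sig2 in the "Implementation" part of the user guide when mentioning the 'principal_stress' subroutine. 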
Helped-by: Jean-Francois Lemieux --- cicecore/cicedyn/analysis/ice_history.F90 | 2 +- doc/source/cice_index.rst | 7 ++++--- doc/source/conf.py | 3 +++ doc/source/science_guide/sg_dynamics.rst | 16 ++++++++++++---- doc/source/user_guide/ug_implementation.rst | 2 +- 5 files changed, 21 insertions(+), 9 deletions(-) diff --git a/cicecore/cicedyn/analysis/ice_history.F90 b/cicecore/cicedyn/analysis/ice_history.F90 index f19158f6a..54b6ce934 100644 --- a/cicecore/cicedyn/analysis/ice_history.F90 +++ b/cicecore/cicedyn/analysis/ice_history.F90 @@ -1341,7 +1341,7 @@ subroutine init_hist (dt) "sig2 is instantaneous" // trim(description), c1, c0, & ns1, f_sig2) - call define_hist_field(n_sigP,"sigP","1",gridstr2d, gridstr, & + call define_hist_field(n_sigP,"sigP","N/m",gridstr2d, gridstr, & "ice pressure", & "sigP is instantaneous" // trim(description), c1, c0, & ns1, f_sigP) diff --git a/doc/source/cice_index.rst b/doc/source/cice_index.rst index 0e9d21517..000004bb9 100644 --- a/doc/source/cice_index.rst +++ b/doc/source/cice_index.rst @@ -620,7 +620,8 @@ either Celsius or Kelvin units). Deprecated parameters are listed at the end. "shear", "strain rate II component", "1/s" "shlat", "southern latitude of artificial mask edge", "30\ :math:`^\circ`\ N" "shortwave", "flag for shortwave parameterization (‘ccsm3’ or ‘dEdd’)", "" - "sig1(2)", "principal stress components (diagnostic)", "" + "sig1(2)", "principal stress components :math:`\sigma_{n,1}`, :math:`\sigma_{n,2}` (diagnostic)", "" + "sigP", "internal ice pressure", "N/m" "sil", "silicate concentration", "mmol/m\ :math:`^3`" "sinw", "sine of the turning angle in water", "0." "Sinz", "ice salinity profile", "ppt" @@ -660,8 +661,8 @@ either Celsius or Kelvin units). Deprecated parameters are listed at the end. "strax(y)", "wind stress components from data", "N/m\ :math:`^2`" "strength", "ice strength", "N/m" "stress12", "internal ice stress, :math:`\sigma_{12}`", "N/m" - "stressm", "internal ice stress, :math:`\sigma_{11}-\sigma_{22}`", "N/m" - "stressp", "internal ice stress, :math:`\sigma_{11}+\sigma_{22}`", "N/m" + "stressm", "internal ice stress, :math:`\sigma_{11}-\sigma_{22}` (:math:`\sigma_2` in the doc)", "N/m" + "stressp", "internal ice stress, :math:`\sigma_{11}+\sigma_{22}` (:math:`\sigma_1` in the doc)", "N/m" "strintx(y)U", "divergence of internal ice stress, x(y)", "N/m\ :math:`^2`" "strocnx(y)U", "ice–ocean stress in the x(y)-direction (U-cell)", "N/m\ :math:`^2`" "strocnx(y)T", "ice–ocean stress, x(y)-dir. (T-cell)", "N/m\ :math:`^2`" diff --git a/doc/source/conf.py b/doc/source/conf.py index 88b98bc09..7d79f7b43 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -38,6 +38,9 @@ 'sphinxcontrib.bibtex', ] +# Name of the bibliography file for sphinxcontrib.bibtex. +bibtex_bibfiles = ['master_list.bib'] + # Add any paths that contain templates here, relative to this directory. 
templates_path = ['_templates'] diff --git a/doc/source/science_guide/sg_dynamics.rst b/doc/source/science_guide/sg_dynamics.rst index 6b269d453..fd5e2d760 100644 --- a/doc/source/science_guide/sg_dynamics.rst +++ b/doc/source/science_guide/sg_dynamics.rst @@ -449,8 +449,8 @@ Rheology ******** For convenience we formulate the stress tensor :math:`\bf \sigma` in -terms of :math:`\sigma_1=\sigma_{11}+\sigma_{22}`, -:math:`\sigma_2=\sigma_{11}-\sigma_{22}`, and introduce the +terms of :math:`\sigma_1=\sigma_{11}+\sigma_{22}` (``stressp``), +:math:`\sigma_2=\sigma_{11}-\sigma_{22}` (``stressm``), and introduce the divergence, :math:`D_D`, and the horizontal tension and shearing strain rates, :math:`D_T` and :math:`D_S` respectively: @@ -468,8 +468,16 @@ where .. math:: \dot{\epsilon}_{ij} = {1\over 2}\left({{\partial u_i}\over{\partial x_j}} + {{\partial u_j}\over{\partial x_i}}\right) -CICE can output the internal ice pressure which is an important field to support navigation in ice-infested water. -The internal ice pressure (``sigP``) is the average of the normal stresses multiplied by :math:`-1` and +Note that :math:`\sigma_1` and :math:`\sigma_2` are not to be confused with the normalized principal stresses, +:math:`\sigma_{n,1}` and :math:`\sigma_{n,2}` (``sig1`` and ``sig2``), which are defined as: + +.. math:: + \sigma_{n,1}, \sigma_{n,2} = \frac{1}{P} \left( \frac{\sigma_1}{2} \pm \sqrt{\left(\frac{\sigma_2}{2}\right)^2 + \sigma_{12}^2} \right) + +where :math:`P` is the ice strength. + +In addition to the normalized principal stresses, CICE can output the internal ice pressure which is an important field to support navigation in ice-infested water. +The internal ice pressure (``sigP``) is the average of the normal stresses (:math:`\sigma_{11}`, :math:`\sigma_{22}`) multiplied by :math:`-1` and is therefore simply equal to :math:`-\sigma_1/2`. .. _stress-vp: diff --git a/doc/source/user_guide/ug_implementation.rst b/doc/source/user_guide/ug_implementation.rst index 5ed2092c0..047204380 100644 --- a/doc/source/user_guide/ug_implementation.rst +++ b/doc/source/user_guide/ug_implementation.rst @@ -1207,7 +1207,7 @@ directory in **iceh\_ic.[timeID].nc(da)**. Several history variables are hard-coded for instantaneous output regardless of the ``hist_avg`` averaging flag, at the frequency given by their namelist flag. -The normalized principal components of internal ice stress are computed +The normalized principal components of internal ice stress (``sig1``, ``sig2``) are computed in *principal\_stress* and written to the history file. This calculation is not necessary for the simulation; principal stresses are merely computed for diagnostic purposes and included here for the user’s
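As a quick worked check on these definitions (illustrative numbers, not part of the patch): for pure compression at the strength limit, sigma_1 = -P and sigma_2 = sigma_12 = 0, so the new formula gives sigma_n,1 = sigma_n,2 = -1/2, while the internal ice pressure is sigP = -sigma_1/2 = P/2, in N/m per the unit fix above.

From 9a55ad9b71444e5600c517e687b5f5d641ee6dea Mon Sep 17 00:00:00 2001 From: Philippe Blain Date: Mon, 13 Mar 2023 18:25:28 -0400 Subject: [PATCH 53/57] cicecore: correct initial condition metadata (#818) * ice_history_write: fix initial condition metadata under 'hist_avg' When writing averaged history outputs (hist_avg=.true.), this setting also affects the initial condition. Even if the actual data variables written to the initial condition are not averaged (they are taken more or less directly from the restart or the hard-coded defaults, modulo aggregation over categories), their attributes ('cell_methods' and 'time_rep') imply they are averaged, and the 'bounds' attribute of the 'time' variable refers to the 'time_bounds' variable. 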
Make the metadata of the initial condition more correct by: - not writing the 'time_bounds' variable (and the corresponding 'd2' dimension) - not writing the 'bounds' attribute of the 'time' variable - not writing the 'cell_methods' attributes of each variable - writing the 'time_rep' attribute of each variable as 'instantaneous' instead of 'averaged'. Do this by checking 'write_ic' at all places where we check for the value of 'hist_avg' to write the above variables and attributes in each of the 3 IO backends (binary, netcdf, pio2). * drivers/{nemo_concepts,standalone}: write initial condition at initial time In CICE_InitMod::cice_init, we call ice_calendar::advance_timestep before writing the initial condition, such that the 'time' variable in the initial condition is not zero; it has a value of 1*dt (the model time step). The initial condition filename also reflects this, since 'msec' (model seconds) also has a value of 1*dt and is used in ice_history_shared::construct_filename. This leads to the initial condition filename not corresponding to the model initialization date/time but rather 1*dt later. Since we call 'accum_hist' after initializing the forcing, any forcing field written to the initial condition has values corresponding to msec=dt, whereas the ice state corresponds to msec=0, leading to an inconsistency. Fix that by calling 'accum_hist' to write the initial condition _before_ calling 'advance_timestep'. Since we now call 'accum_hist' before initializing the forcing, any forcing field written to the initial condition has its default, hard-coded value, instead of its value at time=dt. An improvement would be to read the forcing at time=dt, write the initial condition, advance the time step, and read the forcing again, but let's not complicate things too much for now. --- .../io/io_binary/ice_history_write.F90 | 17 +++++++++-------- .../io/io_netcdf/ice_history_write.F90 | 13 +++++++------ .../io/io_pio2/ice_history_write.F90 | 13 +++++++------ .../direct/nemo_concepts/CICE_InitMod.F90 | 4 ++-- .../drivers/standalone/cice/CICE_InitMod.F90 | 4 ++-- 5 files changed, 27 insertions(+), 24 deletions(-) diff --git a/cicecore/cicedyn/infrastructure/io/io_binary/ice_history_write.F90 b/cicecore/cicedyn/infrastructure/io/io_binary/ice_history_write.F90 index 2a3f042c3..9df51635d 100644 --- a/cicecore/cicedyn/infrastructure/io/io_binary/ice_history_write.F90 +++ b/cicecore/cicedyn/infrastructure/io/io_binary/ice_history_write.F90 @@ -158,6 +158,7 @@ subroutine ice_write_hist(ns) trim(avail_hist_fields(n)%vcomment) if (histfreq(ns) == '1' .or. .not. hist_avg & + .or. write_ic & .or. n==n_divu(ns) .or. n==n_shear(ns) & ! snapshots .or. n==n_sig1(ns) .or. n==n_sig2(ns) & .or. n==n_sigP(ns) .or. n==n_trsig(ns) & @@ -186,7 +187,7 @@ subroutine ice_write_hist(ns) write (nu_hdr, 994) nrec,trim(avail_hist_fields(n)%vname), & trim(avail_hist_fields(n)%vdesc),trim(avail_hist_fields(n)%vunit),nn - if (histfreq(ns) == '1' .or. .not. hist_avg) then + if (histfreq(ns) == '1' .or. .not. hist_avg .or. write_ic) then write (nu_hdr, 996) nrec,trim(avail_hist_fields(n)%vname), & 'time_rep','instantaneous' else @@ -210,7 +211,7 @@ subroutine ice_write_hist(ns) write (nu_hdr, 993) nrec,trim(avail_hist_fields(n)%vname), & trim(avail_hist_fields(n)%vdesc),trim(avail_hist_fields(n)%vunit),k - if (histfreq(ns) == '1' .or. .not. hist_avg) then + if (histfreq(ns) == '1' .or. .not. hist_avg .or. 
write_ic) then write (nu_hdr, 996) nrec,trim(avail_hist_fields(n)%vname), & 'time_rep','instantaneous' else @@ -234,7 +235,7 @@ subroutine ice_write_hist(ns) write (nu_hdr, 993) nrec,trim(avail_hist_fields(n)%vname), & trim(avail_hist_fields(n)%vdesc),trim(avail_hist_fields(n)%vunit),nn,k - if (histfreq(ns) == '1' .or. .not. hist_avg) then + if (histfreq(ns) == '1' .or. .not. hist_avg .or. write_ic) then write (nu_hdr, 996) nrec,trim(avail_hist_fields(n)%vname), & 'time_rep','instantaneous' else @@ -258,7 +259,7 @@ subroutine ice_write_hist(ns) write (nu_hdr, 993) nrec,trim(avail_hist_fields(n)%vname), & trim(avail_hist_fields(n)%vdesc),trim(avail_hist_fields(n)%vunit),nn,k - if (histfreq(ns) == '1' .or. .not. hist_avg) then + if (histfreq(ns) == '1' .or. .not. hist_avg .or. write_ic) then write (nu_hdr, 996) nrec,trim(avail_hist_fields(n)%vname), & 'time_rep','instantaneous' else @@ -282,7 +283,7 @@ subroutine ice_write_hist(ns) write (nu_hdr, 993) nrec,trim(avail_hist_fields(n)%vname), & trim(avail_hist_fields(n)%vdesc),trim(avail_hist_fields(n)%vunit),nn,k - if (histfreq(ns) == '1' .or. .not. hist_avg) then + if (histfreq(ns) == '1' .or. .not. hist_avg .or. write_ic) then write (nu_hdr, 996) nrec,trim(avail_hist_fields(n)%vname), & 'time_rep','instantaneous' else @@ -307,7 +308,7 @@ subroutine ice_write_hist(ns) write (nu_hdr, 993) nrec,trim(avail_hist_fields(n)%vname), & trim(avail_hist_fields(n)%vdesc),trim(avail_hist_fields(n)%vunit),nn,k - if (histfreq(ns) == '1' .or. .not. hist_avg) then + if (histfreq(ns) == '1' .or. .not. hist_avg .or. write_ic) then write (nu_hdr, 996) nrec,trim(avail_hist_fields(n)%vname), & 'time_rep','instantaneous' else @@ -333,7 +334,7 @@ subroutine ice_write_hist(ns) write (nu_hdr, 993) nrec,trim(avail_hist_fields(n)%vname), & trim(avail_hist_fields(n)%vdesc),trim(avail_hist_fields(n)%vunit),nn,k - if (histfreq(ns) == '1' .or. .not. hist_avg) then + if (histfreq(ns) == '1' .or. .not. hist_avg .or. write_ic) then write (nu_hdr, 996) nrec,trim(avail_hist_fields(n)%vname), & 'time_rep','instantaneous' else @@ -359,7 +360,7 @@ subroutine ice_write_hist(ns) write (nu_hdr, 993) nrec,trim(avail_hist_fields(n)%vname), & trim(avail_hist_fields(n)%vdesc),trim(avail_hist_fields(n)%vunit),nn,k - if (histfreq(ns) == '1' .or. .not. hist_avg) then + if (histfreq(ns) == '1' .or. .not. hist_avg .or. write_ic) then write (nu_hdr, 996) nrec,trim(avail_hist_fields(n)%vname), & 'time_rep','instantaneous' else diff --git a/cicecore/cicedyn/infrastructure/io/io_netcdf/ice_history_write.F90 b/cicecore/cicedyn/infrastructure/io/io_netcdf/ice_history_write.F90 index d85ec5e3c..10d750300 100644 --- a/cicecore/cicedyn/infrastructure/io/io_netcdf/ice_history_write.F90 +++ b/cicecore/cicedyn/infrastructure/io/io_netcdf/ice_history_write.F90 @@ -159,7 +159,7 @@ subroutine ice_write_hist (ns) ! define dimensions !----------------------------------------------------------------- - if (hist_avg) then + if (hist_avg .and. .not. write_ic) then status = nf90_def_dim(ncid,'d2',2,boundid) if (status /= nf90_noerr) call abort_ice(subname// & 'ERROR: defining dim d2') @@ -241,7 +241,7 @@ subroutine ice_write_hist (ns) call abort_ice(subname//'ERROR: invalid calendar settings') endif - if (hist_avg) then + if (hist_avg .and. .not. write_ic) then status = nf90_put_att(ncid,varid,'bounds','time_bounds') if (status /= nf90_noerr) call abort_ice(subname// & 'ERROR: time bounds') @@ -251,7 +251,7 @@ subroutine ice_write_hist (ns) ! 
Define attributes for time bounds if hist_avg is true !----------------------------------------------------------------- - if (hist_avg) then + if (hist_avg .and. .not. write_ic) then dimid(1) = boundid dimid(2) = timid status = nf90_def_var(ncid,'time_bounds',lprecision,dimid(1:2),varid) @@ -745,7 +745,7 @@ subroutine ice_write_hist (ns) ! write time_bounds info !----------------------------------------------------------------- - if (hist_avg) then + if (hist_avg .and. .not. write_ic) then status = nf90_inq_varid(ncid,'time_bounds',varid) if (status /= nf90_noerr) call abort_ice(subname// & 'ERROR: getting time_bounds id') @@ -1236,7 +1236,7 @@ end subroutine ice_write_hist subroutine ice_write_hist_attrs(ncid, varid, hfield, ns) use ice_kinds_mod - use ice_calendar, only: histfreq, histfreq_n + use ice_calendar, only: histfreq, histfreq_n, write_ic use ice_history_shared, only: ice_hist_field, history_precision, & hist_avg #ifdef USE_NETCDF @@ -1279,7 +1279,7 @@ subroutine ice_write_hist_attrs(ncid, varid, hfield, ns) call ice_write_hist_fill(ncid,varid,hfield%vname,history_precision) ! Add cell_methods attribute to variables if averaged - if (hist_avg) then + if (hist_avg .and. .not. write_ic) then if (TRIM(hfield%vname(1:4))/='sig1' & .and.TRIM(hfield%vname(1:4))/='sig2' & .and.TRIM(hfield%vname(1:9))/='sistreave' & @@ -1293,6 +1293,7 @@ subroutine ice_write_hist_attrs(ncid, varid, hfield, ns) if ((histfreq(ns) == '1' .and. histfreq_n(ns) == 1) & .or..not. hist_avg & + .or. write_ic & .or.TRIM(hfield%vname(1:4))=='divu' & .or.TRIM(hfield%vname(1:5))=='shear' & .or.TRIM(hfield%vname(1:4))=='sig1' & diff --git a/cicecore/cicedyn/infrastructure/io/io_pio2/ice_history_write.F90 b/cicecore/cicedyn/infrastructure/io/io_pio2/ice_history_write.F90 index a697a98d5..25f9850ce 100644 --- a/cicecore/cicedyn/infrastructure/io/io_pio2/ice_history_write.F90 +++ b/cicecore/cicedyn/infrastructure/io/io_pio2/ice_history_write.F90 @@ -195,7 +195,7 @@ subroutine ice_write_hist (ns) ! define dimensions !----------------------------------------------------------------- - if (hist_avg) then + if (hist_avg .and. .not. write_ic) then status = pio_def_dim(File,'d2',2,boundid) endif @@ -233,12 +233,12 @@ subroutine ice_write_hist (ns) call abort_ice(subname//'ERROR: invalid calendar settings') endif - if (hist_avg) then + if (hist_avg .and. .not. write_ic) then status = pio_put_att(File,varid,'bounds','time_bounds') endif ! Define attributes for time_bounds if hist_avg is true - if (hist_avg) then + if (hist_avg .and. .not. write_ic) then dimid2(1) = boundid dimid2(2) = timid status = pio_def_var(File,'time_bounds',pio_double,dimid2,varid) @@ -702,7 +702,7 @@ subroutine ice_write_hist (ns) ! write time_bounds info !----------------------------------------------------------------- - if (hist_avg) then + if (hist_avg .and. .not. write_ic) then status = pio_inq_varid(File,'time_bounds',varid) time_bounds=(/time_beg(ns),time_end(ns)/) bnd_start = (/1,1/) @@ -1219,7 +1219,7 @@ end subroutine ice_write_hist subroutine ice_write_hist_attrs(File, varid, hfield, ns) use ice_kinds_mod - use ice_calendar, only: histfreq, histfreq_n + use ice_calendar, only: histfreq, histfreq_n, write_ic use ice_history_shared, only: ice_hist_field, history_precision, & hist_avg use ice_pio @@ -1250,7 +1250,7 @@ subroutine ice_write_hist_attrs(File, varid, hfield, ns) call ice_write_hist_fill(File,varid,hfield%vname,history_precision) ! Add cell_methods attribute to variables if averaged - if (hist_avg) then + if (hist_avg .and. .not. 
write_ic) then if (TRIM(hfield%vname(1:4))/='sig1' & .and.TRIM(hfield%vname(1:4))/='sig2' & .and.TRIM(hfield%vname(1:9))/='sistreave' & @@ -1262,6 +1262,7 @@ subroutine ice_write_hist_attrs(File, varid, hfield, ns) if ((histfreq(ns) == '1' .and. histfreq_n(ns) == 1) & .or..not. hist_avg & + .or. write_ic & .or.TRIM(hfield%vname(1:4))=='divu' & .or.TRIM(hfield%vname(1:5))=='shear' & .or.TRIM(hfield%vname(1:4))=='sig1' & diff --git a/cicecore/drivers/direct/nemo_concepts/CICE_InitMod.F90 b/cicecore/drivers/direct/nemo_concepts/CICE_InitMod.F90 index 85050d8c9..3a8f5e33d 100644 --- a/cicecore/drivers/direct/nemo_concepts/CICE_InitMod.F90 +++ b/cicecore/drivers/direct/nemo_concepts/CICE_InitMod.F90 @@ -185,6 +185,8 @@ subroutine cice_init if (trim(runtype) == 'continue' .or. restart) & call init_shortwave ! initialize radiative transfer + if (write_ic) call accum_hist(dt) ! write initial conditions + ! determine the time and date at the end of the first timestep call advance_timestep() @@ -215,8 +217,6 @@ subroutine cice_init call init_flux_atm ! initialize atmosphere fluxes sent to coupler call init_flux_ocn ! initialize ocean fluxes sent to coupler - if (write_ic) call accum_hist(dt) ! write initial conditions - end subroutine cice_init !======================================================================= diff --git a/cicecore/drivers/standalone/cice/CICE_InitMod.F90 b/cicecore/drivers/standalone/cice/CICE_InitMod.F90 index 8de05a121..0371c7f38 100644 --- a/cicecore/drivers/standalone/cice/CICE_InitMod.F90 +++ b/cicecore/drivers/standalone/cice/CICE_InitMod.F90 @@ -199,6 +199,8 @@ subroutine cice_init if (trim(runtype) == 'continue' .or. restart) & call init_shortwave ! initialize radiative transfer + if (write_ic) call accum_hist(dt) ! write initial conditions + ! tcraig, use advance_timestep here ! istep = istep + 1 ! update time step counters ! istep1 = istep1 + 1 @@ -243,8 +245,6 @@ subroutine cice_init call init_flux_atm ! initialize atmosphere fluxes sent to coupler call init_flux_ocn ! initialize ocean fluxes sent to coupler - if (write_ic) call accum_hist(dt) ! 
write initial conditions - if (my_task == master_task) then call ice_memusage_print(nu_diag,subname//':end') endif From 75b792c11539834c8def2b6f060ed5f1364bd927 Mon Sep 17 00:00:00 2001 From: Tony Craig Date: Mon, 13 Mar 2023 15:25:51 -0700 Subject: [PATCH 54/57] Add haloupdate unit test (#820) * Initial halochk unit test implementation - Add halochk unit test - Add "unknown" and "noupdate" checks to ice_boundary - Remove field_loc_Wface, not used anywhere, not supported - Update cice_decomp.csh script To Do: validate tripole and tripoleT, add unit test to test suite * - Fix bug in serial/ice_boundary.F90 tripoleT halo update - Reduce redundant tripole buffer copies in serial/ice_boundary.F90 - Generalize iSrc wraparound calculation in ice_boundary.F90 - Add open, cyclic, tripole, and tripoleT set_nml files - Update unittest suite * - Add haloUpdate_stress test to halo unit test - Add tripoleT support to haloUpdate_stress - Add abort check that nx_global is even for tripole grids - Update documentation * update documentation * update documentation * update documentation * update documentation * update documentation * update documentation * Update halochk test to make haloupdate_stress test more robust, less chance for false positive * update documentation * update documentation * update documentation --- .../infrastructure/comm/mpi/ice_boundary.F90 | 315 ++++++- .../comm/serial/ice_boundary.F90 | 442 +++++++--- cicecore/cicedyn/infrastructure/ice_grid.F90 | 5 + .../drivers/unittest/halochk/CICE_InitMod.F90 | 472 ++++++++++ cicecore/drivers/unittest/halochk/halochk.F90 | 811 ++++++++++++++++++ cicecore/shared/ice_constants.F90 | 3 +- configuration/scripts/Makefile | 6 +- configuration/scripts/cice_decomp.csh | 6 +- configuration/scripts/options/set_env.halochk | 2 + configuration/scripts/options/set_nml.cyclic | 3 + configuration/scripts/options/set_nml.open | 3 + configuration/scripts/options/set_nml.tripole | 3 + .../scripts/options/set_nml.tripolet | 3 + configuration/scripts/tests/unittest_suite.ts | 41 +- doc/source/user_guide/ug_implementation.rst | 61 +- doc/source/user_guide/ug_testing.rst | 1 + 16 files changed, 2017 insertions(+), 160 deletions(-) create mode 100644 cicecore/drivers/unittest/halochk/CICE_InitMod.F90 create mode 100644 cicecore/drivers/unittest/halochk/halochk.F90 create mode 100644 configuration/scripts/options/set_env.halochk create mode 100644 configuration/scripts/options/set_nml.cyclic create mode 100644 configuration/scripts/options/set_nml.open create mode 100644 configuration/scripts/options/set_nml.tripole create mode 100644 configuration/scripts/options/set_nml.tripolet diff --git a/cicecore/cicedyn/infrastructure/comm/mpi/ice_boundary.F90 b/cicecore/cicedyn/infrastructure/comm/mpi/ice_boundary.F90 index 68436cd0f..2a7d68c11 100644 --- a/cicecore/cicedyn/infrastructure/comm/mpi/ice_boundary.F90 +++ b/cicecore/cicedyn/infrastructure/comm/mpi/ice_boundary.F90 @@ -12,14 +12,53 @@ module ice_boundary ! fixes for non-existent blocks ! 2008-01-28: Elizabeth Hunke replaced old routines with new POP ! infrastructure +! +!----------------------------------------------------------------------- +! +! Some notes on tripole, A-H below are gridpoints at i = 1:nx_global +! where nx_global=8. The schematics below show the general layout of the center +! points on the tripole fold. More complex pictures are needed to show +! relative orientation and offsets of east, north, and northeast points +! across the fold. See also appendix E of the NEMO_manual, +! 
https://zenodo.org/record/6334656#.YiYirhPMLXQ. Note the NFtype=T +! is the tripole u-fold grid with T-grid=center, U-grid=east, V-grid=north, +! and F-grid=northeast points in CICE. NFtype=F is similar to tripoleT +! except for the treatment of the poles. The CICE implementation also +! averages all degenerate points, NEMO's strategy seems to be to copy +! data from one side of the tripole to the other for degenerate points. +! +! tripole: u-fold, fold is on north edge of ny_global +! north and northeast points on the fold are degenerate and averaged +! A,H,D,and E are pole points +! +! ny_global+2 H G F E D C B A @ny_global-1 +! ny_global+1 H G F E D C B A @ny_global +! ny_global A B C D E F G H +! ny_global-1 A B C D E F G H +! +! tripoleT: t-fold, fold is thru center of ny_global +! center and east points at ny_global are degenerate and averaged +! north and northeast point at ny_global are not prognostic, they are halos +! A and E are pole points +! +! ny_global+2 H G F E D C B A @ny_global-2 +! ny_global+1 H G F E D C B A @ny_global-1 +! ny_global A BH CG DF E FD GC HB A +! ny_global-1 A B C D E F G H +! ny_global-2 A B C D E F G H +! +!----------------------------------------------------------------------- + use mpi ! MPI Fortran module use ice_kinds_mod use ice_communicate, only: my_task, mpiR4, mpiR8, mpitagHalo use ice_constants, only: field_type_scalar, & field_type_vector, field_type_angle, & + field_type_unknown, field_type_noupdate, & field_loc_center, field_loc_NEcorner, & - field_loc_Nface, field_loc_Eface + field_loc_Nface, field_loc_Eface, & + field_loc_unknown, field_loc_noupdate use ice_global_reductions, only: global_maxval use ice_exit, only: abort_ice use icepack_intfc, only: icepack_warnings_flush, icepack_warnings_aborted @@ -1229,6 +1268,23 @@ subroutine ice_HaloUpdate2DR8(array, halo, & character(len=*), parameter :: subname = '(ice_HaloUpdate2DR8)' +!----------------------------------------------------------------------- +! +! abort or return on unknown or noupdate field_loc or field_type +! +!----------------------------------------------------------------------- + + if (fieldLoc == field_loc_unknown .or. & + fieldKind == field_type_unknown) then + call abort_ice(subname//'ERROR: use of field_loc/type_unknown not allowed') + return + endif + + if (fieldLoc == field_loc_noupdate .or. & + fieldKind == field_type_noupdate) then + return + endif + !----------------------------------------------------------------------- ! ! initialize error code and fill value @@ -1552,7 +1608,7 @@ subroutine ice_HaloUpdate2DR8(array, halo, & !*** correct for offsets iSrc = iSrc - ioffset jSrc = jSrc - joffset - if (iSrc == 0) iSrc = nxGlobal + if (iSrc < 1 ) iSrc = iSrc + nxGlobal if (iSrc > nxGlobal) iSrc = iSrc - nxGlobal !*** for center and Eface on u-fold, and NE corner and Nface @@ -1653,6 +1709,23 @@ subroutine ice_HaloUpdate2DR4(array, halo, & character(len=*), parameter :: subname = '(ice_HaloUpdate2DR4)' +!----------------------------------------------------------------------- +! +! abort or return on unknown or noupdate field_loc or field_type +! +!----------------------------------------------------------------------- + + if (fieldLoc == field_loc_unknown .or. & + fieldKind == field_type_unknown) then + call abort_ice(subname//'ERROR: use of field_loc/type_unknown not allowed') + return + endif + + if (fieldLoc == field_loc_noupdate .or. 
& + fieldKind == field_type_noupdate) then + return + endif + !----------------------------------------------------------------------- ! ! initialize error code and fill value @@ -1950,7 +2023,7 @@ subroutine ice_HaloUpdate2DR4(array, halo, & !*** correct for offsets iSrc = iSrc - ioffset jSrc = jSrc - joffset - if (iSrc == 0) iSrc = nxGlobal + if (iSrc < 1 ) iSrc = iSrc + nxGlobal if (iSrc > nxGlobal) iSrc = iSrc - nxGlobal !*** for center and Eface on u-fold, and NE corner and Nface @@ -2051,6 +2124,23 @@ subroutine ice_HaloUpdate2DI4(array, halo, & character(len=*), parameter :: subname = '(ice_HaloUpdate2DI4)' +!----------------------------------------------------------------------- +! +! abort or return on unknown or noupdate field_loc or field_type +! +!----------------------------------------------------------------------- + + if (fieldLoc == field_loc_unknown .or. & + fieldKind == field_type_unknown) then + call abort_ice(subname//'ERROR: use of field_loc/type_unknown not allowed') + return + endif + + if (fieldLoc == field_loc_noupdate .or. & + fieldKind == field_type_noupdate) then + return + endif + !----------------------------------------------------------------------- ! ! initialize error code and fill value @@ -2348,7 +2438,7 @@ subroutine ice_HaloUpdate2DI4(array, halo, & !*** correct for offsets iSrc = iSrc - ioffset jSrc = jSrc - joffset - if (iSrc == 0) iSrc = nxGlobal + if (iSrc < 1 ) iSrc = iSrc + nxGlobal if (iSrc > nxGlobal) iSrc = iSrc - nxGlobal !*** for center and Eface on u-fold, and NE corner and Nface @@ -2426,6 +2516,23 @@ subroutine ice_HaloUpdate2DL1(array, halo, & character(len=*), parameter :: subname = '(ice_HaloUpdate2DL1)' +!----------------------------------------------------------------------- +! +! abort or return on unknown or noupdate field_loc or field_type +! +!----------------------------------------------------------------------- + + if (fieldLoc == field_loc_unknown .or. & + fieldKind == field_type_unknown) then + call abort_ice(subname//'ERROR: use of field_loc/type_unknown not allowed') + return + endif + + if (fieldLoc == field_loc_noupdate .or. & + fieldKind == field_type_noupdate) then + return + endif + !----------------------------------------------------------------------- ! ! copy logical into integer array and call haloupdate on integer array @@ -2519,6 +2626,23 @@ subroutine ice_HaloUpdate3DR8(array, halo, & character(len=*), parameter :: subname = '(ice_HaloUpdate3DR8)' +!----------------------------------------------------------------------- +! +! abort or return on unknown or noupdate field_loc or field_type +! +!----------------------------------------------------------------------- + + if (fieldLoc == field_loc_unknown .or. & + fieldKind == field_type_unknown) then + call abort_ice(subname//'ERROR: use of field_loc/type_unknown not allowed') + return + endif + + if (fieldLoc == field_loc_noupdate .or. & + fieldKind == field_type_noupdate) then + return + endif + !----------------------------------------------------------------------- ! ! 
initialize error code and fill value @@ -2858,7 +2982,7 @@ subroutine ice_HaloUpdate3DR8(array, halo, & !*** correct for offsets iSrc = iSrc - ioffset jSrc = jSrc - joffset - if (iSrc == 0) iSrc = nxGlobal + if (iSrc < 1 ) iSrc = iSrc + nxGlobal if (iSrc > nxGlobal) iSrc = iSrc - nxGlobal !*** for center and Eface on u-fold, and NE corner and Nface @@ -2976,6 +3100,23 @@ subroutine ice_HaloUpdate3DR4(array, halo, & character(len=*), parameter :: subname = '(ice_HaloUpdate3DR4)' +!----------------------------------------------------------------------- +! +! abort or return on unknown or noupdate field_loc or field_type +! +!----------------------------------------------------------------------- + + if (fieldLoc == field_loc_unknown .or. & + fieldKind == field_type_unknown) then + call abort_ice(subname//'ERROR: use of field_loc/type_unknown not allowed') + return + endif + + if (fieldLoc == field_loc_noupdate .or. & + fieldKind == field_type_noupdate) then + return + endif + !----------------------------------------------------------------------- ! ! initialize error code and fill value @@ -3315,7 +3456,7 @@ subroutine ice_HaloUpdate3DR4(array, halo, & !*** correct for offsets iSrc = iSrc - ioffset jSrc = jSrc - joffset - if (iSrc == 0) iSrc = nxGlobal + if (iSrc < 1 ) iSrc = iSrc + nxGlobal if (iSrc > nxGlobal) iSrc = iSrc - nxGlobal !*** for center and Eface on u-fold, and NE corner and Nface @@ -3433,6 +3574,23 @@ subroutine ice_HaloUpdate3DI4(array, halo, & character(len=*), parameter :: subname = '(ice_HaloUpdate3DI4)' +!----------------------------------------------------------------------- +! +! abort or return on unknown or noupdate field_loc or field_type +! +!----------------------------------------------------------------------- + + if (fieldLoc == field_loc_unknown .or. & + fieldKind == field_type_unknown) then + call abort_ice(subname//'ERROR: use of field_loc/type_unknown not allowed') + return + endif + + if (fieldLoc == field_loc_noupdate .or. & + fieldKind == field_type_noupdate) then + return + endif + !----------------------------------------------------------------------- ! ! initialize error code and fill value @@ -3772,7 +3930,7 @@ subroutine ice_HaloUpdate3DI4(array, halo, & !*** correct for offsets iSrc = iSrc - ioffset jSrc = jSrc - joffset - if (iSrc == 0) iSrc = nxGlobal + if (iSrc < 1 ) iSrc = iSrc + nxGlobal if (iSrc > nxGlobal) iSrc = iSrc - nxGlobal !*** for center and Eface on u-fold, and NE corner and Nface @@ -3890,6 +4048,23 @@ subroutine ice_HaloUpdate4DR8(array, halo, & character(len=*), parameter :: subname = '(ice_HaloUpdate4DR8)' +!----------------------------------------------------------------------- +! +! abort or return on unknown or noupdate field_loc or field_type +! +!----------------------------------------------------------------------- + + if (fieldLoc == field_loc_unknown .or. & + fieldKind == field_type_unknown) then + call abort_ice(subname//'ERROR: use of field_loc/type_unknown not allowed') + return + endif + + if (fieldLoc == field_loc_noupdate .or. & + fieldKind == field_type_noupdate) then + return + endif + !----------------------------------------------------------------------- ! ! 
initialize error code and fill value @@ -4251,7 +4426,7 @@ subroutine ice_HaloUpdate4DR8(array, halo, & !*** correct for offsets iSrc = iSrc - ioffset jSrc = jSrc - joffset - if (iSrc == 0) iSrc = nxGlobal + if (iSrc < 1 ) iSrc = iSrc + nxGlobal if (iSrc > nxGlobal) iSrc = iSrc - nxGlobal !*** for center and Eface on u-fold, and NE corner and Nface @@ -4371,6 +4546,23 @@ subroutine ice_HaloUpdate4DR4(array, halo, & character(len=*), parameter :: subname = '(ice_HaloUpdate4DR4)' +!----------------------------------------------------------------------- +! +! abort or return on unknown or noupdate field_loc or field_type +! +!----------------------------------------------------------------------- + + if (fieldLoc == field_loc_unknown .or. & + fieldKind == field_type_unknown) then + call abort_ice(subname//'ERROR: use of field_loc/type_unknown not allowed') + return + endif + + if (fieldLoc == field_loc_noupdate .or. & + fieldKind == field_type_noupdate) then + return + endif + !----------------------------------------------------------------------- ! ! initialize error code and fill value @@ -4732,7 +4924,7 @@ subroutine ice_HaloUpdate4DR4(array, halo, & !*** correct for offsets iSrc = iSrc - ioffset jSrc = jSrc - joffset - if (iSrc == 0) iSrc = nxGlobal + if (iSrc < 1 ) iSrc = iSrc + nxGlobal if (iSrc > nxGlobal) iSrc = iSrc - nxGlobal !*** for center and Eface on u-fold, and NE corner and Nface @@ -4852,6 +5044,23 @@ subroutine ice_HaloUpdate4DI4(array, halo, & character(len=*), parameter :: subname = '(ice_HaloUpdate4DI4)' +!----------------------------------------------------------------------- +! +! abort or return on unknown or noupdate field_loc or field_type +! +!----------------------------------------------------------------------- + + if (fieldLoc == field_loc_unknown .or. & + fieldKind == field_type_unknown) then + call abort_ice(subname//'ERROR: use of field_loc/type_unknown not allowed') + return + endif + + if (fieldLoc == field_loc_noupdate .or. & + fieldKind == field_type_noupdate) then + return + endif + !----------------------------------------------------------------------- ! ! initialize error code and fill value @@ -5213,7 +5422,7 @@ subroutine ice_HaloUpdate4DI4(array, halo, & !*** correct for offsets iSrc = iSrc - ioffset jSrc = jSrc - joffset - if (iSrc == 0) iSrc = nxGlobal + if (iSrc < 1 ) iSrc = iSrc + nxGlobal if (iSrc > nxGlobal) iSrc = iSrc - nxGlobal !*** for center and Eface on u-fold, and NE corner and Nface @@ -5265,6 +5474,7 @@ end subroutine ice_HaloUpdate4DI4 !*********************************************************************** ! This routine updates ghost cells for an input array using ! a second array as needed by the stress fields. +! This is just like 2DR8 except no averaging and only on tripole subroutine ice_HaloUpdate_stress(array1, array2, halo, & fieldLoc, fieldKind, & @@ -5319,6 +5529,23 @@ subroutine ice_HaloUpdate_stress(array1, array2, halo, & character(len=*), parameter :: subname = '(ice_HaloUpdate_stress)' +!----------------------------------------------------------------------- +! +! abort or return on unknown or noupdate field_loc or field_type +! +!----------------------------------------------------------------------- + + if (fieldLoc == field_loc_unknown .or. & + fieldKind == field_type_unknown) then + call abort_ice(subname//'ERROR: use of field_loc/type_unknown not allowed') + return + endif + + if (fieldLoc == field_loc_noupdate .or. 
& + fieldKind == field_type_noupdate) then + return + endif + !----------------------------------------------------------------------- ! ! initialize error code and fill value @@ -5485,30 +5712,61 @@ subroutine ice_HaloUpdate_stress(array1, array2, halo, & call abort_ice(subname//'ERROR: Unknown field kind') end select - select case (fieldLoc) - case (field_loc_center) ! cell center location + if (halo%tripoleTFlag) then + + select case (fieldLoc) + case (field_loc_center) ! cell center location + + ioffset = -1 + joffset = 0 - ioffset = 0 - joffset = 0 + case (field_loc_NEcorner) ! cell corner location - case (field_loc_NEcorner) ! cell corner location + ioffset = 0 + joffset = 1 - ioffset = 1 - joffset = 1 + case (field_loc_Eface) ! cell center location - case (field_loc_Eface) + ioffset = 0 + joffset = 0 - ioffset = 1 - joffset = 0 + case (field_loc_Nface) ! cell corner (velocity) location - case (field_loc_Nface) + ioffset = -1 + joffset = 1 - ioffset = 0 - joffset = 1 + case default + call abort_ice(subname//'ERROR: Unknown field location') + end select - case default - call abort_ice(subname//'ERROR: Unknown field location') - end select + else ! tripole u-fold + + select case (fieldLoc) + case (field_loc_center) ! cell center location + + ioffset = 0 + joffset = 0 + + case (field_loc_NEcorner) ! cell corner location + + ioffset = 1 + joffset = 1 + + case (field_loc_Eface) + + ioffset = 1 + joffset = 0 + + case (field_loc_Nface) + + ioffset = 0 + joffset = 1 + + case default + call abort_ice(subname//'ERROR: Unknown field location') + end select + + endif !*** copy out of global tripole buffer into local !*** ghost cells @@ -5531,14 +5789,15 @@ subroutine ice_HaloUpdate_stress(array1, array2, halo, & !*** correct for offsets iSrc = iSrc - ioffset jSrc = jSrc - joffset - if (iSrc == 0) iSrc = nxGlobal + if (iSrc < 1 ) iSrc = iSrc + nxGlobal + if (iSrc > nxGlobal) iSrc = iSrc - nxGlobal !*** for center and Eface, do not need to replace !*** top row of physical domain, so jSrc should be !*** out of range and skipped !*** otherwise do the copy - if (jSrc <= nghost+1 .AND. jDst /= -1 ) then + if (jSrc <= halo%tripoleRows .and. jSrc>0 .and. jDst>0) then array1(iDst,jDst,dstBlock) = isign*bufTripoleR8(iSrc,jSrc) endif diff --git a/cicecore/cicedyn/infrastructure/comm/serial/ice_boundary.F90 b/cicecore/cicedyn/infrastructure/comm/serial/ice_boundary.F90 index 2b81c4441..aaebcfaad 100644 --- a/cicecore/cicedyn/infrastructure/comm/serial/ice_boundary.F90 +++ b/cicecore/cicedyn/infrastructure/comm/serial/ice_boundary.F90 @@ -12,13 +12,20 @@ module ice_boundary ! fixes for non-existent blocks ! 2008-01-28: Elizabeth Hunke replaced old routines with new POP ! infrastructure +! 2023-03-09: Tony Craig updated the implementation to fix bug in +! tripoleT and reduce number of copies in tripole overall. +! Because all blocks are local, can fill the tripole +! buffer from "north" copies. This is not true for +! the MPI version. 
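
The same wraparound correction shows up in every halo-update routine above: the old special case "if (iSrc == 0) iSrc = nxGlobal" only recovered an index that fell exactly one column west of the domain, while the generalized pair of tests recovers any source index within one domain width of either edge. A minimal standalone sketch of the behavior, with a hypothetical program name and test values that are not part of the patch:

    program wrap_demo
      ! Illustrates the generalized iSrc wraparound used in ice_boundary.F90:
      ! indices that fall off either side of the global domain are shifted
      ! back into 1..nxGlobal by adding or subtracting the domain width.
      implicit none
      integer, parameter :: nxGlobal = 8   ! matches the nx_global=8 schematic above
      integer :: i, iSrc

      do i = -1, nxGlobal + 2
         iSrc = i
         if (iSrc < 1       ) iSrc = iSrc + nxGlobal   ! wrap from the west
         if (iSrc > nxGlobal) iSrc = iSrc - nxGlobal   ! wrap from the east
         write(*,'(a,i3,a,i3)') ' raw index', i, ' wraps to', iSrc
      end do
    end program wrap_demo

With the old form, a raw index of -1 (possible once the tripoleT offsets are applied) would have been left out of range; the generalized form maps it to nxGlobal - 1.
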
use ice_kinds_mod use ice_communicate, only: my_task use ice_constants, only: field_type_scalar, & field_type_vector, field_type_angle, & + field_type_unknown, field_type_noupdate, & field_loc_center, field_loc_NEcorner, & - field_loc_Nface, field_loc_Eface + field_loc_Nface, field_loc_Eface, & + field_loc_unknown, field_loc_noupdate use ice_global_reductions, only: global_maxval use ice_exit, only: abort_ice use icepack_intfc, only: icepack_warnings_flush, icepack_warnings_aborted @@ -310,10 +317,11 @@ function ice_HaloCreate(dist, nsBoundaryType, ewBoundaryType, & !*** for that !echmod if (tripoleBlock .and. dstProc /= srcProc) then - if (tripoleBlock) then - call ice_HaloIncrementMsgCount(sendCount, recvCount, & - srcProc, dstProc, northMsgSize) - endif +! tcx,tcraig, 3/2023, this is not needed +! if (tripoleBlock) then +! call ice_HaloIncrementMsgCount(sendCount, recvCount, & +! srcProc, dstProc, northMsgSize) +! endif !*** find west neighbor block and add to message count @@ -336,10 +344,11 @@ function ice_HaloCreate(dist, nsBoundaryType, ewBoundaryType, & !*** for that !echmod if (tripoleBlock .and. dstProc /= srcProc) then - if (tripoleBlock) then - call ice_HaloIncrementMsgCount(sendCount, recvCount, & - srcProc, dstProc, northMsgSize) - endif +! tcx,tcraig, 3/2023, this is not needed +! if (tripoleBlock) then +! call ice_HaloIncrementMsgCount(sendCount, recvCount, & +! srcProc, dstProc, northMsgSize) +! endif !*** find northeast neighbor block and add to message count @@ -352,11 +361,12 @@ function ice_HaloCreate(dist, nsBoundaryType, ewBoundaryType, & call ice_distributionGetBlockLoc(dist, neBlock, dstProc, & dstLocalID) - else if (neBlock < 0) then ! tripole north row - msgSize = northMsgSize ! tripole needs whole top row of block - - call ice_distributionGetBlockLoc(dist, abs(neBlock), dstProc, & - dstLocalID) +! tcx,tcraig, 3/2023, this is not needed +! else if (neBlock < 0) then ! tripole north row +! msgSize = northMsgSize ! tripole needs whole top row of block +! +! call ice_distributionGetBlockLoc(dist, abs(neBlock), dstProc, & +! dstLocalID) else dstProc = 0 dstLocalID = 0 @@ -376,11 +386,12 @@ function ice_HaloCreate(dist, nsBoundaryType, ewBoundaryType, & call ice_distributionGetBlockLoc(dist, nwBlock, dstProc, & dstLocalID) - else if (nwBlock < 0) then ! tripole north row, count block - msgSize = northMsgSize ! tripole NE corner update - entire row needed - - call ice_distributionGetBlockLoc(dist, abs(nwBlock), dstProc, & - dstLocalID) +! tcx,tcraig, 3/2023, this is not needed +! else if (nwBlock < 0) then ! tripole north row, count block +! msgSize = northMsgSize ! tripole NE corner update - entire row needed +! +! call ice_distributionGetBlockLoc(dist, abs(nwBlock), dstProc, & +! dstLocalID) else dstProc = 0 @@ -482,8 +493,6 @@ function ice_HaloCreate(dist, nsBoundaryType, ewBoundaryType, & northBlock = ice_blocksGetNbrID(iblock, ice_blocksNorth, & ewBoundaryType, nsBoundaryType) - call ice_HaloMsgCreate(halo, dist, iblock, northBlock, 'north') - !*** set tripole flag and add two copies for inserting !*** and extracting info from the tripole buffer @@ -493,6 +502,7 @@ function ice_HaloCreate(dist, nsBoundaryType, ewBoundaryType, & call ice_HaloMsgCreate(halo, dist, -iblock, iblock, 'north') else tripoleBlock = .false. 
+ call ice_HaloMsgCreate(halo, dist, iblock, northBlock, 'north') endif !*** find south neighbor block @@ -513,9 +523,10 @@ function ice_HaloCreate(dist, nsBoundaryType, ewBoundaryType, & !*** the east block to make sure enough information is !*** available for tripole manipulations - if (tripoleBlock) then - call ice_HaloMsgCreate(halo, dist, iblock, -eastBlock, 'north') - endif +! tcx,tcraig, 3/2023, this is not needed +! if (tripoleBlock) then +! call ice_HaloMsgCreate(halo, dist, iblock, -eastBlock, 'north') +! endif !*** find west neighbor block @@ -528,9 +539,10 @@ function ice_HaloCreate(dist, nsBoundaryType, ewBoundaryType, & !*** the west block to make sure enough information is !*** available for tripole manipulations - if (tripoleBlock) then - call ice_HaloMsgCreate(halo, dist, iblock, -westBlock, 'north') - endif +! tcx,tcraig, 3/2023, this is not needed +! if (tripoleBlock) then +! call ice_HaloMsgCreate(halo, dist, iblock, -westBlock, 'north') +! endif !*** find northeast neighbor block @@ -699,6 +711,23 @@ subroutine ice_HaloUpdate2DR8(array, halo, & character(len=*), parameter :: subname = '(ice_HaloUpdate2DR8)' +!----------------------------------------------------------------------- +! +! abort or return on unknown or noupdate field_loc or field_type +! +!----------------------------------------------------------------------- + + if (fieldLoc == field_loc_unknown .or. & + fieldKind == field_type_unknown) then + call abort_ice(subname//'ERROR: use of field_loc/type_unknown not allowed') + return + endif + + if (fieldLoc == field_loc_noupdate .or. & + fieldKind == field_type_noupdate) then + return + endif + !----------------------------------------------------------------------- ! ! initialize error code and fill value @@ -936,7 +965,7 @@ subroutine ice_HaloUpdate2DR8(array, halo, & !*** correct for offsets iSrc = iSrc - ioffset jSrc = jSrc - joffset - if (iSrc == 0) iSrc = nxGlobal + if (iSrc < 1 ) iSrc = iSrc + nxGlobal if (iSrc > nxGlobal) iSrc = iSrc - nxGlobal !*** for center and Eface on u-fold, and NE corner and Nface @@ -1011,6 +1040,23 @@ subroutine ice_HaloUpdate2DR4(array, halo, & character(len=*), parameter :: subname = '(ice_HaloUpdate2DR4)' +!----------------------------------------------------------------------- +! +! abort or return on unknown or noupdate field_loc or field_type +! +!----------------------------------------------------------------------- + + if (fieldLoc == field_loc_unknown .or. & + fieldKind == field_type_unknown) then + call abort_ice(subname//'ERROR: use of field_loc/type_unknown not allowed') + return + endif + + if (fieldLoc == field_loc_noupdate .or. & + fieldKind == field_type_noupdate) then + return + endif + !----------------------------------------------------------------------- ! ! initialize error code and fill value @@ -1228,7 +1274,7 @@ subroutine ice_HaloUpdate2DR4(array, halo, & !*** correct for offsets iSrc = iSrc - ioffset jSrc = jSrc - joffset - if (iSrc == 0) iSrc = nxGlobal + if (iSrc < 1 ) iSrc = iSrc + nxGlobal if (iSrc > nxGlobal) iSrc = iSrc - nxGlobal !*** for center and Eface on u-fold, and NE corner and Nface @@ -1303,6 +1349,23 @@ subroutine ice_HaloUpdate2DI4(array, halo, & character(len=*), parameter :: subname = '(ice_HaloUpdate2DI4)' +!----------------------------------------------------------------------- +! +! abort or return on unknown or noupdate field_loc or field_type +! +!----------------------------------------------------------------------- + + if (fieldLoc == field_loc_unknown .or. 
& + fieldKind == field_type_unknown) then + call abort_ice(subname//'ERROR: use of field_loc/type_unknown not allowed') + return + endif + + if (fieldLoc == field_loc_noupdate .or. & + fieldKind == field_type_noupdate) then + return + endif + !----------------------------------------------------------------------- ! ! initialize error code and fill value @@ -1520,7 +1583,7 @@ subroutine ice_HaloUpdate2DI4(array, halo, & !*** correct for offsets iSrc = iSrc - ioffset jSrc = jSrc - joffset - if (iSrc == 0) iSrc = nxGlobal + if (iSrc < 1 ) iSrc = iSrc + nxGlobal if (iSrc > nxGlobal) iSrc = iSrc - nxGlobal !*** for center and Eface on u-fold, and NE corner and Nface @@ -1583,6 +1646,23 @@ subroutine ice_HaloUpdate2DL1(array, halo, & character(len=*), parameter :: subname = '(ice_HaloUpdate2DL1)' +!----------------------------------------------------------------------- +! +! abort or return on unknown or noupdate field_loc or field_type +! +!----------------------------------------------------------------------- + + if (fieldLoc == field_loc_unknown .or. & + fieldKind == field_type_unknown) then + call abort_ice(subname//'ERROR: use of field_loc/type_unknown not allowed') + return + endif + + if (fieldLoc == field_loc_noupdate .or. & + fieldKind == field_type_noupdate) then + return + endif + !----------------------------------------------------------------------- ! ! copy logical into integer array and call haloupdate on integer array @@ -1662,6 +1742,23 @@ subroutine ice_HaloUpdate3DR8(array, halo, & character(len=*), parameter :: subname = '(ice_HaloUpdate3DR8)' +!----------------------------------------------------------------------- +! +! abort or return on unknown or noupdate field_loc or field_type +! +!----------------------------------------------------------------------- + + if (fieldLoc == field_loc_unknown .or. & + fieldKind == field_type_unknown) then + call abort_ice(subname//'ERROR: use of field_loc/type_unknown not allowed') + return + endif + + if (fieldLoc == field_loc_noupdate .or. & + fieldKind == field_type_noupdate) then + return + endif + !----------------------------------------------------------------------- ! ! initialize error code and fill value @@ -1896,7 +1993,7 @@ subroutine ice_HaloUpdate3DR8(array, halo, & !*** correct for offsets iSrc = iSrc - ioffset jSrc = jSrc - joffset - if (iSrc == 0) iSrc = nxGlobal + if (iSrc < 1 ) iSrc = iSrc + nxGlobal if (iSrc > nxGlobal) iSrc = iSrc - nxGlobal !*** for center and Eface on u-fold, and NE corner and Nface @@ -1980,6 +2077,23 @@ subroutine ice_HaloUpdate3DR4(array, halo, & character(len=*), parameter :: subname = '(ice_HaloUpdate3DR4)' +!----------------------------------------------------------------------- +! +! abort or return on unknown or noupdate field_loc or field_type +! +!----------------------------------------------------------------------- + + if (fieldLoc == field_loc_unknown .or. & + fieldKind == field_type_unknown) then + call abort_ice(subname//'ERROR: use of field_loc/type_unknown not allowed') + return + endif + + if (fieldLoc == field_loc_noupdate .or. & + fieldKind == field_type_noupdate) then + return + endif + !----------------------------------------------------------------------- ! ! 
initialize error code and fill value @@ -2214,7 +2328,7 @@ subroutine ice_HaloUpdate3DR4(array, halo, & !*** correct for offsets iSrc = iSrc - ioffset jSrc = jSrc - joffset - if (iSrc == 0) iSrc = nxGlobal + if (iSrc < 1 ) iSrc = iSrc + nxGlobal if (iSrc > nxGlobal) iSrc = iSrc - nxGlobal !*** for center and Eface on u-fold, and NE corner and Nface @@ -2298,6 +2412,23 @@ subroutine ice_HaloUpdate3DI4(array, halo, & character(len=*), parameter :: subname = '(ice_HaloUpdate3DI4)' +!----------------------------------------------------------------------- +! +! abort or return on unknown or noupdate field_loc or field_type +! +!----------------------------------------------------------------------- + + if (fieldLoc == field_loc_unknown .or. & + fieldKind == field_type_unknown) then + call abort_ice(subname//'ERROR: use of field_loc/type_unknown not allowed') + return + endif + + if (fieldLoc == field_loc_noupdate .or. & + fieldKind == field_type_noupdate) then + return + endif + !----------------------------------------------------------------------- ! ! initialize error code and fill value @@ -2532,7 +2663,7 @@ subroutine ice_HaloUpdate3DI4(array, halo, & !*** correct for offsets iSrc = iSrc - ioffset jSrc = jSrc - joffset - if (iSrc == 0) iSrc = nxGlobal + if (iSrc < 1 ) iSrc = iSrc + nxGlobal if (iSrc > nxGlobal) iSrc = iSrc - nxGlobal !*** for center and Eface on u-fold, and NE corner and Nface @@ -2616,6 +2747,23 @@ subroutine ice_HaloUpdate4DR8(array, halo, & character(len=*), parameter :: subname = '(ice_HaloUpdate4DR8)' +!----------------------------------------------------------------------- +! +! abort or return on unknown or noupdate field_loc or field_type +! +!----------------------------------------------------------------------- + + if (fieldLoc == field_loc_unknown .or. & + fieldKind == field_type_unknown) then + call abort_ice(subname//'ERROR: use of field_loc/type_unknown not allowed') + return + endif + + if (fieldLoc == field_loc_noupdate .or. & + fieldKind == field_type_noupdate) then + return + endif + !----------------------------------------------------------------------- ! ! initialize error code and fill value @@ -2865,7 +3013,7 @@ subroutine ice_HaloUpdate4DR8(array, halo, & !*** correct for offsets iSrc = iSrc - ioffset jSrc = jSrc - joffset - if (iSrc == 0) iSrc = nxGlobal + if (iSrc < 1 ) iSrc = iSrc + nxGlobal if (iSrc > nxGlobal) iSrc = iSrc - nxGlobal !*** for center and Eface on u-fold, and NE corner and Nface @@ -2951,6 +3099,23 @@ subroutine ice_HaloUpdate4DR4(array, halo, & character(len=*), parameter :: subname = '(ice_HaloUpdate4DR4)' +!----------------------------------------------------------------------- +! +! abort or return on unknown or noupdate field_loc or field_type +! +!----------------------------------------------------------------------- + + if (fieldLoc == field_loc_unknown .or. & + fieldKind == field_type_unknown) then + call abort_ice(subname//'ERROR: use of field_loc/type_unknown not allowed') + return + endif + + if (fieldLoc == field_loc_noupdate .or. & + fieldKind == field_type_noupdate) then + return + endif + !----------------------------------------------------------------------- ! ! 
initialize error code and fill value @@ -3200,7 +3365,7 @@ subroutine ice_HaloUpdate4DR4(array, halo, & !*** correct for offsets iSrc = iSrc - ioffset jSrc = jSrc - joffset - if (iSrc == 0) iSrc = nxGlobal + if (iSrc < 1 ) iSrc = iSrc + nxGlobal if (iSrc > nxGlobal) iSrc = iSrc - nxGlobal !*** for center and Eface on u-fold, and NE corner and Nface @@ -3286,6 +3451,23 @@ subroutine ice_HaloUpdate4DI4(array, halo, & character(len=*), parameter :: subname = '(ice_HaloUpdate4DI4)' +!----------------------------------------------------------------------- +! +! abort or return on unknown or noupdate field_loc or field_type +! +!----------------------------------------------------------------------- + + if (fieldLoc == field_loc_unknown .or. & + fieldKind == field_type_unknown) then + call abort_ice(subname//'ERROR: use of field_loc/type_unknown not allowed') + return + endif + + if (fieldLoc == field_loc_noupdate .or. & + fieldKind == field_type_noupdate) then + return + endif + !----------------------------------------------------------------------- ! ! initialize error code and fill value @@ -3535,7 +3717,7 @@ subroutine ice_HaloUpdate4DI4(array, halo, & !*** correct for offsets iSrc = iSrc - ioffset jSrc = jSrc - joffset - if (iSrc == 0) iSrc = nxGlobal + if (iSrc < 1 ) iSrc = iSrc + nxGlobal if (iSrc > nxGlobal) iSrc = iSrc - nxGlobal !*** for center and Eface on u-fold, and NE corner and Nface @@ -3567,6 +3749,7 @@ end subroutine ice_HaloUpdate4DI4 !*********************************************************************** ! This routine updates ghost cells for an input array using ! a second array as needed by the stress fields. +! This is just like 2DR8 except no averaging and only on tripole subroutine ice_HaloUpdate_stress(array1, array2, halo, & fieldLoc, fieldKind, & @@ -3610,6 +3793,23 @@ subroutine ice_HaloUpdate_stress(array1, array2, halo, & character(len=*), parameter :: subname = '(ice_HaloUpdate_stress)' +!----------------------------------------------------------------------- +! +! abort or return on unknown or noupdate field_loc or field_type +! +!----------------------------------------------------------------------- + + if (fieldLoc == field_loc_unknown .or. & + fieldKind == field_type_unknown) then + call abort_ice(subname//'ERROR: use of field_loc/type_unknown not allowed') + return + endif + + if (fieldLoc == field_loc_noupdate .or. & + fieldKind == field_type_noupdate) then + return + endif + !----------------------------------------------------------------------- ! ! initialize error code and fill value @@ -3697,30 +3897,61 @@ subroutine ice_HaloUpdate_stress(array1, array2, halo, & call abort_ice(subname//'ERROR: Unknown field kind') end select - select case (fieldLoc) - case (field_loc_center) ! cell center location + if (halo%tripoleTFlag) then - ioffset = 0 - joffset = 0 + select case (fieldLoc) + case (field_loc_center) ! cell center location - case (field_loc_NEcorner) ! cell corner location + ioffset = -1 + joffset = 0 - ioffset = 1 - joffset = 1 + case (field_loc_NEcorner) ! cell corner location - case (field_loc_Eface) + ioffset = 0 + joffset = 1 - ioffset = 1 - joffset = 0 + case (field_loc_Eface) ! cell center location - case (field_loc_Nface) + ioffset = 0 + joffset = 0 - ioffset = 0 - joffset = 1 + case (field_loc_Nface) ! 
cell corner (velocity) location - case default - call abort_ice(subname//'ERROR: Unknown field location') - end select + ioffset = -1 + joffset = 1 + + case default + call abort_ice(subname//'ERROR: Unknown field location') + end select + + else ! tripole u-fold + + select case (fieldLoc) + case (field_loc_center) ! cell center location + + ioffset = 0 + joffset = 0 + + case (field_loc_NEcorner) ! cell corner location + + ioffset = 1 + joffset = 1 + + case (field_loc_Eface) + + ioffset = 1 + joffset = 0 + + case (field_loc_Nface) + + ioffset = 0 + joffset = 1 + + case default + call abort_ice(subname//'ERROR: Unknown field location') + end select + + endif !*** copy out of global tripole buffer into local !*** ghost cells @@ -3743,14 +3974,15 @@ subroutine ice_HaloUpdate_stress(array1, array2, halo, & !*** correct for offsets iSrc = iSrc - ioffset jSrc = jSrc - joffset - if (iSrc == 0) iSrc = nxGlobal + if (iSrc < 1 ) iSrc = iSrc + nxGlobal + if (iSrc > nxGlobal) iSrc = iSrc - nxGlobal !*** for center and Eface, do not need to replace !*** top row of physical domain, so jSrc should be !*** out of range and skipped !*** otherwise do the copy - if (jSrc <= nghost+1) then + if (jSrc <= halo%tripoleRows .and. jSrc>0 .and. jDst>0) then array1(iDst,jDst,dstBlock) = isign*bufTripoleR8(iSrc,jSrc) endif @@ -4159,36 +4391,37 @@ subroutine ice_HaloMsgCreate(halo, dist, srcBlock, dstBlock, direction) halo%numLocalCopies = msgIndx - else - - !*** tripole grid - copy entire top halo+1 - !*** rows into global buffer at src location - - msgIndx = halo%numLocalCopies - - do j=1,nghost+1 - do i=1,ieSrc-ibSrc+1 - - msgIndx = msgIndx + 1 - - halo%srcLocalAddr(1,msgIndx) = ibSrc + i - 1 - halo%srcLocalAddr(2,msgIndx) = jeSrc-1-nghost+j - halo%srcLocalAddr(3,msgIndx) = srcLocalID - - halo%dstLocalAddr(1,msgIndx) = iGlobal(ibSrc + i - 1) - halo%dstLocalAddr(2,msgIndx) = j - halo%dstLocalAddr(3,msgIndx) = -dstLocalID - - end do - end do - - halo%numLocalCopies = msgIndx +! tcx,tcraig, 3/2023, this is not needed +! else +! +! !*** tripole grid - copy entire top halo+1 +! !*** rows into global buffer at src location +! +! msgIndx = halo%numLocalCopies +! +! do j=1,nghost+1 +! do i=1,ieSrc-ibSrc+1 +! +! msgIndx = msgIndx + 1 +! +! halo%srcLocalAddr(1,msgIndx) = ibSrc + i - 1 +! halo%srcLocalAddr(2,msgIndx) = jeSrc-1-nghost+j +! halo%srcLocalAddr(3,msgIndx) = srcLocalID +! +! halo%dstLocalAddr(1,msgIndx) = iGlobal(ibSrc + i - 1) +! halo%dstLocalAddr(2,msgIndx) = j +! halo%dstLocalAddr(3,msgIndx) = -dstLocalID +! +! end do +! end do +! +! halo%numLocalCopies = msgIndx endif case ('northwest') - !*** normal northeast boundary - just copy NW corner + !*** normal northwest boundary - just copy NW corner !*** of physical domain into SE halo of NW nbr block if (dstBlock > 0) then @@ -4213,30 +4446,31 @@ subroutine ice_HaloMsgCreate(halo, dist, srcBlock, dstBlock, direction) halo%numLocalCopies = msgIndx - else - - !*** tripole grid - copy entire top halo+1 - !*** rows into global buffer at src location - - msgIndx = halo%numLocalCopies - - do j=1,nghost+1 - do i=1,ieSrc-ibSrc+1 - - msgIndx = msgIndx + 1 - - halo%srcLocalAddr(1,msgIndx) = ibSrc + i - 1 - halo%srcLocalAddr(2,msgIndx) = jeSrc-1-nghost+j - halo%srcLocalAddr(3,msgIndx) = srcLocalID - - halo%dstLocalAddr(1,msgIndx) = iGlobal(ibSrc + i - 1) - halo%dstLocalAddr(2,msgIndx) = j - halo%dstLocalAddr(3,msgIndx) = -dstLocalID - - end do - end do - - halo%numLocalCopies = msgIndx +! tcx,tcraig, 3/2023, this is not needed +! else +! +! 
!*** tripole grid - copy entire top halo+1 +! !*** rows into global buffer at src location +! +! msgIndx = halo%numLocalCopies +! +! do j=1,nghost+1 +! do i=1,ieSrc-ibSrc+1 +! +! msgIndx = msgIndx + 1 +! +! halo%srcLocalAddr(1,msgIndx) = ibSrc + i - 1 +! halo%srcLocalAddr(2,msgIndx) = jeSrc-1-nghost+j +! halo%srcLocalAddr(3,msgIndx) = srcLocalID +! +! halo%dstLocalAddr(1,msgIndx) = iGlobal(ibSrc + i - 1) +! halo%dstLocalAddr(2,msgIndx) = j +! halo%dstLocalAddr(3,msgIndx) = -dstLocalID +! +! end do +! end do +! +! halo%numLocalCopies = msgIndx endif @@ -4444,7 +4678,7 @@ subroutine ice_HaloMsgCreate(halo, dist, srcBlock, dstBlock, direction) case ('northwest') - !*** normal northeast boundary - just copy NW corner + !*** normal northwest boundary - just copy NW corner !*** of physical domain into SE halo of NW nbr block if (dstBlock > 0) then diff --git a/cicecore/cicedyn/infrastructure/ice_grid.F90 b/cicecore/cicedyn/infrastructure/ice_grid.F90 index 0d56d3400..770ee9ed9 100644 --- a/cicecore/cicedyn/infrastructure/ice_grid.F90 +++ b/cicecore/cicedyn/infrastructure/ice_grid.F90 @@ -360,6 +360,11 @@ subroutine init_grid1 file=__FILE__, line=__LINE__) endif + if (grid_type == 'tripole' .and. (mod(nx_global,2)/=0)) then + call abort_ice(subname//'ERROR: grid_type tripole requires even nx_global number', & + file=__FILE__, line=__LINE__) + endif + if (trim(grid_type) == 'displaced_pole' .or. & trim(grid_type) == 'tripole' .or. & trim(grid_type) == 'regional' ) then diff --git a/cicecore/drivers/unittest/halochk/CICE_InitMod.F90 b/cicecore/drivers/unittest/halochk/CICE_InitMod.F90 new file mode 100644 index 000000000..9ed1c5cbc --- /dev/null +++ b/cicecore/drivers/unittest/halochk/CICE_InitMod.F90 @@ -0,0 +1,472 @@ +!======================================================================= +! +! This module contains the CICE initialization routine that sets model +! parameters and initializes the grid and CICE state variables. +! +! authors Elizabeth C. Hunke, LANL +! William H. Lipscomb, LANL +! Philip W. Jones, LANL +! +! 2006: Converted to free form source (F90) by Elizabeth Hunke +! 2008: E. Hunke moved ESMF code to its own driver + + module CICE_InitMod + + use ice_kinds_mod + use ice_exit, only: abort_ice + use ice_fileunits, only: init_fileunits, nu_diag + use icepack_intfc, only: icepack_aggregate + use icepack_intfc, only: icepack_init_itd, icepack_init_itd_hist + use icepack_intfc, only: icepack_init_fsd_bounds, icepack_init_wave + use icepack_intfc, only: icepack_configure + use icepack_intfc, only: icepack_warnings_flush, icepack_warnings_aborted + use icepack_intfc, only: icepack_query_parameters, icepack_query_tracer_flags, & + icepack_query_tracer_indices, icepack_query_tracer_sizes + + implicit none + private + public :: CICE_Initialize, cice_init + +!======================================================================= + + contains + +!======================================================================= + +! Initialize the basic state, grid and all necessary parameters for +! running the CICE model. Return the initial state in routine +! export state. +! Note: This initialization driver is designed for standalone and +! CESM-coupled applications. For other +! applications (e.g., standalone CAM), this driver would be +! replaced by a different driver that calls subroutine cice_init, +! where most of the work is done. 
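
The even-nx_global abort added to init_grid1 above follows from the fold geometry sketched in the ice_boundary notes: on the tripole seam a center column i exchanges data with column nx_global - i + 1, so columns must pair off two by two. A small illustrative sketch of that pairing, assuming the u-fold center-point mapping from the schematic (the program is hypothetical, not part of the patch):

    program fold_demo
      ! Pairs each center column with its partner across the tripole u-fold.
      implicit none
      integer, parameter :: nxGlobal = 8   ! tripole grids require even nxGlobal
      integer :: i, ipair

      do i = 1, nxGlobal
         ipair = nxGlobal - i + 1          ! partner column across the fold
         write(*,'(a,i2,a,i2)') ' column', i, ' folds onto column', ipair
      end do
      ! With an odd nxGlobal the middle column would fold onto itself,
      ! which the new abort in init_grid1 rules out.
    end program fold_demo
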
+ + subroutine CICE_Initialize + + character(len=*), parameter :: subname='(CICE_Initialize)' + !-------------------------------------------------------------------- + ! model initialization + !-------------------------------------------------------------------- + + call cice_init + + end subroutine CICE_Initialize + +!======================================================================= +! +! Initialize CICE model. + + subroutine cice_init + + use ice_arrays_column, only: hin_max, c_hi_range, alloc_arrays_column + use ice_arrays_column, only: floe_rad_l, floe_rad_c, & + floe_binwidth, c_fsd_range + use ice_state, only: alloc_state + use ice_flux_bgc, only: alloc_flux_bgc + use ice_calendar, only: dt, dt_dyn, istep, istep1, write_ic, & + init_calendar, advance_timestep, calc_timesteps + use ice_communicate, only: init_communicate, my_task, master_task + use ice_diagnostics, only: init_diags + use ice_domain, only: init_domain_blocks + use ice_domain_size, only: ncat, nfsd + use ice_dyn_eap, only: init_eap + use ice_dyn_evp, only: init_evp + use ice_dyn_vp, only: init_vp + use ice_dyn_shared, only: kdyn + use ice_flux, only: init_coupler_flux, init_history_therm, & + init_history_dyn, init_flux_atm, init_flux_ocn, alloc_flux + use ice_forcing, only: init_forcing_ocn, init_forcing_atmo, & + get_forcing_atmo, get_forcing_ocn, get_wave_spec + use ice_forcing_bgc, only: get_forcing_bgc, get_atm_bgc, & + faero_default, faero_optics, alloc_forcing_bgc, fiso_default + use ice_grid, only: init_grid1, init_grid2, alloc_grid + use ice_history, only: init_hist, accum_hist + use ice_restart_shared, only: restart, runtype + use ice_init, only: input_data, init_state + use ice_init_column, only: init_thermo_vertical, init_shortwave, init_zbgc, input_zbgc, count_tracers + use ice_kinds_mod + use ice_restoring, only: ice_HaloRestore_init + use ice_timers, only: timer_total, init_ice_timers, ice_timer_start + use ice_transport_driver, only: init_transport + + logical(kind=log_kind) :: tr_aero, tr_zaero, skl_bgc, z_tracers, & + tr_iso, tr_fsd, wave_spec + character(len=*), parameter :: subname = '(cice_init)' + + call init_communicate ! initial setup for message passing + call init_fileunits ! unit numbers + + ! tcx debug, this will create a different logfile for each pe + ! if (my_task /= master_task) nu_diag = 100+my_task + + call icepack_configure() ! initialize icepack + call icepack_warnings_flush(nu_diag) + if (icepack_warnings_aborted()) call abort_ice(trim(subname), & + file=__FILE__,line= __LINE__) + + call input_data ! namelist variables + call input_zbgc ! vertical biogeochemistry namelist + call count_tracers ! count tracers + + call init_domain_blocks ! set up block decomposition + call init_grid1 ! domain distribution + call alloc_grid ! allocate grid arrays + call alloc_arrays_column ! allocate column arrays + call alloc_state ! allocate state arrays + call alloc_flux_bgc ! allocate flux_bgc arrays + call alloc_flux ! allocate flux arrays + call init_ice_timers ! initialize all timers + call ice_timer_start(timer_total) ! start timing entire run + call init_grid2 ! grid variables + call init_zbgc ! vertical biogeochemistry initialization + call init_calendar ! initialize some calendar stuff + call init_hist (dt) ! initialize output history file + + if (kdyn == 1) then + call init_evp + else if (kdyn == 2) then + call init_eap ! define eap dynamics parameters, variables + else if (kdyn == 3) then + call init_vp ! define vp dynamics parameters, variables + endif + + call init_coupler_flux ! 
initialize fluxes exchanged with coupler + + call init_thermo_vertical ! initialize vertical thermodynamics + + call icepack_init_itd(ncat=ncat, hin_max=hin_max) ! ice thickness distribution + if (my_task == master_task) then + call icepack_init_itd_hist(ncat=ncat, hin_max=hin_max, c_hi_range=c_hi_range) ! output + endif + + call icepack_query_tracer_flags(tr_fsd_out=tr_fsd) + call icepack_warnings_flush(nu_diag) + if (icepack_warnings_aborted()) call abort_ice(trim(subname), & + file=__FILE__,line= __LINE__) + + if (tr_fsd) call icepack_init_fsd_bounds (nfsd, & ! floe size distribution + floe_rad_l, & ! fsd size lower bound in m (radius) + floe_rad_c, & ! fsd size bin centre in m (radius) + floe_binwidth, & ! fsd size bin width in m (radius) + c_fsd_range, & ! string for history output + write_diags=(my_task == master_task)) ! write diag on master only + + call icepack_warnings_flush(nu_diag) + if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & + file=__FILE__, line=__LINE__) + + call init_forcing_ocn(dt) ! initialize sss and sst from data + call init_state ! initialize the ice state + call init_transport ! initialize horizontal transport + call ice_HaloRestore_init ! restored boundary conditions + + call icepack_query_parameters(skl_bgc_out=skl_bgc, z_tracers_out=z_tracers, & + wave_spec_out=wave_spec) + call icepack_warnings_flush(nu_diag) + if (icepack_warnings_aborted()) call abort_ice(trim(subname), & + file=__FILE__,line= __LINE__) + + if (skl_bgc .or. z_tracers) call alloc_forcing_bgc ! allocate biogeochemistry arrays + + call init_restart ! initialize restart variables + call init_diags ! initialize diagnostic output points + call init_history_therm ! initialize thermo history variables + call init_history_dyn ! initialize dynamic history variables + call calc_timesteps ! update timestep counter if not using npt_unit="1" + + call icepack_query_tracer_flags(tr_aero_out=tr_aero, tr_zaero_out=tr_zaero) + call icepack_query_tracer_flags(tr_iso_out=tr_iso) + call icepack_warnings_flush(nu_diag) + if (icepack_warnings_aborted()) call abort_ice(trim(subname), & + file=__FILE__,line= __LINE__) + + if (tr_aero .or. tr_zaero) call faero_optics !initialize aerosol optical + !property tables + + ! Initialize shortwave components using swdn from previous timestep + ! if restarting. These components will be scaled to current forcing + ! in prep_radiation. + if (trim(runtype) == 'continue' .or. restart) & + call init_shortwave ! initialize radiative transfer + +! tcraig, use advance_timestep here +! istep = istep + 1 ! update time step counters +! istep1 = istep1 + 1 +! time = time + dt ! determine the time and date +! call calendar(time) ! at the end of the first timestep + call advance_timestep() + + !-------------------------------------------------------------------- + ! coupler communication or forcing data initialization + !-------------------------------------------------------------------- + + call init_forcing_atmo ! initialize atmospheric forcing (standalone) + + if (tr_fsd .and. wave_spec) call get_wave_spec ! wave spectrum in ice + call get_forcing_atmo ! atmospheric forcing from data + call get_forcing_ocn(dt) ! ocean forcing from data + + ! isotopes + if (tr_iso) call fiso_default ! default values + ! aerosols + ! if (tr_aero) call faero_data ! data file + ! if (tr_zaero) call fzaero_data ! data file (gx1) + if (tr_aero .or. tr_zaero) call faero_default ! default values + if (skl_bgc .or. z_tracers) call get_forcing_bgc ! 
biogeochemistry + if (z_tracers) call get_atm_bgc ! biogeochemistry + + if (runtype == 'initial' .and. .not. restart) & + call init_shortwave ! initialize radiative transfer using current swdn + + call init_flux_atm ! initialize atmosphere fluxes sent to coupler + call init_flux_ocn ! initialize ocean fluxes sent to coupler + + if (write_ic) call accum_hist(dt) ! write initial conditions + + end subroutine cice_init + +!======================================================================= + + subroutine init_restart + + use ice_arrays_column, only: dhsn + use ice_blocks, only: nx_block, ny_block + use ice_calendar, only: calendar + use ice_constants, only: c0 + use ice_domain, only: nblocks + use ice_domain_size, only: ncat, n_iso, n_aero, nfsd + use ice_dyn_eap, only: read_restart_eap + use ice_dyn_shared, only: kdyn + use ice_grid, only: tmask + use ice_init, only: ice_ic + use ice_init_column, only: init_age, init_FY, init_lvl, & + init_meltponds_lvl, init_meltponds_topo, & + init_isotope, init_aerosol, init_hbrine, init_bgc, init_fsd + use ice_restart_column, only: restart_age, read_restart_age, & + restart_FY, read_restart_FY, restart_lvl, read_restart_lvl, & + restart_pond_lvl, read_restart_pond_lvl, & + restart_pond_topo, read_restart_pond_topo, & + restart_fsd, read_restart_fsd, & + restart_iso, read_restart_iso, & + restart_aero, read_restart_aero, & + restart_hbrine, read_restart_hbrine, & + restart_zsal, restart_bgc + use ice_restart_driver, only: restartfile + use ice_restart_shared, only: runtype, restart + use ice_state ! almost everything + + integer(kind=int_kind) :: & + i, j , & ! horizontal indices + iblk ! block index + logical(kind=log_kind) :: & + tr_iage, tr_FY, tr_lvl, tr_pond_lvl, & + tr_pond_topo, tr_fsd, tr_iso, tr_aero, tr_brine, & + skl_bgc, z_tracers, solve_zsal + integer(kind=int_kind) :: & + ntrcr + integer(kind=int_kind) :: & + nt_alvl, nt_vlvl, nt_apnd, nt_hpnd, nt_ipnd, & + nt_iage, nt_FY, nt_aero, nt_fsd, nt_isosno, nt_isoice + + character(len=*), parameter :: subname = '(init_restart)' + + call icepack_query_tracer_sizes(ntrcr_out=ntrcr) + call icepack_warnings_flush(nu_diag) + if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & + file=__FILE__, line=__LINE__) + + call icepack_query_parameters(skl_bgc_out=skl_bgc, & + z_tracers_out=z_tracers, solve_zsal_out=solve_zsal) + call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & + tr_lvl_out=tr_lvl, tr_pond_lvl_out=tr_pond_lvl, & + tr_pond_topo_out=tr_pond_topo, tr_aero_out=tr_aero, tr_brine_out=tr_brine, & + tr_fsd_out=tr_fsd, tr_iso_out=tr_iso) + call icepack_query_tracer_indices(nt_alvl_out=nt_alvl, nt_vlvl_out=nt_vlvl, & + nt_apnd_out=nt_apnd, nt_hpnd_out=nt_hpnd, nt_ipnd_out=nt_ipnd, & + nt_iage_out=nt_iage, nt_FY_out=nt_FY, nt_aero_out=nt_aero, nt_fsd_out=nt_fsd, & + nt_isosno_out=nt_isosno, nt_isoice_out=nt_isoice) + call icepack_warnings_flush(nu_diag) + if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & + file=__FILE__, line=__LINE__) + + if (trim(runtype) == 'continue') then + ! start from core restart file + call restartfile() ! given by pointer in ice_in + call calendar() ! update time parameters + if (kdyn == 2) call read_restart_eap ! EAP + else if (restart) then ! ice_ic = core restart file + call restartfile (ice_ic) ! or 'default' or 'none' + !!! uncomment to create netcdf + ! call restartfile_v4 (ice_ic) ! CICE v4.1 binary restart file + !!! uncomment if EAP restart data exists + ! if (kdyn == 2) call read_restart_eap + endif + + ! 
tracers + ! ice age tracer + if (tr_iage) then + if (trim(runtype) == 'continue') & + restart_age = .true. + if (restart_age) then + call read_restart_age + else + do iblk = 1, nblocks + call init_age(trcrn(:,:,nt_iage,:,iblk)) + enddo ! iblk + endif + endif + ! first-year area tracer + if (tr_FY) then + if (trim(runtype) == 'continue') restart_FY = .true. + if (restart_FY) then + call read_restart_FY + else + do iblk = 1, nblocks + call init_FY(trcrn(:,:,nt_FY,:,iblk)) + enddo ! iblk + endif + endif + ! level ice tracer + if (tr_lvl) then + if (trim(runtype) == 'continue') restart_lvl = .true. + if (restart_lvl) then + call read_restart_lvl + else + do iblk = 1, nblocks + call init_lvl(iblk,trcrn(:,:,nt_alvl,:,iblk), & + trcrn(:,:,nt_vlvl,:,iblk)) + enddo ! iblk + endif + endif + ! level-ice melt ponds + if (tr_pond_lvl) then + if (trim(runtype) == 'continue') & + restart_pond_lvl = .true. + if (restart_pond_lvl) then + call read_restart_pond_lvl + else + do iblk = 1, nblocks + call init_meltponds_lvl(trcrn(:,:,nt_apnd,:,iblk), & + trcrn(:,:,nt_hpnd,:,iblk), & + trcrn(:,:,nt_ipnd,:,iblk), & + dhsn(:,:,:,iblk)) + enddo ! iblk + endif + endif + ! topographic melt ponds + if (tr_pond_topo) then + if (trim(runtype) == 'continue') & + restart_pond_topo = .true. + if (restart_pond_topo) then + call read_restart_pond_topo + else + do iblk = 1, nblocks + call init_meltponds_topo(trcrn(:,:,nt_apnd,:,iblk), & + trcrn(:,:,nt_hpnd,:,iblk), & + trcrn(:,:,nt_ipnd,:,iblk)) + enddo ! iblk + endif ! .not. restart_pond + endif + ! floe size distribution + if (tr_fsd) then + if (trim(runtype) == 'continue') restart_fsd = .true. + if (restart_fsd) then + call read_restart_fsd + else + call init_fsd(trcrn(:,:,nt_fsd:nt_fsd+nfsd-1,:,:)) + endif + endif + + ! isotopes + if (tr_iso) then + if (trim(runtype) == 'continue') restart_iso = .true. + if (restart_iso) then + call read_restart_iso + else + do iblk = 1, nblocks + call init_isotope(trcrn(:,:,nt_isosno:nt_isosno+n_iso-1,:,iblk), & + trcrn(:,:,nt_isoice:nt_isoice+n_iso-1,:,iblk)) + enddo ! iblk + endif + endif + + if (tr_aero) then ! ice aerosol + if (trim(runtype) == 'continue') restart_aero = .true. + if (restart_aero) then + call read_restart_aero + else + do iblk = 1, nblocks + call init_aerosol(trcrn(:,:,nt_aero:nt_aero+4*n_aero-1,:,iblk)) + enddo ! iblk + endif ! .not. restart_aero + endif + + if (trim(runtype) == 'continue') then + if (tr_brine) & + restart_hbrine = .true. + if (solve_zsal) & + restart_zsal = .true. + if (skl_bgc .or. z_tracers) & + restart_bgc = .true. + endif + + if (tr_brine .or. skl_bgc) then ! brine height tracer + call init_hbrine + if (tr_brine .and. restart_hbrine) call read_restart_hbrine + endif + + if (solve_zsal .or. skl_bgc .or. z_tracers) then ! biogeochemistry + if (tr_fsd) then + write (nu_diag,*) 'FSD implementation incomplete for use with BGC' + call icepack_warnings_flush(nu_diag) + if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & + file=__FILE__, line=__LINE__) + endif + call init_bgc + endif + + !----------------------------------------------------------------- + ! 
aggregate tracers + !----------------------------------------------------------------- + + !$OMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + if (tmask(i,j,iblk)) then + call icepack_aggregate(ncat = ncat, & + aicen = aicen(i,j,:,iblk), & + trcrn = trcrn(i,j,:,:,iblk), & + vicen = vicen(i,j,:,iblk), & + vsnon = vsnon(i,j,:,iblk), & + aice = aice (i,j, iblk), & + trcr = trcr (i,j,:,iblk), & + vice = vice (i,j, iblk), & + vsno = vsno (i,j, iblk), & + aice0 = aice0(i,j, iblk), & + ntrcr = ntrcr, & + trcr_depend = trcr_depend, & + trcr_base = trcr_base, & + n_trcr_strata = n_trcr_strata, & + nt_strata = nt_strata) + else + ! tcraig, reset all tracer values on land to zero + trcrn(i,j,:,:,iblk) = c0 + endif + enddo + enddo + enddo + !$OMP END PARALLEL DO + + call icepack_warnings_flush(nu_diag) + if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & + file=__FILE__, line=__LINE__) + + end subroutine init_restart + +!======================================================================= + + end module CICE_InitMod + +!======================================================================= diff --git a/cicecore/drivers/unittest/halochk/halochk.F90 b/cicecore/drivers/unittest/halochk/halochk.F90 new file mode 100644 index 000000000..29eaa8150 --- /dev/null +++ b/cicecore/drivers/unittest/halochk/halochk.F90 @@ -0,0 +1,811 @@ + + module halochk_data + + use CICE_InitMod + use ice_kinds_mod, only: int_kind, dbl_kind, real_kind, log_kind + use ice_blocks, only: block, get_block, nx_block, ny_block, nblocks_tot, nghost + use ice_boundary, only: ice_HaloUpdate, ice_HaloUpdate_stress + use ice_constants, only: c0, c1, p5, & + field_loc_unknown, field_loc_noupdate, & + field_loc_center, field_loc_NEcorner, & + field_loc_Nface, field_loc_Eface, & + field_type_unknown, field_type_noupdate, & + field_type_scalar, field_type_vector, field_type_angle + use ice_communicate, only: my_task, master_task, get_num_procs, MPI_COMM_ICE + use ice_distribution, only: ice_distributionGetBlockID, ice_distributionGet + use ice_domain_size, only: nx_global, ny_global, & + block_size_x, block_size_y, max_blocks + use ice_domain, only: distrb_info, halo_info, & + ew_boundary_type, ns_boundary_type + use ice_exit, only: abort_ice, end_run + use ice_global_reductions, only: global_minval, global_maxval, global_sum + + implicit none + + integer(int_kind), parameter :: & + passflag = 0, & + failflag = 1 + + end module halochk_data + +!======================================================================= + + program halochk + + ! This tests the CICE halo update methods by + ! using CICE_InitMod (from the standalone model) to read/initialize + ! a CICE grid/configuration. 
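
The strategy of the test, set up further below, is to encode each point's global indices into its value (i_glob or j_glob plus 1000*k1 plus 10000*k2), run a halo update, and then compare every halo cell against the value its global source point must carry. A minimal sketch of that pattern for a one-dimensional cyclic halo, independent of the CICE infrastructure (names and sizes are illustrative only):

    program halochk_sketch
      ! Fill the interior with a known function of the global index, apply a
      ! cyclic "halo update", then verify each halo cell analytically.
      implicit none
      integer, parameter :: n = 8, nghost = 1
      real :: a(1-nghost:n+nghost)
      integer :: i, isrc
      logical :: ok

      a = -999.0                      ! fill value everywhere
      do i = 1, n
         a(i) = real(i)               ! interior holds its global index
      end do
      a(0)     = a(n)                 ! cyclic halo update, west edge
      a(n + 1) = a(1)                 ! cyclic halo update, east edge
      ok = .true.
      do i = 1 - nghost, n + nghost
         isrc = modulo(i - 1, n) + 1  ! analytic source index
         if (a(i) /= real(isrc)) ok = .false.
      end do
      if (ok) then
         write(*,*) 'halochk sketch PASSED'
      else
         write(*,*) 'halochk sketch FAILED'
      end if
    end program halochk_sketch
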
+ + use halochk_data + + implicit none + + integer(int_kind) :: nn, nl, nt, i, j, k1, k2, n, ib, ie, jb, je + integer(int_kind) :: iblock, itrip, ioffset, joffset + integer(int_kind) :: blockID, numBlocks, jtrip + type (block) :: this_block + + real(dbl_kind) , allocatable :: darrayi1(:,:,:) , darrayj1(:,:,:) + real(dbl_kind) , allocatable :: darrayi2(:,:,:,:) , darrayj2(:,:,:,:) + real(dbl_kind) , allocatable :: darrayi3(:,:,:,:,:), darrayj3(:,:,:,:,:) + real(real_kind) , allocatable :: rarrayi1(:,:,:) , rarrayj1(:,:,:) + real(real_kind) , allocatable :: rarrayi2(:,:,:,:) , rarrayj2(:,:,:,:) + real(real_kind) , allocatable :: rarrayi3(:,:,:,:,:), rarrayj3(:,:,:,:,:) + integer(int_kind), allocatable :: iarrayi1(:,:,:) , iarrayj1(:,:,:) + integer(int_kind), allocatable :: iarrayi2(:,:,:,:) , iarrayj2(:,:,:,:) + integer(int_kind), allocatable :: iarrayi3(:,:,:,:,:), iarrayj3(:,:,:,:,:) + logical(log_kind), allocatable :: larrayi1(:,:,:) , larrayj1(:,:,:) + real(dbl_kind) , allocatable :: darrayi1str(:,:,:) , darrayj1str(:,:,:) + real(dbl_kind) , allocatable :: darrayi10(:,:,:) , darrayj10(:,:,:) + + real(dbl_kind), allocatable :: cidata_bas(:,:,:,:,:),cjdata_bas(:,:,:,:,:) + real(dbl_kind), allocatable :: cidata_nup(:,:,:,:,:),cjdata_nup(:,:,:,:,:) + real(dbl_kind), allocatable :: cidata_std(:,:,:,:,:),cjdata_std(:,:,:,:,:) + + integer(int_kind), parameter :: maxtests = 11 + integer(int_kind), parameter :: maxtypes = 4 + integer(int_kind), parameter :: maxlocs = 5 + integer(int_kind), parameter :: nz1 = 3 + integer(int_kind), parameter :: nz2 = 4 + real(dbl_kind) :: aichk,ajchk,cichk,cjchk,rival,rjval,rsign + character(len=16) :: locs_name(maxlocs), types_name(maxtypes) + integer(int_kind) :: field_loc(maxlocs), field_type(maxtypes) + integer(int_kind) :: npes, ierr, ntask, testcnt, tottest, tpcnt, tfcnt + integer(int_kind) :: errorflag0, gflag, k1m, k2m, ptcntsum, failcntsum + integer(int_kind), allocatable :: errorflag(:) + integer(int_kind), allocatable :: ptcnt(:), failcnt(:) + character(len=128), allocatable :: teststring(:) + character(len=32) :: halofld + logical :: tripole_average, tripole_pole, spvalL1 + logical :: first_call = .true. + + real(dbl_kind) , parameter :: fillval = -88888.0_dbl_kind + real(dbl_kind) , parameter :: dhalofillval = -999.0_dbl_kind + real(real_kind) , parameter :: rhalofillval = -999.0_real_kind + integer(int_kind), parameter :: ihalofillval = -999 + character(len=*) , parameter :: subname='(halochk)' + + !----------------------------------------------------------------- + ! Initialize CICE + !----------------------------------------------------------------- + + call CICE_Initialize + npes = get_num_procs() + + locs_name (:) = 'unknown' + types_name(:) = 'unknown' + field_type(:) = field_type_unknown + field_loc (:) = field_loc_unknown + + types_name(1) = 'scalar' + field_type(1) = field_type_scalar + types_name(2) = 'vector' + field_type(2) = field_type_vector + types_name(3) = 'angle' + field_type(3) = field_type_angle + types_name(4) = 'none' + field_type(4) = field_type_noupdate +! types_name(5) = 'unknown' +! field_type(5) = field_type_unknown ! aborts in CICE, as expected + + locs_name (1) = 'center' + field_loc (1) = field_loc_center + locs_name (2) = 'NEcorner' + field_loc (2) = field_loc_NEcorner + locs_name (3) = 'Nface' + field_loc (3) = field_loc_Nface + locs_name (4) = 'Eface' + field_loc (4) = field_loc_Eface + locs_name (5) = 'none' + field_loc (5) = field_loc_noupdate +! locs_name (6) = 'unknown' +! field_loc (6) = field_loc_unknown ! 
aborts in CICE, as expected + + tottest = maxtests * maxlocs * maxtypes + allocate(errorflag(tottest)) + allocate(teststring(tottest)) + allocate(ptcnt(tottest)) + allocate(failcnt(tottest)) + ptcnt(:) = 0 + failcnt(:) = 0 + + !----------------------------------------------------------------- + ! Testing + !----------------------------------------------------------------- + + if (my_task == master_task) then + write(6,*) ' ' + write(6,*) '==========================================================' + write(6,*) ' ' + write(6,*) 'RunningUnitTest HALOCHK' + write(6,*) ' ' + write(6,*) ' npes = ',npes + write(6,*) ' my_task = ',my_task + write(6,*) ' nx_global = ',nx_global + write(6,*) ' ny_global = ',ny_global + write(6,*) ' block_size_x = ',block_size_x + write(6,*) ' block_size_y = ',block_size_y + write(6,*) ' nblocks_tot = ',nblocks_tot + write(6,*) ' tottest = ',tottest + write(6,*) ' ' + endif + + errorflag0 = passflag + errorflag(:) = passflag + teststring(:) = ' ' + + ! --------------------------- + ! TEST HALO UPDATE + ! --------------------------- + + if (my_task == master_task) write(6,*) ' ' + + allocate(darrayi1 (nx_block,ny_block,max_blocks)) + allocate(darrayj1 (nx_block,ny_block,max_blocks)) + allocate(darrayi2 (nx_block,ny_block,nz1,max_blocks)) + allocate(darrayj2 (nx_block,ny_block,nz1,max_blocks)) + allocate(darrayi3 (nx_block,ny_block,nz1,nz2,max_blocks)) + allocate(darrayj3 (nx_block,ny_block,nz1,nz2,max_blocks)) + allocate(rarrayi1 (nx_block,ny_block,max_blocks)) + allocate(rarrayj1 (nx_block,ny_block,max_blocks)) + allocate(rarrayi2 (nx_block,ny_block,nz1,max_blocks)) + allocate(rarrayj2 (nx_block,ny_block,nz1,max_blocks)) + allocate(rarrayi3 (nx_block,ny_block,nz1,nz2,max_blocks)) + allocate(rarrayj3 (nx_block,ny_block,nz1,nz2,max_blocks)) + allocate(iarrayi1 (nx_block,ny_block,max_blocks)) + allocate(iarrayj1 (nx_block,ny_block,max_blocks)) + allocate(iarrayi2 (nx_block,ny_block,nz1,max_blocks)) + allocate(iarrayj2 (nx_block,ny_block,nz1,max_blocks)) + allocate(iarrayi3 (nx_block,ny_block,nz1,nz2,max_blocks)) + allocate(iarrayj3 (nx_block,ny_block,nz1,nz2,max_blocks)) + allocate(larrayi1 (nx_block,ny_block,max_blocks)) + allocate(larrayj1 (nx_block,ny_block,max_blocks)) + allocate(darrayi1str(nx_block,ny_block,max_blocks)) + allocate(darrayj1str(nx_block,ny_block,max_blocks)) + allocate(darrayi10 (nx_block,ny_block,max_blocks)) + allocate(darrayj10 (nx_block,ny_block,max_blocks)) + + allocate(cidata_bas(nx_block,ny_block,nz1,nz2,max_blocks)) + allocate(cjdata_bas(nx_block,ny_block,nz1,nz2,max_blocks)) + allocate(cidata_std(nx_block,ny_block,nz1,nz2,max_blocks)) + allocate(cjdata_std(nx_block,ny_block,nz1,nz2,max_blocks)) + allocate(cidata_nup(nx_block,ny_block,nz1,nz2,max_blocks)) + allocate(cjdata_nup(nx_block,ny_block,nz1,nz2,max_blocks)) + + darrayi1 = fillval + darrayj1 = fillval + darrayi2 = fillval + darrayj2 = fillval + darrayi3 = fillval + darrayj3 = fillval + rarrayi1 = fillval + rarrayj1 = fillval + rarrayi2 = fillval + rarrayj2 = fillval + rarrayi3 = fillval + rarrayj3 = fillval + iarrayi1 = fillval + iarrayj1 = fillval + iarrayi2 = fillval + iarrayj2 = fillval + iarrayi3 = fillval + iarrayj3 = fillval + larrayi1 = .false. + larrayj1 = .true. 
+      darrayi1str = fillval
+      darrayj1str = fillval
+      darrayi10   = fillval
+      darrayj10   = fillval
+      cidata_bas  = fillval
+      cjdata_bas  = fillval
+      cidata_std  = fillval
+      cjdata_std  = fillval
+      cidata_nup  = fillval
+      cjdata_nup  = fillval
+
+      call ice_distributionGet(distrb_info, numLocalBlocks = numBlocks)
+
+      !--- baseline data ---
+
+      do iblock = 1,numBlocks
+         call ice_distributionGetBlockID(distrb_info, iblock, blockID)
+         this_block = get_block(blockID, blockID)
+         do k2 = 1,nz2
+         do k1 = 1,nz1
+         do j = 1,ny_block
+         do i = 1,nx_block
+            cidata_bas(i,j,k1,k2,iblock) = real(this_block%i_glob(i),kind=dbl_kind) + &
+               real(k1,kind=dbl_kind)*1000._dbl_kind + real(k2,kind=dbl_kind)*10000._dbl_kind
+            cjdata_bas(i,j,k1,k2,iblock) = real(this_block%j_glob(j),kind=dbl_kind) + &
+               real(k1,kind=dbl_kind)*1000._dbl_kind + real(k2,kind=dbl_kind)*10000._dbl_kind
+         enddo
+         enddo
+         enddo
+         enddo
+      enddo
+
+      !--- setup nup (noupdate) solution, set halo/pad to fillval ---
+
+      cidata_nup(:,:,:,:,:) = cidata_bas(:,:,:,:,:)
+      cjdata_nup(:,:,:,:,:) = cjdata_bas(:,:,:,:,:)
+
+      do iblock = 1,numBlocks
+         call ice_distributionGetBlockID(distrb_info, iblock, blockID)
+         this_block = get_block(blockID, blockID)
+         ib = this_block%ilo
+         ie = this_block%ihi
+         jb = this_block%jlo
+         je = this_block%jhi
+         cidata_nup(1:ib-1       ,:            ,:,:,iblock) = fillval
+         cjdata_nup(1:ib-1       ,:            ,:,:,iblock) = fillval
+         cidata_nup(ie+1:nx_block,:            ,:,:,iblock) = fillval
+         cjdata_nup(ie+1:nx_block,:            ,:,:,iblock) = fillval
+         cidata_nup(:            ,1:jb-1       ,:,:,iblock) = fillval
+         cjdata_nup(:            ,1:jb-1       ,:,:,iblock) = fillval
+         cidata_nup(:            ,je+1:ny_block,:,:,iblock) = fillval
+         cjdata_nup(:            ,je+1:ny_block,:,:,iblock) = fillval
+      enddo
+
+      !--- setup std solution for cyclic, closed, open, and tripole boundaries ---
+
+      cidata_std(:,:,:,:,:) = cidata_bas(:,:,:,:,:)
+      cjdata_std(:,:,:,:,:) = cjdata_bas(:,:,:,:,:)
+
+      !--- halo off on east and west boundary ---
+      if (ew_boundary_type == 'closed' .or. &
+          ew_boundary_type == 'open' ) then
+         do iblock = 1,numBlocks
+            call ice_distributionGetBlockID(distrb_info, iblock, blockID)
+            this_block = get_block(blockID, blockID)
+            ib = this_block%ilo
+            ie = this_block%ihi
+            jb = this_block%jlo
+            je = this_block%jhi
+            if (this_block%i_glob(ib) == 1) then
+               cidata_std(1:ib-1       ,:,:,:,iblock) = dhalofillval
+               cjdata_std(1:ib-1       ,:,:,:,iblock) = dhalofillval
+            endif
+            if (this_block%i_glob(ie) == nx_global) then
+               cidata_std(ie+1:nx_block,:,:,:,iblock) = dhalofillval
+               cjdata_std(ie+1:nx_block,:,:,:,iblock) = dhalofillval
+            endif
+         enddo
+      endif
+
+      !--- halo off on south boundary ---
+      if (ns_boundary_type == 'closed' .or. &
+          ns_boundary_type == 'open' .or. &
+          ns_boundary_type == 'tripole' .or. &
+          ns_boundary_type == 'tripoleT' ) then
+         do iblock = 1,numBlocks
+            call ice_distributionGetBlockID(distrb_info, iblock, blockID)
+            this_block = get_block(blockID, blockID)
+            ib = this_block%ilo
+            ie = this_block%ihi
+            jb = this_block%jlo
+            je = this_block%jhi
+            if (this_block%j_glob(jb) == 1) then
+               cidata_std(:,1:jb-1,:,:,iblock) = dhalofillval
+               cjdata_std(:,1:jb-1,:,:,iblock) = dhalofillval
+            endif
+         enddo
+      endif
+
+      !--- halo off on north boundary, tripole handled later ---
+      if (ns_boundary_type == 'closed' .or. &
+          ns_boundary_type == 'open' .or. &
+          ns_boundary_type == 'tripole' .or.
& + ns_boundary_type == 'tripoleT' ) then + do iblock = 1,numBlocks + call ice_distributionGetBlockID(distrb_info, iblock, blockID) + this_block = get_block(blockID, blockID) + ib = this_block%ilo + ie = this_block%ihi + jb = this_block%jlo + je = this_block%jhi + if (this_block%j_glob(je) == ny_global) then + cidata_std(:,je+1:ny_block,:,:,iblock) = dhalofillval + cjdata_std(:,je+1:ny_block,:,:,iblock) = dhalofillval + endif + enddo + endif + + !--------------------------------------------------------------- + + testcnt = 0 + do nn = 1, maxtests + do nl = 1, maxlocs + do nt = 1, maxtypes + + !--- setup test --- + first_call = .true. + testcnt = testcnt + 1 + if (testcnt > tottest) then + if (my_task == master_task) then + write(6,*) ' ' + write(6,*) 'HALOCHK FAILED' + write(6,*) ' ' + endif + call abort_ice(subname//' testcnt > tottest',file=__FILE__,line=__LINE__) + endif + + !--- fill arrays --- + darrayi1(:,:,:) = fillval + darrayj1(:,:,:) = fillval + darrayi2(:,:,:,:) = fillval + darrayj2(:,:,:,:) = fillval + darrayi3(:,:,:,:,:) = fillval + darrayj3(:,:,:,:,:) = fillval + darrayi1str(:,:,:) = fillval + darrayj1str(:,:,:) = fillval + do iblock = 1,numBlocks + call ice_distributionGetBlockID(distrb_info, iblock, blockID) + this_block = get_block(blockID, blockID) + ib = this_block%ilo + ie = this_block%ihi + jb = this_block%jlo + je = this_block%jhi + do j = jb,je + do i = ib,ie + k1 = 1 + k2 = 1 + darrayi1(i,j,iblock) = cidata_bas(i,j,k1,k2,iblock) + darrayj1(i,j,iblock) = cjdata_bas(i,j,k1,k2,iblock) + do k1 = 1,nz1 + k2 = 1 + darrayi2(i,j,k1,iblock) = cidata_bas(i,j,k1,k2,iblock) + darrayj2(i,j,k1,iblock) = cjdata_bas(i,j,k1,k2,iblock) + do k2 = 1,nz2 + darrayi3(i,j,k1,k2,iblock) = cidata_bas(i,j,k1,k2,iblock) + darrayj3(i,j,k1,k2,iblock) = cjdata_bas(i,j,k1,k2,iblock) + enddo + enddo + enddo + enddo + enddo + + ! 
copy original darray1 for "stress" compare + darrayi10 = darrayi1 + darrayj10 = darrayj1 + + + !--- halo update --- + + if (nn == 1) then + k1m = 1 + k2m = 1 + halofld = '2DR8' + call ice_haloUpdate(darrayi1, halo_info, field_loc(nl), field_type(nt), fillvalue=dhalofillval) + call ice_haloUpdate(darrayj1, halo_info, field_loc(nl), field_type(nt), fillvalue=dhalofillval) + elseif (nn == 2) then + k1m = nz1 + k2m = 1 + halofld = '3DR8' + call ice_haloUpdate(darrayi2, halo_info, field_loc(nl), field_type(nt), fillvalue=dhalofillval) + call ice_haloUpdate(darrayj2, halo_info, field_loc(nl), field_type(nt), fillvalue=dhalofillval) + elseif (nn == 3) then + k1m = nz1 + k2m = nz2 + halofld = '4DR8' + call ice_haloUpdate(darrayi3, halo_info, field_loc(nl), field_type(nt), fillvalue=dhalofillval) + call ice_haloUpdate(darrayj3, halo_info, field_loc(nl), field_type(nt), fillvalue=dhalofillval) + elseif (nn == 4) then + k1m = 1 + k2m = 1 + halofld = '2DR4' + rarrayi1 = real(darrayi1,kind=real_kind) + rarrayj1 = real(darrayj1,kind=real_kind) + call ice_haloUpdate(rarrayi1, halo_info, field_loc(nl), field_type(nt), fillvalue=rhalofillval) + call ice_haloUpdate(rarrayj1, halo_info, field_loc(nl), field_type(nt), fillvalue=rhalofillval) + darrayi1 = real(rarrayi1,kind=dbl_kind) + darrayj1 = real(rarrayj1,kind=dbl_kind) + elseif (nn == 5) then + k1m = nz1 + k2m = 1 + halofld = '3DR4' + rarrayi2 = real(darrayi2,kind=real_kind) + rarrayj2 = real(darrayj2,kind=real_kind) + call ice_haloUpdate(rarrayi2, halo_info, field_loc(nl), field_type(nt), fillvalue=rhalofillval) + call ice_haloUpdate(rarrayj2, halo_info, field_loc(nl), field_type(nt), fillvalue=rhalofillval) + darrayi2 = real(rarrayi2,kind=dbl_kind) + darrayj2 = real(rarrayj2,kind=dbl_kind) + elseif (nn == 6) then + k1m = nz1 + k2m = nz2 + halofld = '4DR4' + rarrayi3 = real(darrayi3,kind=real_kind) + rarrayj3 = real(darrayj3,kind=real_kind) + call ice_haloUpdate(rarrayi3, halo_info, field_loc(nl), field_type(nt), fillvalue=rhalofillval) + call ice_haloUpdate(rarrayj3, halo_info, field_loc(nl), field_type(nt), fillvalue=rhalofillval) + darrayi3 = real(rarrayi3,kind=dbl_kind) + darrayj3 = real(rarrayj3,kind=dbl_kind) + elseif (nn == 7) then + k1m = 1 + k2m = 1 + halofld = '2DI4' + iarrayi1 = nint(darrayi1) + iarrayj1 = nint(darrayj1) + call ice_haloUpdate(iarrayi1, halo_info, field_loc(nl), field_type(nt), fillvalue=ihalofillval) + call ice_haloUpdate(iarrayj1, halo_info, field_loc(nl), field_type(nt), fillvalue=ihalofillval) + darrayi1 = real(iarrayi1,kind=dbl_kind) + darrayj1 = real(iarrayj1,kind=dbl_kind) + elseif (nn == 8) then + k1m = nz1 + k2m = 1 + halofld = '3DI4' + iarrayi2 = nint(darrayi2) + iarrayj2 = nint(darrayj2) + call ice_haloUpdate(iarrayi2, halo_info, field_loc(nl), field_type(nt), fillvalue=ihalofillval) + call ice_haloUpdate(iarrayj2, halo_info, field_loc(nl), field_type(nt), fillvalue=ihalofillval) + darrayi2 = real(iarrayi2,kind=dbl_kind) + darrayj2 = real(iarrayj2,kind=dbl_kind) + elseif (nn == 9) then + k1m = nz1 + k2m = nz2 + halofld = '4DI4' + iarrayi3 = nint(darrayi3) + iarrayj3 = nint(darrayj3) + call ice_haloUpdate(iarrayi3, halo_info, field_loc(nl), field_type(nt), fillvalue=ihalofillval) + call ice_haloUpdate(iarrayj3, halo_info, field_loc(nl), field_type(nt), fillvalue=ihalofillval) + darrayi3 = real(iarrayi3,kind=dbl_kind) + darrayj3 = real(iarrayj3,kind=dbl_kind) + elseif (nn == 10) then + k1m = 1 + k2m = 1 + halofld = '2DL1' + larrayi1 = .true. + where (darrayi1 == fillval) larrayi1 = .false. + larrayj1 = .false. 
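+            ! larrayj1 uses the complementary mask and fill value relative
+            ! to larrayi1, so both logical halo fill settings (0 and 1)
+            ! are exercised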
+ where (darrayj1 == fillval) larrayj1 = .true. + call ice_haloUpdate(larrayi1, halo_info, field_loc(nl), field_type(nt), fillvalue=0) + call ice_haloUpdate(larrayj1, halo_info, field_loc(nl), field_type(nt), fillvalue=1) + darrayi1 = c0 + where (larrayi1) darrayi1 = c1 + darrayj1 = c0 + where (larrayj1) darrayj1 = c1 + elseif (nn == 11) then + k1m = 1 + k2m = 1 + halofld = 'STRESS' + darrayi1str = -darrayi1 ! flip sign for testing + darrayj1str = -darrayj1 + call ice_haloUpdate_stress(darrayi1, darrayi1str, halo_info, field_loc(nl), field_type(nt), fillvalue=dhalofillval) + call ice_haloUpdate_stress(darrayj1, darrayj1str, halo_info, field_loc(nl), field_type(nt), fillvalue=dhalofillval) + endif + + write(teststring(testcnt),'(5a10)') trim(halofld),trim(locs_name(nl)),trim(types_name(nt)), & + trim(ew_boundary_type),trim(ns_boundary_type) + + do iblock = 1,numBlocks + call ice_distributionGetBlockID(distrb_info, iblock, blockID) + this_block = get_block(blockID, blockID) + ib = this_block%ilo + ie = this_block%ihi + jb = this_block%jlo + je = this_block%jhi + ! just check non-padded gridcells +! do j = 1,ny_block +! do i = 1,nx_block + do j = jb-nghost, je+nghost + do i = ib-nghost, ie+nghost + do k1 = 1,k1m + do k2 = 1,k2m + tripole_average = .false. + tripole_pole = .false. + spvalL1 = .false. + if (index(halofld,'2D') > 0) then + aichk = darrayi1(i,j,iblock) + ajchk = darrayj1(i,j,iblock) + elseif (index(halofld,'STRESS') > 0) then + aichk = darrayi1(i,j,iblock) + ajchk = darrayj1(i,j,iblock) + elseif (index(halofld,'3D') > 0) then + aichk = darrayi2(i,j,k1,iblock) + ajchk = darrayj2(i,j,k1,iblock) + elseif (index(halofld,'4D') > 0) then + aichk = darrayi3(i,j,k1,k2,iblock) + ajchk = darrayj3(i,j,k1,k2,iblock) + else + if (my_task == master_task) then + write(6,*) ' ' + write(6,*) 'HALOCHK FAILED' + write(6,*) ' ' + endif + call abort_ice(subname//' halofld not matched '//trim(halofld),file=__FILE__,line=__LINE__) + endif + + + if (field_loc (nl) == field_loc_noupdate .or. & + field_type(nt) == field_type_noupdate) then + cichk = cidata_nup(i,j,k1,k2,iblock) + cjchk = cjdata_nup(i,j,k1,k2,iblock) + else + cichk = cidata_std(i,j,k1,k2,iblock) + cjchk = cjdata_std(i,j,k1,k2,iblock) + + if (index(halofld,'STRESS') > 0) then + ! only updates on tripole zipper for tripole grids + ! darrayi10 is copy of darrayi1 before halo call + cichk = darrayi10(i,j,iblock) + cjchk = darrayj10(i,j,iblock) + endif + + !--- tripole on north boundary, need to hardcode --- + !--- tripole and tripoleT slightly different --- + !--- establish special set of points here --- + if ((this_block%j_glob(je) == ny_global) .and. & + ((ns_boundary_type == 'tripole' .and. & + (j > je .or. & + (j == je .and. (field_loc(nl) == field_loc_Nface .or. field_loc(nl) == field_loc_NEcorner)))) .or. & + (ns_boundary_type == 'tripoleT' .and. & + (j >= je)))) then + + ! flip sign for vector/angle + if (field_type(nt) == field_type_vector .or. field_type(nt) == field_type_angle ) then + rsign = -c1 + else + rsign = c1 + endif + + ! for tripole + if (ns_boundary_type == 'tripole') then + + ! compute itrip and jtrip, these are the location where the halo values are defined for i,j + ! for j=je averaging, itrip and jtrip are the 2nd gridpoint associated with averaging + + ! standard center tripole u-fold + itrip = nx_global-this_block%i_glob(i)+1 + jtrip = max(je - (j-je) + 1 , je) + ioffset = 0 + joffset = 0 + + if (field_loc(nl) == field_loc_NEcorner .or. field_loc(nl) == field_loc_Nface) then + ! 
need j offset + joffset = -1 + if (j == je) then + tripole_average = .true. + endif + endif + + if (field_loc(nl) == field_loc_NEcorner .or. field_loc(nl) == field_loc_Eface) then + ! fold plus cell offset + ioffset = -1 + ! CICE treats j=ny_global tripole edge points incorrectly + ! should do edge wraparound and average + ! CICE does not update those points, assumes it's "land" + if (j == je) then + if (this_block%i_glob(i) == nx_global/2) then + tripole_pole = .true. + elseif (this_block%i_glob(i) == nx_global ) then + tripole_pole = .true. + endif + endif + endif + + ! for tripoleT + elseif (ns_boundary_type == 'tripoleT') then + + ! compute itrip and jtrip, these are the location where the halo values are defined for i,j + ! for j=je averaging, itrip and jtrip are the 2nd gridpoint associated with averaging + + ! standard center tripoleT t-fold + itrip = nx_global-this_block%i_glob(i)+2 + jtrip = je - (j-je) + ioffset = 0 + joffset = 0 + + if (field_loc(nl) == field_loc_NEcorner .or. field_loc(nl) == field_loc_Eface) then + ! fold plus cell offset + ioffset = -1 + endif + + if (field_loc(nl) == field_loc_NEcorner .or. field_loc(nl) == field_loc_Nface) then + ! need j offset + joffset = -1 + endif + + if (field_loc(nl) == field_loc_Center .or. field_loc(nl) == field_loc_Eface) then + if (j == je) then + tripole_average = .true. + endif + endif + + ! center point poles need to be treated special + if (field_loc(nl) == field_loc_Center) then + if (j == je .and. & + (this_block%i_glob(i) == 1 .or. this_block%i_glob(i) == nx_global/2+1)) then + tripole_pole = .true. + endif + endif + + endif + + itrip = mod(itrip + ioffset + nx_global-1,nx_global)+1 + jtrip = jtrip + joffset + + rival = (real(itrip,kind=dbl_kind) + & + real(k1,kind=dbl_kind)*1000._dbl_kind + real(k2,kind=dbl_kind)*10000._dbl_kind) + rjval = (real(this_block%j_glob(jtrip),kind=dbl_kind) + & + real(k1,kind=dbl_kind)*1000._dbl_kind + real(k2,kind=dbl_kind)*10000._dbl_kind) + + if (index(halofld,'STRESS') > 0) then + ! only updates on tripole zipper for tripole grids, not tripoleT + if (tripole_pole) then + ! flip sign due to sign of darrayi1str + ! ends of tripole seam not averaged in CICE + cichk = -rsign * cidata_std(i,j,k1,k2,iblock) + cjchk = -rsign * cjdata_std(i,j,k1,k2,iblock) + else + cichk = -rsign * rival + cjchk = -rsign * rjval + endif + elseif (index(halofld,'L1') > 0 .and. j == je) then + ! force cichk and cjchk to match on tripole average index, calc not well defined + spvalL1 = .true. + cichk = aichk + cjchk = ajchk + elseif (tripole_pole) then + ! ends of tripole seam not averaged in CICE + cichk = rsign * cidata_std(i,j,k1,k2,iblock) + cjchk = rsign * cjdata_std(i,j,k1,k2,iblock) + elseif (tripole_average) then + ! tripole average + cichk = p5 * (cidata_std(i,j,k1,k2,iblock) + rsign * rival) + cjchk = p5 * (cjdata_std(i,j,k1,k2,iblock) + rsign * rjval) + else + ! standard tripole fold + cichk = rsign * rival + cjchk = rsign * rjval + endif + +! if (testcnt == 6 .and. j == 61 .and. i < 3) then +! if (testcnt == 186 .and. j == 61 .and. i<4) then +! if (testcnt == 13 .and. j > 61 .and. (i < 3 .or. i > 89)) then +! if (testcnt == 5 .and. j >= 61 .and. (i < 3 .or. i > 90)) then +! write(100+my_task,'(a,5i6,2l3,f6.2,i6)') 'tcx1 ',i,j,iblock,itrip,jtrip, & +! tripole_average,tripole_pole,rsign,this_block%i_glob(i) +! write(100+my_task,'(a,4f12.2)') 'tcx2 ',cidata_std(i,j,k1,k2,iblock),rival,cichk,aichk +! write(100+my_task,'(a,4f12.2)') 'tcx3 ',cjdata_std(i,j,k1,k2,iblock),rjval,cjchk,ajchk +! endif + endif ! 
tripole or tripoleT + endif + + if (index(halofld,'I4') > 0) then + cichk = real(nint(cichk),kind=dbl_kind) + cjchk = real(nint(cjchk),kind=dbl_kind) + endif + + if (index(halofld,'L1') > 0 .and. .not.spvalL1) then + if (cichk == dhalofillval .or. cichk == fillval) then + cichk = c0 + else + cichk = c1 + endif + if (cjchk == dhalofillval .or. cjchk == fillval) then + cjchk = c1 + else + cjchk = c0 + endif + endif + + ptcnt(testcnt) = ptcnt(testcnt) + 1 + call chkresults(aichk,cichk,errorflag(testcnt),testcnt,failcnt(testcnt), & + i,j,k1,k2,iblock,first_call,teststring(testcnt),trim(halofld)//'_I') + call chkresults(ajchk,cjchk,errorflag(testcnt),testcnt,failcnt(testcnt), & + i,j,k1,k2,iblock,first_call,teststring(testcnt),trim(halofld)//'_J') + enddo ! k2 + enddo ! k1 + enddo ! i + enddo ! j + enddo ! iblock + + enddo ! maxtypes + enddo ! maxlocs + enddo ! maxtests + + ! --------------------------- + ! SUMMARY + ! --------------------------- + + do n = 1,tottest + gflag = global_maxval(errorflag(n), MPI_COMM_ICE) + errorflag(n) = gflag + ptcntsum = global_sum(ptcnt(n),distrb_info) + ptcnt(n) = ptcntsum + failcntsum = global_sum(failcnt(n),distrb_info) + failcnt(n) = failcntsum + enddo + errorflag0 = maxval(errorflag(:)) + + if (my_task == master_task) then + write(6,*) ' ' + write(6,*) 'HALOCHK COMPLETED SUCCESSFULLY' + write(6,*) ' ' + tpcnt = 0 + tfcnt = 0 + do n = 1,tottest + if (errorflag(n) == passflag) then + tpcnt = tpcnt + 1 + write(6,*) 'PASS ',trim(teststring(n)),ptcnt(n),failcnt(n) + else + tfcnt = tfcnt + 1 + write(6,*) 'FAIL ',trim(teststring(n)),ptcnt(n),failcnt(n) + endif + enddo + write(6,*) ' ' + write(6,*) ' total pass = ',tpcnt + write(6,*) ' total fail = ',tfcnt + write(6,*) ' ' + if (errorflag0 == passflag) then + write(6,*) 'HALOCHK TEST COMPLETED SUCCESSFULLY' + else + write(6,*) 'HALOCHK TEST FAILED' + endif + write(6,*) ' ' + write(6,*) '==========================================================' + write(6,*) ' ' + endif + + + !----------------------------------------------------------------- + ! Gracefully end + !----------------------------------------------------------------- + + call end_run() + + end program halochk + +!======================================================================= + + subroutine chkresults(a1,r1,errorflag,testcnt,failcnt,i,j,k1,k2,iblock,first_call,teststring,halofld) + + use halochk_data + + implicit none + + real(dbl_kind) , intent(in) :: a1,r1 + integer(int_kind), intent(inout) :: errorflag, failcnt + integer(int_kind), intent(in) :: i,j,k1,k2,iblock,testcnt + logical , intent(inout) :: first_call + character(len=*) , intent(in) :: teststring,halofld + + logical,parameter :: print_always = .false. + character(len=*) , parameter :: subname='(chkresults)' + + if (a1 /= r1 .or. print_always) then + errorflag = failflag + failcnt = failcnt + 1 + if (first_call) then + write(100+my_task,*) ' ' + write(100+my_task,'(a,i4,2a)') '------- TEST = ',testcnt,' ',trim(teststring) + write(100+my_task,*) ' ' + write(100+my_task,'(a)') ' test task i j k1 k2 iblock expected halocomp diff' + first_call = .false. 
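+         ! the header above is written once per test; each subsequent
+         ! failing point appends one row via the write below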
+ endif + write(100+my_task,1001) trim(halofld),testcnt,my_task,i,j,k1,k2,iblock,r1,a1,r1-a1 + endif + + 1001 format(a8,7i6,3f12.3) + + end subroutine chkresults +!======================================================================= diff --git a/cicecore/shared/ice_constants.F90 b/cicecore/shared/ice_constants.F90 index f2da2ef9d..6656213be 100644 --- a/cicecore/shared/ice_constants.F90 +++ b/cicecore/shared/ice_constants.F90 @@ -97,8 +97,7 @@ module ice_constants field_loc_center = 1, & field_loc_NEcorner = 2, & field_loc_Nface = 3, & - field_loc_Eface = 4, & - field_loc_Wface = 5 + field_loc_Eface = 4 !----------------------------------------------------------------- ! field type attribute - necessary for handling diff --git a/configuration/scripts/Makefile b/configuration/scripts/Makefile index a2f17256f..872f426ad 100644 --- a/configuration/scripts/Makefile +++ b/configuration/scripts/Makefile @@ -74,7 +74,7 @@ AR := ar .SUFFIXES: -.PHONY: all cice libcice targets target db_files db_flags clean realclean helloworld calchk sumchk bcstchk gridavgchk optargs +.PHONY: all cice libcice targets target db_files db_flags clean realclean helloworld calchk sumchk bcstchk gridavgchk halochk optargs all: $(EXEC) cice: $(EXEC) @@ -93,7 +93,7 @@ targets: @echo " " @echo "Supported Makefile Targets are: cice, libcice, makdep, depends, clean, realclean" @echo " Diagnostics: targets, db_files, db_flags" - @echo " Unit Tests : helloworld, calchk, sumchk, bcstchk, gridavgchk, optargs" + @echo " Unit Tests : helloworld, calchk, sumchk, bcstchk, gridavgchk, halochk, optargs" target: targets db_files: @@ -151,6 +151,8 @@ bcstchk: $(EXEC) gridavgchk: $(EXEC) +halochk: $(EXEC) + # this builds just a subset of source code specified explicitly and requires a separate target HWOBJS := helloworld.o diff --git a/configuration/scripts/cice_decomp.csh b/configuration/scripts/cice_decomp.csh index 0c6715f3b..bcf27beee 100755 --- a/configuration/scripts/cice_decomp.csh +++ b/configuration/scripts/cice_decomp.csh @@ -156,8 +156,10 @@ if (${ICE_DECOMP_MXBLCKS} > 0) set mxblcks = ${ICE_DECOMP_MXBLCKS} set decomp = 'cartesian' set dshape = 'slenderX2' -if (${nxglob} % ${cicepes} != 0) set decomp = 'roundrobin' -if (${mxblcks} * ${blcky} * 2 < ${nyglob}) set decomp = 'roundrobin' +if (${cicepes} % 2 != 0) set decomp = 'roundrobin' +if (${nyglob} % (${blcky} * 2) != 0) set decomp = 'roundrobin' +if (${nxglob} % ${blckx} != 0) set decomp = 'roundrobin' +if (((${nxglob} * 2) % (${cicepes} * ${blckx})) != 0) set decomp = 'roundrobin' #--- outputs --- diff --git a/configuration/scripts/options/set_env.halochk b/configuration/scripts/options/set_env.halochk new file mode 100644 index 000000000..d09166d2f --- /dev/null +++ b/configuration/scripts/options/set_env.halochk @@ -0,0 +1,2 @@ +setenv ICE_DRVOPT unittest/halochk +setenv ICE_TARGET halochk diff --git a/configuration/scripts/options/set_nml.cyclic b/configuration/scripts/options/set_nml.cyclic new file mode 100644 index 000000000..3a5ae1a7b --- /dev/null +++ b/configuration/scripts/options/set_nml.cyclic @@ -0,0 +1,3 @@ +ew_boundary_type = 'cyclic' +ns_boundary_type = 'cyclic' + diff --git a/configuration/scripts/options/set_nml.open b/configuration/scripts/options/set_nml.open new file mode 100644 index 000000000..0e2d5f388 --- /dev/null +++ b/configuration/scripts/options/set_nml.open @@ -0,0 +1,3 @@ +ew_boundary_type = 'open' +ns_boundary_type = 'open' + diff --git a/configuration/scripts/options/set_nml.tripole b/configuration/scripts/options/set_nml.tripole new 
file mode 100644 index 000000000..7904b8134 --- /dev/null +++ b/configuration/scripts/options/set_nml.tripole @@ -0,0 +1,3 @@ +grid_type = 'tripole' +ew_boundary_type = 'cyclic' +ns_boundary_type = 'tripole' diff --git a/configuration/scripts/options/set_nml.tripolet b/configuration/scripts/options/set_nml.tripolet new file mode 100644 index 000000000..4bb63dc17 --- /dev/null +++ b/configuration/scripts/options/set_nml.tripolet @@ -0,0 +1,3 @@ +grid_type = 'tripole' +ew_boundary_type = 'cyclic' +ns_boundary_type = 'tripoleT' diff --git a/configuration/scripts/tests/unittest_suite.ts b/configuration/scripts/tests/unittest_suite.ts index 319c91aa6..7486e87aa 100644 --- a/configuration/scripts/tests/unittest_suite.ts +++ b/configuration/scripts/tests/unittest_suite.ts @@ -1,14 +1,29 @@ # Test Grid PEs Sets BFB-compare -unittest gx3 1x1 helloworld -unittest gx3 1x1 optargs -unittest gx3 1x1 calchk,short -unittest gx3 4x1x25x29x4 sumchk -unittest gx3 1x1x25x29x16 sumchk -unittest tx1 8x1 sumchk -unittest gx3 4x1 bcstchk -unittest gx3 1x1 bcstchk -unittest gx3 8x2 gridavgchk,dwblockall -unittest gx3 12x1 gridavgchk -unittest gx1 28x1 gridavgchk,dwblockall -unittest gx1 16x2 gridavgchk -unittest gbox128 8x2 gridavgchk +unittest gx3 1x1 helloworld +unittest gx3 1x1 optargs +unittest gx3 1x1 calchk,short +unittest gx3 4x1x25x29x4 sumchk +unittest gx3 1x1x25x29x16 sumchk +unittest tx1 8x1 sumchk +unittest gx3 4x1 bcstchk +unittest gx3 1x1 bcstchk +unittest gx3 8x2 gridavgchk,dwblockall +unittest gx3 12x1 gridavgchk +unittest gx1 28x1 gridavgchk,dwblockall +unittest gx1 16x2 gridavgchk +unittest gbox128 8x2 gridavgchk +unittest gbox80 1x1x10x10x80 halochk,cyclic,debug +unittest gbox80 1x1x24x23x16 halochk +unittest gbox80 1x1x23x24x16 halochk,cyclic +unittest gbox80 1x1x23x23x16 halochk,open +unittest tx1 1x1x90x60x16 halochk,dwblockall +unittest tx1 1x1x90x60x16 halochk,dwblockall,tripolet +unittest tx1 1x1x95x65x16 halochk,dwblockall +unittest tx1 1x1x95x65x16 halochk,dwblockall,tripolet +unittest gx3 4x2 halochk,dwblockall,debug +unittest gx3 8x2x16x12x10 halochk,cyclic,dwblockall +unittest gx3 17x1x16x12x10 halochk,open,dwblockall +unittest tx1 4x2 halochk,dwblockall +unittest tx1 4x2 halochk,dwblockall,tripolet +unittest tx1 4x2x65x45x10 halochk,dwblockall +unittest tx1 4x2x57x43x12 halochk,dwblockall,tripolet diff --git a/doc/source/user_guide/ug_implementation.rst b/doc/source/user_guide/ug_implementation.rst index 047204380..d9ea07a02 100644 --- a/doc/source/user_guide/ug_implementation.rst +++ b/doc/source/user_guide/ug_implementation.rst @@ -275,32 +275,74 @@ the namelist variable ``ns_boundary_type``, ‘tripole’ for the U-fold and ‘tripoleT’ for the T-fold grid. In the U-fold tripole grid, the poles have U-index -:math:`{\tt nx\_global}/2` and ``nx_global`` on the top U-row of the -physical grid, and points with U-index i and :math:`{\tt nx\_global-i}` +:math:`nx\_global/2` and :math:`nx\_global` on the top U-row of the +physical grid, and points with U-index :math:`i` and :math:`nx\_global-i` are coincident. Let the fold have U-row index :math:`n` on the global grid; this will also be the T-row index of the T-row to the south of the fold. There are ghost (halo) T- and U-rows to the north, beyond the fold, on the logical grid. The point with index i along the ghost T-row of index :math:`n+1` physically coincides with point -:math:`{\tt nx\_global}-{\tt i}+1` on the T-row of index :math:`n`. The +:math:`nx\_global-i+1` on the T-row of index :math:`n`. 
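+
+As an illustration, the following minimal sketch (hypothetical code, not
+taken from CICE) fills the ghost T-row above the U-fold for a scalar
+field, assuming a global array ``work`` and a fold at global row ``nfold``;
+vector and angle fields would additionally flip the sign of the copied
+values:
+
+.. code-block:: fortran
+
+   do i = 1, nx_global
+      ! ghost point i coincides with point nx_global-i+1 on the fold row
+      work(i,nfold+1) = work(nx_global-i+1,nfold)
+   enddo
+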
The ghost U-row of index :math:`n+1` physically coincides with the U-row of
-index :math:`n-1`.
+index :math:`n-1`. In the schematics below, symbols A-H represent
+grid points from 1:nx_global at a given j index, and the setup of the
+tripole seam is depicted within a few rows of the seam.
-In the T-fold tripole grid, the poles have T-index 1 and and
-:math:`{\tt nx\_global}/2+1` on the top T-row of the physical grid, and
-points with T-index i and :math:`{\tt nx\_global}-{\tt i}+2` are
+.. _tab-tripole:
+
+.. table:: Tripole (u-fold) Grid Schematic
+   :align: center
+
+   +--------------+---------------------------------------+--------------+
+   | global j     |                                       | global j     |
+   | index        | grid point IDs (i index)              | index source |
+   +==============+====+====+====+====+====+====+====+====+==============+
+   | ny_global+2  | H  | G  | F  | E  | D  | C  | B  | A  | ny_global-1  |
+   +--------------+----+----+----+----+----+----+----+----+--------------+
+   | ny_global+1  | H  | G  | F  | E  | D  | C  | B  | A  | ny_global    |
+   +--------------+----+----+----+----+----+----+----+----+--------------+
+   | ny_global    | A  | B  | C  | D  | E  | F  | G  | H  |              |
+   +--------------+----+----+----+----+----+----+----+----+--------------+
+   | ny_global-1  | A  | B  | C  | D  | E  | F  | G  | H  |              |
+   +--------------+----+----+----+----+----+----+----+----+--------------+
+
+
+In the T-fold tripole grid, the poles have T-index :math:`1` and
+:math:`nx\_global/2+1` on the top T-row of the physical grid, and
+points with T-index :math:`i` and :math:`nx\_global-i+2` are
coincident. Let the fold have T-row index :math:`n` on the global
grid. It is usual for the northernmost row of the physical domain to be
a U-row, but in the case of the T-fold, the U-row of index :math:`n` is
“beyond” the fold; although it is not a ghost row, it is not physically
independent, because it coincides with U-row :math:`n-1`, and it
therefore has to be treated like a ghost row. Points i on U-row
-:math:`n` coincides with :math:`{\tt nx\_global}-{\tt i}+1` on U-row
+:math:`n` coincide with :math:`nx\_global-i+1` on U-row
:math:`n-1`. There are still ghost T- and U-rows :math:`n+1` to the
north of U-row :math:`n`. Ghost T-row :math:`n+1` coincides with T-row
:math:`n-1`, and ghost U-row :math:`n+1` coincides with U-row
:math:`n-2`.
+.. _tab-tripoleT:
+
+.. table:: TripoleT (t-fold) Grid Schematic
+   :align: center
+
+   +--------------+--------------------------------------------+--------------+
+   | global j     |                                            | global j     |
+   | index        | grid point IDs (i index)                   | index source |
+   +==============+====+====+====+====+====+====+====+====+====+==============+
+   | ny_global+2  |    | H  | G  | F  | E  | D  | C  | B  | A  | ny_global-2  |
+   +--------------+----+----+----+----+----+----+----+----+----+--------------+
+   | ny_global+1  |    | H  | G  | F  | E  | D  | C  | B  | A  | ny_global-1  |
+   +--------------+----+----+----+----+----+----+----+----+----+--------------+
+   | ny_global    | A  | BH | CG | DF | E  | FD | GC | HB |    |              |
+   +--------------+----+----+----+----+----+----+----+----+----+--------------+
+   | ny_global-1  | A  | B  | C  | D  | E  | F  | G  | H  |    |              |
+   +--------------+----+----+----+----+----+----+----+----+----+--------------+
+   | ny_global-2  | A  | B  | C  | D  | E  | F  | G  | H  |    |              |
+   +--------------+----+----+----+----+----+----+----+----+----+--------------+
+
+
The tripole grid thus requires two special kinds of treatment for
certain rows, arranged by the halo-update routines. First, within rows
along the fold, coincident points must always have the same value.
This @@ -310,7 +352,8 @@ the coincident physical rows. Both operations involve the tripole buffer, which is used to assemble the data for the affected rows. Special treatment is also required in the scattering routine, and when computing global sums one of each pair of coincident points has to be -excluded. +excluded. Halos of center, east, north, and northeast points are supported, +and each requires slightly different halo indexing across the tripole seam. ***************** Rectangular grids diff --git a/doc/source/user_guide/ug_testing.rst b/doc/source/user_guide/ug_testing.rst index 289f626a9..606ae1397 100644 --- a/doc/source/user_guide/ug_testing.rst +++ b/doc/source/user_guide/ug_testing.rst @@ -736,6 +736,7 @@ The following are brief descriptions of some of the current unit tests, - **calchk** is a unit test that exercises the CICE calendar over 100,000 years and verifies correctness. This test does not depend on the CICE initialization. - **gridavgchk** is a unit test that exercises the CICE grid_average_X2Y methods and verifies results. + - **halochk** is a unit test that exercises the CICE haloUpdate methods and verifies results. - **helloworld** is a simple test that writes out helloworld and uses no CICE infrastructure. This tests exists to demonstrate how to build a unit test by specifying the object files directly in the Makefile From 03615e87fc6d1909d033c842a7870dc8b22bb6ee Mon Sep 17 00:00:00 2001 From: Tony Craig Date: Tue, 14 Mar 2023 10:34:11 -0700 Subject: [PATCH 55/57] Update Icepack to #37e215b532 March 3, 2023 (#821) --- icepack | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/icepack b/icepack index 8f96707a9..37e215b53 160000 --- a/icepack +++ b/icepack @@ -1 +1 @@ -Subproject commit 8f96707a90132ca119d81ed84e5a62ca0ff3ed96 +Subproject commit 37e215b5329463591d2cce228883fd34aa0ea3be From adce2223aee720e9307d6931b88aad2db35a0f5a Mon Sep 17 00:00:00 2001 From: Tony Craig Date: Wed, 15 Mar 2023 15:43:22 -0700 Subject: [PATCH 56/57] update documentation (#822) to fix the latex/pdf errors recently trapped by readthedocs. --- doc/source/science_guide/sg_dynamics.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/source/science_guide/sg_dynamics.rst b/doc/source/science_guide/sg_dynamics.rst index fd5e2d760..585c18616 100644 --- a/doc/source/science_guide/sg_dynamics.rst +++ b/doc/source/science_guide/sg_dynamics.rst @@ -237,15 +237,15 @@ In the VP approach, equation :eq:`momsys` is discretized implicitly using a Back and stresses are not computed explicitly: .. math:: - \begin{align} + \begin{aligned} m\frac{(u^{n}-u^{n-1})}{\Delta t} &= \frac{\partial \sigma_{1j}^n}{\partial x_j} - \tau_{w,x}^n + \tau_{b,x}^n + mfv^n + r_{x}^n, \\ m\frac{(v^{n}-v^{n-1})}{\Delta t} &= \frac{\partial \sigma^{n} _{2j}}{\partial x_j} - - \tau_{w,y}^n + \tau_{b,y}^n -mfu^{n} + - \tau_{w,y}^n + \tau_{b,y}^n - mfu^{n} + r_{y}^n - \end{align} + \end{aligned} :label: u_sit where :math:`r = (r_x,r_y)` contains all terms that do not depend on the velocities :math:`u^n, v^n` (namely the sea surface tilt and the wind stress). @@ -297,7 +297,7 @@ The Hibler-Bryan form for the ice-ocean stress :cite:`Hibler87` is included in **ice\_dyn\_shared.F90** but is currently commented out, pending further testing. -.. _seabed-stress: +.. _seabedstress: Seabed stress ~~~~~~~~~~~~~ From 7df80baa1a67769ad0aa4e07bbf8d250079d410d Mon Sep 17 00:00:00 2001 From: "David A. Bailey" Date: Wed, 15 Mar 2023 16:45:16 -0600 Subject: [PATCH 57/57] FSD updates for icepack. 
(#813)

* send wlat to and fro for FSD

* Update Icepack to include FSDmods

---------

Co-authored-by: cmbitz
---
 cicecore/cicedyn/general/ice_flux.F90     | 4 +++-
 cicecore/cicedyn/general/ice_step_mod.F90 | 6 ++++--
 icepack                                   | 2 +-
 3 files changed, 8 insertions(+), 4 deletions(-)

diff --git a/cicecore/cicedyn/general/ice_flux.F90 b/cicecore/cicedyn/general/ice_flux.F90
index 8d190753e..5145fec66 100644
--- a/cicecore/cicedyn/general/ice_flux.F90
+++ b/cicecore/cicedyn/general/ice_flux.F90
@@ -366,6 +366,7 @@ module ice_flux
      vatmT   , & ! vatm on T grid (m/s)
      rside   , & ! fraction of ice that melts laterally
      fside   , & ! lateral heat flux (W/m^2)
+     wlat    , & ! lateral melt rate (m/s)
      fsw     , & ! incoming shortwave radiation (W/m^2)
      coszen  , & ! cosine solar zenith angle, < 0 for sun below horizon
      rdg_conv, & ! convergence term for ridging (1/s)
@@ -540,7 +541,8 @@ subroutine alloc_flux
         uatmT    (nx_block,ny_block,max_blocks), & ! uatm on T grid
         vatmT    (nx_block,ny_block,max_blocks), & ! vatm on T grid
         rside    (nx_block,ny_block,max_blocks), & ! fraction of ice that melts laterally
-        fside    (nx_block,ny_block,max_blocks), & ! lateral melt rate (W/m^2)
+        fside    (nx_block,ny_block,max_blocks), & ! lateral melt flux (W/m^2)
+        wlat     (nx_block,ny_block,max_blocks), & ! lateral melt rate (m/s)
         fsw      (nx_block,ny_block,max_blocks), & ! incoming shortwave radiation (W/m^2)
         coszen   (nx_block,ny_block,max_blocks), & ! cosine solar zenith angle, < 0 for sun below horizon
         rdg_conv (nx_block,ny_block,max_blocks), & ! convergence term for ridging (1/s)
diff --git a/cicecore/cicedyn/general/ice_step_mod.F90 b/cicecore/cicedyn/general/ice_step_mod.F90
index 8dd6fe49a..56510c247 100644
--- a/cicecore/cicedyn/general/ice_step_mod.F90
+++ b/cicecore/cicedyn/general/ice_step_mod.F90
@@ -227,7 +227,7 @@ subroutine step_therm1 (dt, iblk)
      use ice_calendar, only: yday
      use ice_domain_size, only: ncat, nilyr, nslyr, n_iso, n_aero
      use ice_flux, only: frzmlt, sst, Tf, strocnxT_iavg, strocnyT_iavg, rside, fbot, Tbot, Tsnice, &
-         meltsn, melttn, meltbn, congeln, snoicen, uatmT, vatmT, fside, &
+         meltsn, melttn, meltbn, congeln, snoicen, uatmT, vatmT, fside, wlat, &
          wind, rhoa, potT, Qa, zlvl, zlvs, strax, stray, flatn, fsensn, fsurfn, fcondtopn, &
          flw, fsnow, fpond, sss, mlt_onset, frz_onset, fcondbotn, fcondbot, fsloss, &
          frain, Tair, strairxT, strairyT, fsurf, fcondtop, fsens, &
@@ -469,6 +469,7 @@ subroutine step_therm1 (dt, iblk)
             frzmlt        = frzmlt        (i,j, iblk), &
             rside         = rside         (i,j, iblk), &
             fside         = fside         (i,j, iblk), &
+            wlat          = wlat          (i,j, iblk), &
             fsnow         = fsnow         (i,j, iblk), &
             frain         = frain         (i,j, iblk), &
             fpond         = fpond         (i,j, iblk), &
@@ -618,7 +619,7 @@ subroutine step_therm2 (dt, iblk)
      use ice_calendar, only: yday
      use ice_domain_size, only: ncat, nilyr, nslyr, nblyr, nfsd
      use ice_flux, only: fresh, frain, fpond, frzmlt, frazil, frz_onset, &
-         update_ocn_f, fsalt, Tf, sss, salinz, fhocn, rside, fside, &
+         update_ocn_f, fsalt, Tf, sss, salinz, fhocn, rside, fside, wlat, &
          meltl, frazil_diag
      use ice_flux_bgc, only: flux_bio, faero_ocn, &
          fiso_ocn, HDO_ocn, H2_16O_ocn, H2_18O_ocn
@@ -702,6 +703,7 @@ subroutine step_therm2 (dt, iblk)
             rside      = rside      (i,j, iblk), &
             meltl      = meltl      (i,j, iblk), &
             fside      = fside      (i,j, iblk), &
+            wlat       = wlat       (i,j, iblk), &
             frzmlt     = frzmlt     (i,j, iblk), &
             frazil     = frazil     (i,j, iblk), &
             frain      = frain      (i,j, iblk), &
diff --git a/icepack b/icepack
index 37e215b53..a4779cc71 160000
--- a/icepack
+++ b/icepack
@@ -1 +1 @@
-Subproject commit 37e215b5329463591d2cce228883fd34aa0ea3be
+Subproject commit a4779cc71125b40a7db3a4da8512247cbf2b0955