From a4cc8f64ac5495a94f2edc8c55019eb32972b5ff Mon Sep 17 00:00:00 2001 From: Denise Worthen Date: Mon, 23 Jan 2023 07:58:06 -0500 Subject: [PATCH] Update CICE for latest Consortium/main (#56) --- .github/workflows/test-cice.yml | 14 + cice.setup | 17 +- cicecore/cicedyn/analysis/ice_diagnostics.F90 | 45 +- cicecore/cicedyn/analysis/ice_history.F90 | 65 +- cicecore/cicedyn/analysis/ice_history_bgc.F90 | 31 +- .../cicedyn/analysis/ice_history_drag.F90 | 30 +- cicecore/cicedyn/analysis/ice_history_fsd.F90 | 30 +- .../cicedyn/analysis/ice_history_mechred.F90 | 30 +- .../cicedyn/analysis/ice_history_pond.F90 | 36 +- .../cicedyn/analysis/ice_history_shared.F90 | 2 +- .../cicedyn/analysis/ice_history_snow.F90 | 34 +- cicecore/cicedyn/dynamics/ice_dyn_eap.F90 | 129 +- cicecore/cicedyn/dynamics/ice_dyn_evp.F90 | 849 ++- .../dynamics/ice_dyn_evp_1d.F90 | 2 +- cicecore/cicedyn/dynamics/ice_dyn_shared.F90 | 142 +- cicecore/cicedyn/dynamics/ice_dyn_vp.F90 | 84 +- cicecore/cicedyn/general/ice_forcing.F90 | 16 +- cicecore/cicedyn/general/ice_init.F90 | 547 +- .../infrastructure/comm/mpi/ice_boundary.F90 | 3 - .../comm/serial/ice_boundary.F90 | 3 - .../cicedyn/infrastructure/ice_domain.F90 | 34 +- cicecore/cicedyn/infrastructure/ice_grid.F90 | 627 +-- .../infrastructure/ice_memusage_gptl.c | 2 +- .../cicedyn/infrastructure/ice_read_write.F90 | 1293 ++--- .../infrastructure/ice_restart_driver.F90 | 156 +- .../io/io_netcdf/ice_history_write.F90 | 2085 ++++---- .../io/io_pio2/ice_history_write.F90 | 1313 ++--- .../infrastructure/io/io_pio2/ice_restart.F90 | 1144 ++-- cicecore/cicedynB/dynamics/ice_dyn_evp.F90 | 2304 -------- cicecore/cicedynB/dynamics/ice_dyn_shared.F90 | 2937 ----------- cicecore/cicedynB/dynamics/ice_dyn_vp.F90 | 3779 ------------- cicecore/cicedynB/general/ice_init.F90 | 3208 ----------- cicecore/cicedynB/infrastructure/ice_grid.F90 | 4667 ----------------- .../infrastructure/ice_read_write.F90 | 2918 ----------- .../io/io_netcdf/ice_history_write.F90 | 1348 ----- 
.../io/io_pio2/ice_history_write.F90 | 1298 ----- .../infrastructure/io/io_pio2/ice_restart.F90 | 914 ---- ...ate CICE for latest Consortium_main (#56)) | 1 + .../drivers/direct/hadgem3/CICE_InitMod.F90 | 14 +- .../direct/nemo_concepts/CICE_InitMod.F90 | 14 +- cicecore/drivers/mct/cesm1/CICE_InitMod.F90 | 12 +- cicecore/drivers/nuopc/cmeps/CICE_InitMod.F90 | 13 +- .../drivers/nuopc/cmeps/ice_comp_nuopc.F90 | 6 +- .../drivers/nuopc/cmeps/ice_import_export.F90 | 4 +- cicecore/drivers/nuopc/dmi/CICE_InitMod.F90 | 31 +- .../drivers/standalone/cice/CICE_InitMod.F90 | 12 +- .../unittest/gridavgchk/CICE_InitMod.F90 | 12 +- .../drivers/unittest/sumchk/CICE_InitMod.F90 | 13 +- cicecore/shared/ice_fileunits.F90 | 22 +- cicecore/shared/ice_init_column.F90 | 9 + cicecore/version.txt | 2 +- configuration/scripts/cice.build | 12 +- configuration/scripts/ice_in | 2 + configuration/scripts/tests/base_suite.ts | 3 +- configuration/scripts/tests/decomp_suite.ts | 4 +- configuration/scripts/tests/first_suite.ts | 15 +- configuration/scripts/tests/gridsys_suite.ts | 13 +- configuration/scripts/tests/omp_suite.ts | 112 +- configuration/scripts/tests/perf_suite.ts | 33 +- doc/source/cice_index.rst | 3 +- doc/source/conf.py | 4 +- doc/source/developer_guide/dg_dynamics.rst | 22 +- doc/source/science_guide/sg_horiztrans.rst | 21 +- doc/source/science_guide/sg_tracers.rst | 2 +- doc/source/user_guide/ug_case_settings.rst | 21 +- doc/source/user_guide/ug_implementation.rst | 2 +- doc/source/user_guide/ug_testing.rst | 93 +- doc/source/user_guide/ug_troubleshooting.rst | 20 + 68 files changed, 4518 insertions(+), 28165 deletions(-) rename cicecore/{cicedynB => cicedyn}/dynamics/ice_dyn_evp_1d.F90 (99%) delete mode 100644 cicecore/cicedynB/dynamics/ice_dyn_evp.F90 delete mode 100644 cicecore/cicedynB/dynamics/ice_dyn_shared.F90 delete mode 100644 cicecore/cicedynB/dynamics/ice_dyn_vp.F90 delete mode 100644 cicecore/cicedynB/general/ice_init.F90 delete mode 100644 
cicecore/cicedynB/infrastructure/ice_grid.F90 delete mode 100644 cicecore/cicedynB/infrastructure/ice_read_write.F90 delete mode 100644 cicecore/cicedynB/infrastructure/io/io_netcdf/ice_history_write.F90 delete mode 100644 cicecore/cicedynB/infrastructure/io/io_pio2/ice_history_write.F90 delete mode 100644 cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 create mode 120000 cicecore/cicedynB~e628a9a (Update CICE for latest Consortium_main (#56)) diff --git a/.github/workflows/test-cice.yml b/.github/workflows/test-cice.yml index 32e784564..98581b0bc 100644 --- a/.github/workflows/test-cice.yml +++ b/.github/workflows/test-cice.yml @@ -73,6 +73,20 @@ jobs: ln -s ${GITHUB_WORKSPACE}/../CICE ${HOME}/cice # ls -al ${HOME}/ # ls -al ${GITHUB_WORKSPACE}/ + - name: check for tabs + run: | + cd $HOME/cice/cicecore + set cnt = 0 + set ffiles = `find -P . -iname "*.f*"` + set cfiles = `find -P . -iname "*.c*"` + foreach file ($ffiles $cfiles) + set fcnt = `sed -n '/\t/p' $file | wc -l` + @ cnt = $cnt + $fcnt + if ($fcnt > 0) then + echo "TAB found: $fcnt $file" + endif + end + exit $cnt - name: setup conda env shell: /bin/bash {0} run: | diff --git a/cice.setup b/cice.setup index 586fe3464..30da0ed2e 100755 --- a/cice.setup +++ b/cice.setup @@ -455,7 +455,22 @@ if ( ${dosuite} == 0 ) then set sets = "" else - set tarrays = `echo ${testsuite} | sed 's/,/ /g' | fmt -1 | sort -u` + # generate unique set of suites in tarrays in order they are set + set tarrays0 = `echo ${testsuite} | sed 's/,/ /g' | fmt -1 ` + #echo "${0}: tarrays0 = ${tarrays0}" + set tarrays = $tarrays0[1] + foreach t1 ( ${tarrays0} ) + set found = 0 + foreach t2 ( ${tarrays} ) + if ( ${t1} == ${t2} ) then + set found = 1 + endif + end + if ( ${found} == 0 ) then + set tarrays = ( ${tarrays} ${t1} ) + endif + end + #echo "${0}: tarrays = ${tarrays}" set testsuitecnt = 0 foreach tarray ( ${tarrays} ) @ testsuitecnt = ${testsuitecnt} + 1 diff --git a/cicecore/cicedyn/analysis/ice_diagnostics.F90 
b/cicecore/cicedyn/analysis/ice_diagnostics.F90 index 8879d6632..53631b2d4 100644 --- a/cicecore/cicedyn/analysis/ice_diagnostics.F90 +++ b/cicecore/cicedyn/analysis/ice_diagnostics.F90 @@ -87,6 +87,8 @@ module ice_diagnostics totms , & ! total ice/snow water mass (sh) totmin , & ! total ice water mass (nh) totmis , & ! total ice water mass (sh) + totsn , & ! total salt mass (nh) + totss , & ! total salt mass (sh) toten , & ! total ice/snow energy (J) totes ! total ice/snow energy (J) @@ -154,7 +156,7 @@ subroutine runtime_diags (dt) rhofresh, lfresh, lvap, ice_ref_salinity, Tffresh character (len=char_len) :: & - snwredist + snwredist, saltflux_option ! hemispheric state quantities real (kind=dbl_kind) :: & @@ -162,6 +164,8 @@ subroutine runtime_diags (dt) umaxs, hmaxs, shmaxs, areas, snwmxs, extents, shmaxst, & etotn, mtotn, micen, msnwn, pmaxn, ketotn, & etots, mtots, mices, msnws, pmaxs, ketots, & + stotn, & + stots, & urmsn, albtotn, arean_alb, mpndn, ptotn, spondn, & urmss, albtots, areas_alb, mpnds, ptots, sponds @@ -226,7 +230,7 @@ subroutine runtime_diags (dt) awtvdr_out=awtvdr, awtidr_out=awtidr, awtvdf_out=awtvdf, awtidf_out=awtidf, & rhofresh_out=rhofresh, lfresh_out=lfresh, lvap_out=lvap, & ice_ref_salinity_out=ice_ref_salinity,snwredist_out=snwredist, & - snwgrain_out=snwgrain) + snwgrain_out=snwgrain, saltflux_option_out=saltflux_option) call icepack_warnings_flush(nu_diag) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & file=__FILE__, line=__LINE__) @@ -512,6 +516,15 @@ subroutine runtime_diags (dt) etots = global_sum(work1, distrb_info, & field_loc_center, tareas) + ! total salt volume + call total_salt (work2) + + stotn = global_sum(work2, distrb_info, & + field_loc_center, tarean) + stots = global_sum(work2, distrb_info, & + field_loc_center, tareas) + + !----------------------------------------------------------------- ! 
various fluxes !----------------------------------------------------------------- @@ -785,12 +798,22 @@ subroutine runtime_diags (dt) swerrs = (fswnets - fswdns) / (fswnets - c1) ! salt mass - msltn = micen*ice_ref_salinity*p001 - mslts = mices*ice_ref_salinity*p001 + if (saltflux_option == 'prognostic') then + ! compute the total salt mass + msltn = stotn*rhoi*p001 + mslts = stots*rhoi*p001 + + ! change in salt mass + delmsltn = rhoi*(stotn-totsn)*p001 + delmslts = rhoi*(stots-totss)*p001 + else + msltn = micen*ice_ref_salinity*p001 + mslts = mices*ice_ref_salinity*p001 - ! change in salt mass - delmsltn = delmxn*ice_ref_salinity*p001 - delmslts = delmxs*ice_ref_salinity*p001 + ! change in salt mass + delmsltn = delmxn*ice_ref_salinity*p001 + delmslts = delmxs*ice_ref_salinity*p001 + endif ! salt error serrn = (sfsaltn + delmsltn) / (msltn + c1) @@ -1275,7 +1298,7 @@ subroutine init_mass_diags rhoi, rhos, rhofresh real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & - work1 + work1, work2 character(len=*), parameter :: subname = '(init_mass_diags)' @@ -1310,6 +1333,12 @@ subroutine init_mass_diags toten = global_sum(work1, distrb_info, field_loc_center, tarean) totes = global_sum(work1, distrb_info, field_loc_center, tareas) + ! 
north/south salt + call total_salt (work2) + totsn = global_sum(work2, distrb_info, field_loc_center, tarean) + totss = global_sum(work2, distrb_info, field_loc_center, tareas) + + if (print_points) then do n = 1, npnt if (my_task == pmloc(n)) then diff --git a/cicecore/cicedyn/analysis/ice_history.F90 b/cicecore/cicedyn/analysis/ice_history.F90 index f5e7d0d16..f19158f6a 100644 --- a/cicecore/cicedyn/analysis/ice_history.F90 +++ b/cicecore/cicedyn/analysis/ice_history.F90 @@ -81,6 +81,7 @@ subroutine init_hist (dt) use ice_history_fsd, only: init_hist_fsd_2D, init_hist_fsd_3Df, & init_hist_fsd_4Df, f_afsd, f_afsdn use ice_restart_shared, only: restart + use ice_fileunits, only: goto_nml real (kind=dbl_kind), intent(in) :: & dt ! time step @@ -102,7 +103,11 @@ subroutine init_hist (dt) character (len=25) :: & cstr_gat, cstr_gau, cstr_gav, & ! mask area name for t, u, v atm grid (ga) cstr_got, cstr_gou, cstr_gov ! mask area name for t, u, v ocn grid (go) - character(len=char_len) :: description + character (len=25) :: & + gridstr2D, gridstr ! temporary string names + character(len=char_len) :: description + character(len=char_len_long) :: tmpstr2 ! for namelist check + character(len=char_len) :: nml_name ! text namelist name character(len=*), parameter :: subname = '(init_hist)' @@ -226,24 +231,39 @@ subroutine init_hist (dt) file=__FILE__, line=__LINE__) if (my_task == master_task) then - write(nu_diag,*) subname,' Reading icefields_nml' + nml_name = 'icefields_nml' + write(nu_diag,*) subname,' Reading ', trim(nml_name) + ! open file call get_fileunit(nu_nml) open (nu_nml, file=trim(nml_filename), status='old',iostat=nml_error) if (nml_error /= 0) then - call abort_ice(subname//'ERROR: icefields_nml open file '// & + call abort_ice(subname//'ERROR: '//trim(nml_name)//' open file '// & trim(nml_filename), & file=__FILE__, line=__LINE__) endif + ! 
seek to this namelist + call goto_nml(nu_nml,trim(nml_name),nml_error) + if (nml_error /= 0) then + call abort_ice(subname//'ERROR: searching for '// trim(nml_name), & + file=__FILE__, line=__LINE__) + endif + + ! read namelist nml_error = 1 do while (nml_error > 0) read(nu_nml, nml=icefields_nml,iostat=nml_error) + ! check if error + if (nml_error /= 0) then + ! backspace and re-read erroneous line + backspace(nu_nml) + read(nu_nml,fmt='(A)') tmpstr2 + call abort_ice(subname//'ERROR: ' // trim(nml_name) // ' reading ' // & + trim(tmpstr2), file=__FILE__, line=__LINE__) + endif end do - if (nml_error /= 0) then - call abort_ice(subname//'ERROR: icefields_nml reading ', & - file=__FILE__, line=__LINE__) - endif + close(nu_nml) call release_fileunit(nu_nml) endif @@ -418,10 +438,6 @@ subroutine init_hist (dt) f_taubyE = f_tauby endif -#ifndef ncdf - f_bounds = .false. -#endif - ! write dimensions for 3D or 4D history variables ! note: list of variables checked here is incomplete if (f_aicen(1:1) /= 'x' .or. f_vicen(1:1) /= 'x' .or. & @@ -1307,21 +1323,25 @@ subroutine init_hist (dt) select case (grid_ice) case('B') description = ", on U grid (NE corner values)" + gridstr2d = trim(ustr2D) + gridstr = trim(ucstr) case ('CD','C') description = ", on T grid" + gridstr2d = trim(tstr2D) + gridstr = trim(tcstr) end select - call define_hist_field(n_sig1,"sig1","1",ustr2D, ucstr, & + call define_hist_field(n_sig1,"sig1","1",gridstr2d, gridstr, & "norm. principal stress 1", & "sig1 is instantaneous" // trim(description), c1, c0, & ns1, f_sig1) - call define_hist_field(n_sig2,"sig2","1",ustr2D, ucstr, & + call define_hist_field(n_sig2,"sig2","1",gridstr2d, gridstr, & "norm. 
principal stress 2", & "sig2 is instantaneous" // trim(description), c1, c0, & ns1, f_sig2) - call define_hist_field(n_sigP,"sigP","1",ustr2D, ucstr, & + call define_hist_field(n_sigP,"sigP","1",gridstr2d, gridstr, & "ice pressure", & "sigP is instantaneous" // trim(description), c1, c0, & ns1, f_sigP) @@ -2162,12 +2182,13 @@ subroutine accum_hist (dt) real (kind=dbl_kind) :: awtvdr, awtidr, awtvdf, awtidf, puny, secday, rad_to_deg real (kind=dbl_kind) :: Tffresh, rhoi, rhos, rhow, ice_ref_salinity - real (kind=dbl_kind) :: rho_ice, rho_ocn, Tice, Sbr, phi, rhob, dfresh, dfsalt + real (kind=dbl_kind) :: rho_ice, rho_ocn, Tice, Sbr, phi, rhob, dfresh, dfsalt, sicen logical (kind=log_kind) :: formdrag, skl_bgc logical (kind=log_kind) :: tr_pond, tr_aero, tr_brine, tr_snow integer (kind=int_kind) :: ktherm integer (kind=int_kind) :: nt_sice, nt_qice, nt_qsno, nt_iage, nt_FY, nt_Tsfc, & nt_alvl, nt_vlvl + character (len=char_len) :: saltflux_option type (block) :: & this_block ! block information for current block @@ -2179,6 +2200,7 @@ subroutine accum_hist (dt) call icepack_query_parameters(Tffresh_out=Tffresh, rhoi_out=rhoi, rhos_out=rhos, & rhow_out=rhow, ice_ref_salinity_out=ice_ref_salinity) call icepack_query_parameters(formdrag_out=formdrag, skl_bgc_out=skl_bgc, ktherm_out=ktherm) + call icepack_query_parameters(saltflux_option_out=saltflux_option) call icepack_query_tracer_flags(tr_pond_out=tr_pond, tr_aero_out=tr_aero, & tr_brine_out=tr_brine, tr_snow_out=tr_snow) call icepack_query_tracer_indices(nt_sice_out=nt_sice, nt_qice_out=nt_qice, & @@ -2263,7 +2285,7 @@ subroutine accum_hist (dt) !--------------------------------------------------------------- !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block, & - !$OMP k,n,qn,ns,sn,rho_ocn,rho_ice,Tice,Sbr,phi,rhob,dfresh,dfsalt, & + !$OMP k,n,qn,ns,sn,rho_ocn,rho_ice,Tice,Sbr,phi,rhob,dfresh,dfsalt,sicen, & !$OMP worka,workb,worka3,Tinz4d,Sinz4d,Tsnz4d) do iblk = 1, nblocks @@ -3222,7 +3244,16 @@ 
subroutine accum_hist (dt) dfresh = -rhoi*frazil(i,j,iblk)/dt endif endif - dfsalt = ice_ref_salinity*p001*dfresh + if (saltflux_option == 'prognostic') then + sicen = c0 + do k = 1, nzilyr + sicen = sicen + trcr(i,j,nt_sice+k-1,iblk)*vice(i,j,iblk) & + / real(nzilyr,kind=dbl_kind) + enddo + dfsalt = sicen*p001*dfresh + else + dfsalt = ice_ref_salinity*p001*dfresh + endif worka(i,j) = aice(i,j,iblk)*(fsalt(i,j,iblk)+dfsalt) endif enddo diff --git a/cicecore/cicedyn/analysis/ice_history_bgc.F90 b/cicecore/cicedyn/analysis/ice_history_bgc.F90 index 003e76120..6974a087b 100644 --- a/cicecore/cicedyn/analysis/ice_history_bgc.F90 +++ b/cicecore/cicedyn/analysis/ice_history_bgc.F90 @@ -271,6 +271,7 @@ subroutine init_hist_bgc_2D use ice_communicate, only: my_task, master_task use ice_history_shared, only: tstr2D, tcstr, define_hist_field, & f_fsalt, f_fsalt_ai, f_sice + use ice_fileunits, only: goto_nml integer (kind=int_kind) :: n, ns integer (kind=int_kind) :: nml_error ! namelist i/o error flag @@ -283,6 +284,9 @@ subroutine init_hist_bgc_2D tr_bgc_DON, tr_bgc_Fe, tr_bgc_hum, & skl_bgc, solve_zsal, z_tracers + character(len=char_len) :: nml_name ! for namelist check + character(len=char_len_long) :: tmpstr2 ! for namelist check + character(len=*), parameter :: subname = '(init_hist_bgc_2D)' call icepack_query_parameters(skl_bgc_out=skl_bgc, & @@ -305,24 +309,39 @@ subroutine init_hist_bgc_2D !----------------------------------------------------------------- if (my_task == master_task) then - write(nu_diag,*) subname,' Reading icefields_bgc_nml' + nml_name = 'icefields_bgc_nml' + write(nu_diag,*) subname,' Reading ', trim(nml_name) + ! 
check if can open file call get_fileunit(nu_nml) open (nu_nml, file=trim(nml_filename), status='old',iostat=nml_error) if (nml_error /= 0) then - call abort_ice(subname//'ERROR: icefields_bgc_nml open file '// & + call abort_ice(subname//'ERROR: '//trim(nml_name)//' open file '// & trim(nml_filename), & file=__FILE__, line=__LINE__) endif + ! seek to namelist in file + call goto_nml(nu_nml,trim(nml_name),nml_error) + if (nml_error /= 0) then + call abort_ice(subname//'ERROR: searching for '// trim(nml_name), & + file=__FILE__, line=__LINE__) + endif + + ! read namelist nml_error = 1 do while (nml_error > 0) read(nu_nml, nml=icefields_bgc_nml,iostat=nml_error) + ! check if error + if (nml_error /= 0) then + ! backspace and re-read erroneous line + backspace(nu_nml) + read(nu_nml,fmt='(A)') tmpstr2 + call abort_ice(subname//'ERROR: ' // trim(nml_name) // ' reading ' // & + trim(tmpstr2), file=__FILE__, line=__LINE__) + endif end do - if (nml_error /= 0) then - call abort_ice(subname//'ERROR: icefields_bgc_nml reading ', & - file=__FILE__, line=__LINE__) - endif + close(nu_nml) call release_fileunit(nu_nml) endif diff --git a/cicecore/cicedyn/analysis/ice_history_drag.F90 b/cicecore/cicedyn/analysis/ice_history_drag.F90 index fba19b364..dd9e3cb59 100644 --- a/cicecore/cicedyn/analysis/ice_history_drag.F90 +++ b/cicecore/cicedyn/analysis/ice_history_drag.F90 @@ -64,10 +64,13 @@ subroutine init_hist_drag_2D use ice_calendar, only: nstreams use ice_communicate, only: my_task, master_task use ice_history_shared, only: tstr2D, tcstr, define_hist_field + use ice_fileunits, only: goto_nml integer (kind=int_kind) :: ns integer (kind=int_kind) :: nml_error ! namelist i/o error flag logical (kind=log_kind) :: formdrag + character(len=char_len_long) :: tmpstr2 ! for namelist check + character(len=char_len) :: nml_name ! 
for namelist check character(len=*), parameter :: subname = '(init_hist_drag_2D)' @@ -81,24 +84,39 @@ subroutine init_hist_drag_2D !----------------------------------------------------------------- if (my_task == master_task) then - write(nu_diag,*) subname,' Reading icefields_drag_nml' + nml_name = 'icefields_drag_nml' + write(nu_diag,*) subname,' Reading ', trim(nml_name) + ! open namelist file call get_fileunit(nu_nml) open (nu_nml, file=trim(nml_filename), status='old',iostat=nml_error) if (nml_error /= 0) then - call abort_ice(subname//'ERROR: icefields_drag_nml open file '// & + call abort_ice(subname//'ERROR: '//trim(nml_name)//' open file '// & trim(nml_filename), & file=__FILE__, line=__LINE__) endif + ! go to this namelist + call goto_nml(nu_nml,trim(nml_name),nml_error) + if (nml_error /= 0) then + call abort_ice(subname//'ERROR: searching for '// trim(nml_name), & + file=__FILE__, line=__LINE__) + endif + + ! read namelist nml_error = 1 do while (nml_error > 0) read(nu_nml, nml=icefields_drag_nml,iostat=nml_error) + ! check if error + if (nml_error /= 0) then + ! 
backspace and re-read erroneous line + backspace(nu_nml) + read(nu_nml,fmt='(A)') tmpstr2 + call abort_ice(subname//'ERROR: ' // trim(nml_name) // ' reading ' // & + trim(tmpstr2), file=__FILE__, line=__LINE__) + endif end do - if (nml_error /= 0) then - call abort_ice(subname//'ERROR: icefields_drag_nml reading ', & - file=__FILE__, line=__LINE__) - endif + close(nu_nml) call release_fileunit(nu_nml) endif diff --git a/cicecore/cicedyn/analysis/ice_history_fsd.F90 b/cicecore/cicedyn/analysis/ice_history_fsd.F90 index b52db4e05..610f56608 100644 --- a/cicecore/cicedyn/analysis/ice_history_fsd.F90 +++ b/cicecore/cicedyn/analysis/ice_history_fsd.F90 @@ -76,10 +76,13 @@ subroutine init_hist_fsd_2D use ice_calendar, only: nstreams use ice_communicate, only: my_task, master_task use ice_history_shared, only: tstr2D, tcstr, define_hist_field + use ice_fileunits, only: goto_nml integer (kind=int_kind) :: ns integer (kind=int_kind) :: nml_error ! namelist i/o error flag logical (kind=log_kind) :: tr_fsd, wave_spec + character (len=char_len_long) :: tmpstr2 ! test namelist + character(len=char_len) :: nml_name ! text namelist name character(len=*), parameter :: subname = '(init_hist_fsd_2D)' @@ -96,24 +99,39 @@ subroutine init_hist_fsd_2D !----------------------------------------------------------------- if (my_task == master_task) then - write(nu_diag,*) subname,' Reading icefields_fsd_nml' + nml_name = 'icefields_fsd_nml' + write(nu_diag,*) subname,' Reading ', trim(nml_name) + ! open namelist file call get_fileunit(nu_nml) open (nu_nml, file=trim(nml_filename), status='old',iostat=nml_error) if (nml_error /= 0) then - call abort_ice(subname//'ERROR: icefields_fsd_nml open file '// & + call abort_ice(subname//'ERROR: '//trim(nml_name)//' open file '// & trim(nml_filename), & file=__FILE__, line=__LINE__) endif + ! 
goto this namelist + call goto_nml(nu_nml,trim(nml_name),nml_error) + if (nml_error /= 0) then + call abort_ice(subname//'ERROR: searching for '// trim(nml_name), & + file=__FILE__, line=__LINE__) + endif + + ! read namelist nml_error = 1 do while (nml_error > 0) read(nu_nml, nml=icefields_fsd_nml,iostat=nml_error) + ! check if error + if (nml_error /= 0) then + ! backspace and re-read erroneous line + backspace(nu_nml) + read(nu_nml,fmt='(A)') tmpstr2 + call abort_ice(subname//'ERROR: ' // trim(nml_name) // ' reading ' // & + trim(tmpstr2), file=__FILE__, line=__LINE__) + endif end do - if (nml_error /= 0) then - call abort_ice(subname//'ERROR: icefields_fsd_nml reading ', & - file=__FILE__, line=__LINE__) - endif + close(nu_nml) call release_fileunit(nu_nml) endif diff --git a/cicecore/cicedyn/analysis/ice_history_mechred.F90 b/cicecore/cicedyn/analysis/ice_history_mechred.F90 index 98c58bc39..e0d15fcf2 100644 --- a/cicecore/cicedyn/analysis/ice_history_mechred.F90 +++ b/cicecore/cicedyn/analysis/ice_history_mechred.F90 @@ -84,11 +84,14 @@ subroutine init_hist_mechred_2D use ice_calendar, only: nstreams, histfreq use ice_communicate, only: my_task, master_task use ice_history_shared, only: tstr2D, tcstr, define_hist_field + use ice_fileunits, only: goto_nml integer (kind=int_kind) :: ns integer (kind=int_kind) :: nml_error ! namelist i/o error flag real (kind=dbl_kind) :: secday logical (kind=log_kind) :: tr_lvl + character(len=char_len_long) :: tmpstr2 ! for namelist check + character(len=char_len) :: nml_name ! for namelist check character(len=*), parameter :: subname = '(init_hist_mechred_2D)' @@ -103,24 +106,39 @@ subroutine init_hist_mechred_2D !----------------------------------------------------------------- if (my_task == master_task) then - write(nu_diag,*) subname,' Reading icefields_mechred_nml' + nml_name = 'icefields_mechred_nml' + write(nu_diag,*) subname,' Reading ', trim(nml_name) + ! 
open namelist file call get_fileunit(nu_nml) open (nu_nml, file=trim(nml_filename), status='old',iostat=nml_error) if (nml_error /= 0) then - call abort_ice(subname//'ERROR: icefields_mechred_nml open file '// & + call abort_ice(subname//'ERROR: '//trim(nml_name)//' open file '// & trim(nml_filename), & file=__FILE__, line=__LINE__) endif + ! goto this namelist in file + call goto_nml(nu_nml,trim(nml_name),nml_error) + if (nml_error /= 0) then + call abort_ice(subname//'ERROR: searching for '// trim(nml_name), & + file=__FILE__, line=__LINE__) + endif + + ! read namelist nml_error = 1 do while (nml_error > 0) read(nu_nml, nml=icefields_mechred_nml,iostat=nml_error) + ! check if error + if (nml_error /= 0) then + ! backspace and re-read erroneous line + backspace(nu_nml) + read(nu_nml,fmt='(A)') tmpstr2 + call abort_ice(subname//'ERROR: ' // trim(nml_name) // ' reading ' // & + trim(tmpstr2), file=__FILE__, line=__LINE__) + endif end do - if (nml_error /= 0) then - call abort_ice(subname//'ERROR: icefields_mechred_nml reading ', & - file=__FILE__, line=__LINE__) - endif + close(nu_nml) call release_fileunit(nu_nml) endif diff --git a/cicecore/cicedyn/analysis/ice_history_pond.F90 b/cicecore/cicedyn/analysis/ice_history_pond.F90 index 8818ff94e..d209e6db6 100644 --- a/cicecore/cicedyn/analysis/ice_history_pond.F90 +++ b/cicecore/cicedyn/analysis/ice_history_pond.F90 @@ -69,10 +69,13 @@ subroutine init_hist_pond_2D use ice_calendar, only: nstreams, histfreq use ice_communicate, only: my_task, master_task use ice_history_shared, only: tstr2D, tcstr, define_hist_field + use ice_fileunits, only: goto_nml integer (kind=int_kind) :: ns integer (kind=int_kind) :: nml_error ! namelist i/o error flag logical (kind=log_kind) :: tr_pond + character(len=char_len_long) :: tmpstr2 ! for namelist check + character(len=char_len) :: nml_name ! 
text namelist name character(len=*), parameter :: subname = '(init_hist_pond_2D)' @@ -86,24 +89,39 @@ subroutine init_hist_pond_2D !----------------------------------------------------------------- if (my_task == master_task) then - write(nu_diag,*) subname,' Reading icefields_pond_nml' + nml_name = 'icefields_pond_nml' + write(nu_diag,*) subname,' Reading ', trim(nml_name) + ! open namelist file call get_fileunit(nu_nml) open (nu_nml, file=trim(nml_filename), status='old',iostat=nml_error) if (nml_error /= 0) then - call abort_ice(subname//'ERROR: icefields_pond_nml open file '// & - trim(nml_filename), & - file=__FILE__, line=__LINE__) + call abort_ice(subname//'ERROR: '//trim(nml_name)//' open file '// & + trim(nml_filename), & + file=__FILE__, line=__LINE__) endif - + + ! goto this namelist in file + call goto_nml(nu_nml,trim(nml_name),nml_error) + if (nml_error /= 0) then + call abort_ice(subname//'ERROR: searching for '// trim(nml_name), & + file=__FILE__, line=__LINE__) + endif + + ! read namelist nml_error = 1 do while (nml_error > 0) read(nu_nml, nml=icefields_pond_nml,iostat=nml_error) + ! check if error + if (nml_error /= 0) then + ! backspace and re-read erroneous line + backspace(nu_nml) + read(nu_nml,fmt='(A)') tmpstr2 + call abort_ice(subname//'ERROR: ' // trim(nml_name) // ' reading ' // & + trim(tmpstr2), file=__FILE__, line=__LINE__) + endif end do - if (nml_error /= 0) then - call abort_ice(subname//'ERROR: icefields_pond_nml reading ', & - file=__FILE__, line=__LINE__) - endif + close(nu_nml) call release_fileunit(nu_nml) endif diff --git a/cicecore/cicedyn/analysis/ice_history_shared.F90 b/cicecore/cicedyn/analysis/ice_history_shared.F90 index d9c62edde..70aa5e14c 100644 --- a/cicecore/cicedyn/analysis/ice_history_shared.F90 +++ b/cicecore/cicedyn/analysis/ice_history_shared.F90 @@ -40,7 +40,7 @@ module ice_history_shared logical (kind=log_kind), public :: & hist_avg ! 
if true, write averaged data instead of snapshots - character (len=char_len), public :: & + character (len=char_len_long), public :: & history_file , & ! output file for history incond_file ! output file for snapshot initial conditions diff --git a/cicecore/cicedyn/analysis/ice_history_snow.F90 b/cicecore/cicedyn/analysis/ice_history_snow.F90 index 0ec4144bf..62e65b5a3 100644 --- a/cicecore/cicedyn/analysis/ice_history_snow.F90 +++ b/cicecore/cicedyn/analysis/ice_history_snow.F90 @@ -69,6 +69,7 @@ subroutine init_hist_snow_2D (dt) use ice_history_shared, only: tstr2D, tcstr, define_hist_field use ice_fileunits, only: nu_nml, nml_filename, & get_fileunit, release_fileunit + use ice_fileunits, only: goto_nml real (kind=dbl_kind), intent(in) :: & dt ! time step @@ -76,7 +77,10 @@ subroutine init_hist_snow_2D (dt) integer (kind=int_kind) :: ns integer (kind=int_kind) :: nml_error ! namelist i/o error flag real (kind=dbl_kind) :: rhofresh, secday - logical (kind=log_kind) :: tr_snow + logical (kind=log_kind) :: tr_snow + character(len=char_len_long) :: tmpstr2 ! for namelist check + character(len=char_len) :: nml_name ! for namelist check + character(len=*), parameter :: subname = '(init_hist_snow_2D)' call icepack_query_tracer_flags(tr_snow_out=tr_snow) @@ -92,26 +96,42 @@ subroutine init_hist_snow_2D (dt) !----------------------------------------------------------------- if (my_task == master_task) then - write(nu_diag,*) subname,' Reading icefields_snow_nml' + nml_name = 'icefields_snow_nml' + write(nu_diag,*) subname,' Reading ', trim(nml_name) + ! open namelist file call get_fileunit(nu_nml) open (nu_nml, file=trim(nml_filename), status='old',iostat=nml_error) if (nml_error /= 0) then - call abort_ice(subname//'ERROR: icefields_snow_nml open file '// & + call abort_ice(subname//'ERROR: '//trim(nml_name)//' open file '// & trim(nml_filename), & file=__FILE__, line=__LINE__) endif + ! 
goto namelist in file + call goto_nml(nu_nml,trim(nml_name),nml_error) + if (nml_error /= 0) then + call abort_ice(subname//'ERROR: searching for '// trim(nml_name), & + file=__FILE__, line=__LINE__) + endif + + ! read namelist nml_error = 1 do while (nml_error > 0) read(nu_nml, nml=icefields_snow_nml,iostat=nml_error) + ! check if error + if (nml_error /= 0) then + ! backspace and re-read erroneous line + backspace(nu_nml) + read(nu_nml,fmt='(A)') tmpstr2 + call abort_ice(subname//'ERROR: ' // trim(nml_name) // ' reading ' // & + trim(tmpstr2), file=__FILE__, line=__LINE__) + endif end do - if (nml_error /= 0) then - call abort_ice(subname//'ERROR: icefields_snow_nml reading ', & - file=__FILE__, line=__LINE__) - endif + close(nu_nml) call release_fileunit(nu_nml) + endif else ! .not. tr_snow diff --git a/cicecore/cicedyn/dynamics/ice_dyn_eap.F90 b/cicecore/cicedyn/dynamics/ice_dyn_eap.F90 index 28a047c4e..e240fc8f1 100644 --- a/cicecore/cicedyn/dynamics/ice_dyn_eap.F90 +++ b/cicecore/cicedyn/dynamics/ice_dyn_eap.F90 @@ -25,6 +25,7 @@ module ice_dyn_eap p001, p027, p055, p111, p166, p222, p25, p333 use ice_fileunits, only: nu_diag, nu_dump_eap, nu_restart_eap use ice_exit, only: abort_ice + use ice_flux, only: rdg_shear ! use ice_timers, only: & ! ice_timer_start, ice_timer_stop, & ! timer_tmp1, timer_tmp2, timer_tmp3, timer_tmp4, & @@ -36,8 +37,7 @@ module ice_dyn_eap implicit none private - public :: eap, init_eap, write_restart_eap, read_restart_eap, & - alloc_dyn_eap + public :: eap, init_eap, write_restart_eap, read_restart_eap ! Look-up table needed for calculating structure tensor integer (int_kind), parameter :: & @@ -71,42 +71,16 @@ module ice_dyn_eap real (kind=dbl_kind) :: & puny, pi, pi2, piq, pih -!======================================================================= - - contains - -!======================================================================= -! Allocate space for all variables -! 
- subroutine alloc_dyn_eap + real (kind=dbl_kind), parameter :: & + kfriction = 0.45_dbl_kind - integer (int_kind) :: ierr + real (kind=dbl_kind), save :: & + invdx, invdy, invda, invsin - character(len=*), parameter :: subname = '(alloc_dyn_eap)' - allocate( a11_1 (nx_block,ny_block,max_blocks), & - a11_2 (nx_block,ny_block,max_blocks), & - a11_3 (nx_block,ny_block,max_blocks), & - a11_4 (nx_block,ny_block,max_blocks), & - a12_1 (nx_block,ny_block,max_blocks), & - a12_2 (nx_block,ny_block,max_blocks), & - a12_3 (nx_block,ny_block,max_blocks), & - a12_4 (nx_block,ny_block,max_blocks), & - e11 (nx_block,ny_block,max_blocks), & - e12 (nx_block,ny_block,max_blocks), & - e22 (nx_block,ny_block,max_blocks), & - yieldstress11(nx_block,ny_block,max_blocks), & - yieldstress12(nx_block,ny_block,max_blocks), & - yieldstress22(nx_block,ny_block,max_blocks), & - s11 (nx_block,ny_block,max_blocks), & - s12 (nx_block,ny_block,max_blocks), & - s22 (nx_block,ny_block,max_blocks), & - a11 (nx_block,ny_block,max_blocks), & - a12 (nx_block,ny_block,max_blocks), & - stat=ierr) - if (ierr/=0) call abort_ice(subname//' ERROR: Out of memory') +!======================================================================= - end subroutine alloc_dyn_eap + contains !======================================================================= ! Elastic-anisotropic-plastic dynamics driver @@ -134,7 +108,8 @@ subroutine eap (dt) dyn_prep1, dyn_prep2, stepu, dyn_finish, & seabed_stress_factor_LKD, seabed_stress_factor_prob, & seabed_stress_method, seabed_stress, & - stack_fields, unstack_fields, iceTmask, iceUmask + stack_fields, unstack_fields, iceTmask, iceUmask, & + fld2, fld3, fld4 use ice_flux, only: rdg_conv, strairxT, strairyT, & strairxU, strairyU, uocn, vocn, ss_tltx, ss_tlty, fmU, & strtltxU, strtltyU, strocnxU, strocnyU, strintxU, strintyU, taubxU, taubyU, & @@ -186,11 +161,6 @@ subroutine eap (dt) umass , & ! total mass of ice and snow (u grid) umassdti ! 
mass of U-cell/dte (kg/m^2 s) - real (kind=dbl_kind), allocatable :: & - fld2(:,:,:,:), & ! temporary for stacking fields for halo update - fld3(:,:,:,:), & ! temporary for stacking fields for halo update - fld4(:,:,:,:) ! temporary for stacking fields for halo update - real (kind=dbl_kind), dimension(nx_block,ny_block,8):: & strtmp ! stress combinations for momentum equation @@ -214,10 +184,6 @@ subroutine eap (dt) ! Initialize !----------------------------------------------------------------- - allocate(fld2(nx_block,ny_block,2,max_blocks)) - allocate(fld3(nx_block,ny_block,3,max_blocks)) - allocate(fld4(nx_block,ny_block,4,max_blocks)) - ! This call is needed only if dt changes during runtime. ! call set_evp_parameters (dt) @@ -226,7 +192,7 @@ subroutine eap (dt) do j = 1, ny_block do i = 1, nx_block rdg_conv (i,j,iblk) = c0 -! rdg_shear(i,j,iblk) = c0 + rdg_shear(i,j,iblk) = c0 ! always zero. Could be moved divu (i,j,iblk) = c0 shear(i,j,iblk) = c0 e11(i,j,iblk) = c0 @@ -554,7 +520,6 @@ subroutine eap (dt) enddo ! subcycling - deallocate(fld2,fld3,fld4) if (maskhalo_dyn) call ice_HaloDestroy(halo_info_mask) !----------------------------------------------------------------- @@ -588,6 +553,8 @@ subroutine init_eap use ice_blocks, only: nx_block, ny_block use ice_domain, only: nblocks + use ice_calendar, only: dt_dyn + use ice_dyn_shared, only: init_dyn_shared ! local variables @@ -599,7 +566,7 @@ subroutine init_eap eps6 = 1.0e-6_dbl_kind integer (kind=int_kind) :: & - ix, iy, iz, ia + ix, iy, iz, ia, ierr integer (kind=int_kind), parameter :: & nz = 100 @@ -609,6 +576,8 @@ subroutine init_eap da, dx, dy, dz, & phi + real (kind=dbl_kind) :: invstressconviso + character(len=*), parameter :: subname = '(init_eap)' call icepack_query_parameters(puny_out=puny, & @@ -619,6 +588,31 @@ subroutine init_eap phi = pi/c12 ! 
diamond shaped floe smaller angle (default phi = 30 deg) + call init_dyn_shared(dt_dyn) + + allocate( a11_1 (nx_block,ny_block,max_blocks), & + a11_2 (nx_block,ny_block,max_blocks), & + a11_3 (nx_block,ny_block,max_blocks), & + a11_4 (nx_block,ny_block,max_blocks), & + a12_1 (nx_block,ny_block,max_blocks), & + a12_2 (nx_block,ny_block,max_blocks), & + a12_3 (nx_block,ny_block,max_blocks), & + a12_4 (nx_block,ny_block,max_blocks), & + e11 (nx_block,ny_block,max_blocks), & + e12 (nx_block,ny_block,max_blocks), & + e22 (nx_block,ny_block,max_blocks), & + yieldstress11(nx_block,ny_block,max_blocks), & + yieldstress12(nx_block,ny_block,max_blocks), & + yieldstress22(nx_block,ny_block,max_blocks), & + s11 (nx_block,ny_block,max_blocks), & + s12 (nx_block,ny_block,max_blocks), & + s22 (nx_block,ny_block,max_blocks), & + a11 (nx_block,ny_block,max_blocks), & + a12 (nx_block,ny_block,max_blocks), & + stat=ierr) + if (ierr/=0) call abort_ice(subname//' ERROR: Out of memory') + + !$OMP PARALLEL DO PRIVATE(iblk,i,j) SCHEDULE(runtime) do iblk = 1, nblocks do j = 1, ny_block @@ -640,6 +634,7 @@ subroutine init_eap a12_2 (i,j,iblk) = c0 a12_3 (i,j,iblk) = c0 a12_4 (i,j,iblk) = c0 + rdg_shear (i,j,iblk) = c0 enddo ! i enddo ! j enddo ! iblk @@ -657,6 +652,9 @@ subroutine init_eap zinit = -pih dy = pi/real(ny_yield-1,kind=dbl_kind) yinit = -dy + invdx = c1/dx + invdy = c1/dy + invda = c1/da do ia=1,na_yield do ix=1,nx_yield @@ -712,6 +710,12 @@ subroutine init_eap enddo enddo + ! Factor to maintain the same stress as in EVP (see Section 3) + ! 
Can be set to 1 otherwise + + invstressconviso = c1/(c1+kfriction*kfriction) + invsin = c1/sin(pi2/c12) * invstressconviso + end subroutine init_eap !======================================================================= @@ -1590,22 +1594,12 @@ subroutine update_stress_rdg (ksub, ndte, divu, tension, & rotstemp11s, rotstemp12s, rotstemp22s, & sig11, sig12, sig22, & sgprm11, sgprm12, sgprm22, & - invstressconviso, & Angle_denom_gamma, Angle_denom_alpha, & Tany_1, Tany_2, & x, y, dx, dy, da, & dtemp1, dtemp2, atempprime, & kxw, kyw, kaw - real (kind=dbl_kind), save :: & - invdx, invdy, invda, invsin - - logical (kind=log_kind), save :: & - first_call = .true. - - real (kind=dbl_kind), parameter :: & - kfriction = 0.45_dbl_kind - ! tcraig, temporary, should be moved to namelist ! turns on interpolation in stress_rdg logical(kind=log_kind), parameter :: & @@ -1613,14 +1607,6 @@ subroutine update_stress_rdg (ksub, ndte, divu, tension, & character(len=*), parameter :: subname = '(update_stress_rdg)' - ! Factor to maintain the same stress as in EVP (see Section 3) - ! Can be set to 1 otherwise - - if (first_call) then - invstressconviso = c1/(c1+kfriction*kfriction) - invsin = c1/sin(pi2/c12) * invstressconviso - endif - ! compute eigenvalues, eigenvectors and angles for structure tensor, strain rates ! 1) structure tensor @@ -1717,17 +1703,6 @@ subroutine update_stress_rdg (ksub, ndte, divu, tension, & if (y > pi) y = y - pi if (y < 0) y = y + pi - ! Now calculate updated stress tensor - - if (first_call) then - dx = pi/real(nx_yield-1,kind=dbl_kind) - dy = pi/real(ny_yield-1,kind=dbl_kind) - da = p5/real(na_yield-1,kind=dbl_kind) - invdx = c1/dx - invdy = c1/dy - invda = c1/da - endif - if (interpolate_stress_rdg) then ! Interpolated lookup @@ -1869,8 +1844,6 @@ subroutine update_stress_rdg (ksub, ndte, divu, tension, & + rotstemp22s*dtemp22 endif - first_call = .false. 
- end subroutine update_stress_rdg !======================================================================= diff --git a/cicecore/cicedyn/dynamics/ice_dyn_evp.F90 b/cicecore/cicedyn/dynamics/ice_dyn_evp.F90 index 301a89916..cf111cccf 100644 --- a/cicecore/cicedyn/dynamics/ice_dyn_evp.F90 +++ b/cicecore/cicedyn/dynamics/ice_dyn_evp.F90 @@ -84,12 +84,6 @@ module ice_dyn_evp emass (:,:,:) , & ! total mass of ice and snow (E grid) emassdti (:,:,:) ! mass of E-cell/dte (kg/m^2 s) - real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & - ratiodxN , & ! - dxN(i+1,j) / dxN(i,j) - ratiodyE , & ! - dyE(i ,j+1) / dyE(i,j) - ratiodxNr , & ! 1 / ratiodxN - ratiodyEr ! 1 / ratiodyE - real (kind=dbl_kind), allocatable :: & strengthU(:,:,:) , & ! strength averaged to U points divergU (:,:,:) , & ! div array on U points, differentiate from divu @@ -125,33 +119,19 @@ module ice_dyn_evp ! Elastic-viscous-plastic dynamics driver ! subroutine init_evp - use ice_blocks, only: get_block, nx_block, ny_block, nghost, block + use ice_blocks, only: nx_block, ny_block use ice_domain_size, only: max_blocks - use ice_domain, only: nblocks, blocks_ice - use ice_grid, only: grid_ice, dyT, dxT, uarear, tmask, G_HTE, G_HTN, dxN, dyE + use ice_grid, only: grid_ice use ice_calendar, only: dt_dyn - use ice_dyn_shared, only: init_dyn_shared, evp_algorithm - use ice_dyn_evp1d, only: dyn_evp1d_init + use ice_dyn_shared, only: init_dyn_shared !allocate c and cd grid var. Follow structucre of eap integer (int_kind) :: ierr - character(len=*), parameter :: subname = '(init_evp)' - - type (block) :: & - this_block ! block information for current block - - integer (kind=int_kind) :: & - i, j, iblk , & ! block index - ilo,ihi,jlo,jhi ! beginning and end of physical domain - + character(len=*), parameter :: subname = '(alloc_dyn_evp)' call init_dyn_shared(dt_dyn) - if (evp_algorithm == "shared_mem_1d" ) then - call dyn_evp1d_init - endif - allocate( uocnU (nx_block,ny_block,max_blocks), & ! 
i ocean current (m/s) vocnU (nx_block,ny_block,max_blocks), & ! j ocean current (m/s) ss_tltxU (nx_block,ny_block,max_blocks), & ! sea surface slope, x-direction (m/m) @@ -212,37 +192,10 @@ subroutine init_evp stat=ierr) if (ierr/=0) call abort_ice(subname//' ERROR: Out of memory E evp') - allocate( ratiodxN (nx_block,ny_block,max_blocks), & - ratiodyE (nx_block,ny_block,max_blocks), & - ratiodxNr(nx_block,ny_block,max_blocks), & - ratiodyEr(nx_block,ny_block,max_blocks), & - stat=ierr) - if (ierr/=0) call abort_ice(subname//' ERROR: Out of memory ratio') - - !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) - do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi - - do j = jlo, jhi - do i = ilo, ihi - ratiodxN (i,j,iblk) = - dxN(i+1,j ,iblk) / dxN(i,j,iblk) - ratiodyE (i,j,iblk) = - dyE(i ,j+1,iblk) / dyE(i,j,iblk) - ratiodxNr(i,j,iblk) = c1 / ratiodxN(i,j,iblk) - ratiodyEr(i,j,iblk) = c1 / ratiodyE(i,j,iblk) - enddo - enddo - enddo ! iblk - !$OMP END PARALLEL DO - endif end subroutine init_evp -!======================================================================= #ifdef CICE_IN_NEMO ! Wind stress is set during this routine from the values supplied ! via NEMO (unless calc_strair is true). 
These values are supplied @@ -260,7 +213,7 @@ subroutine evp (dt) ice_HaloDestroy, ice_HaloUpdate_stress use ice_blocks, only: block, get_block, nx_block, ny_block, nghost use ice_domain, only: nblocks, blocks_ice, halo_info, maskhalo_dyn - use ice_domain_size, only: max_blocks, ncat + use ice_domain_size, only: max_blocks, ncat, nx_global, ny_global use ice_flux, only: rdg_conv, rdg_shear, strairxT, strairyT, & strairxU, strairyU, uocn, vocn, ss_tltx, ss_tlty, fmU, & strtltxU, strtltyU, strocnxU, strocnyU, strintxU, strintyU, taubxU, taubyU, & @@ -279,20 +232,23 @@ subroutine evp (dt) stresspU, stressmU, stress12U use ice_grid, only: tmask, umask, umaskCD, nmask, emask, uvm, epm, npm, & dxE, dxN, dxT, dxU, dyE, dyN, dyT, dyU, & + ratiodxN, ratiodxNr, ratiodyE, ratiodyEr, & + dxhy, dyhx, cxp, cyp, cxm, cym, & tarear, uarear, earear, narear, grid_average_X2Y, uarea, & grid_type, grid_ice, & grid_atm_dynu, grid_atm_dynv, grid_ocn_dynu, grid_ocn_dynv use ice_state, only: aice, aiU, vice, vsno, uvel, vvel, uvelN, vvelN, & - uvelE, vvelE, divu, shear, vort, & + uvelE, vvelE, divu, shear, & aice_init, aice0, aicen, vicen, strength use ice_timers, only: timer_dynamics, timer_bound, & - ice_timer_start, ice_timer_stop, timer_evp + ice_timer_start, ice_timer_stop, timer_evp_1d, timer_evp_2d + use ice_dyn_evp_1d, only: ice_dyn_evp_1d_copyin, ice_dyn_evp_1d_kernel, & + ice_dyn_evp_1d_copyout use ice_dyn_shared, only: evp_algorithm, stack_fields, unstack_fields, & DminTarea, visc_method, deformations, deformationsC_T, deformationsCD_T, & - strain_rates_U, dxhy, dyhx, cxp, cyp, cxm, cym, & + strain_rates_U, & iceTmask, iceUmask, iceEmask, iceNmask, & dyn_haloUpdate, fld2, fld3, fld4 - use ice_dyn_evp1d, only: dyn_evp1d_run real (kind=dbl_kind), intent(in) :: & dt ! 
time step @@ -385,7 +341,6 @@ subroutine evp (dt) rdg_shear(i,j,iblk) = c0 divu (i,j,iblk) = c0 shear(i,j,iblk) = c0 - vort (i,j,iblk) = c0 enddo enddo @@ -808,14 +763,12 @@ subroutine evp (dt) icellE (iblk), & indxEi (:,iblk), indxEj(:,iblk), & vice (:,:,iblk), aice(:,:,iblk), & - hwater(:,:,iblk), TbE (:,:,iblk), & - grid_location='E') + hwater(:,:,iblk), TbE (:,:,iblk)) call seabed_stress_factor_LKD (nx_block , ny_block, & icellN (iblk), & indxNi (:,iblk), indxNj(:,iblk), & vice (:,:,iblk), aice(:,:,iblk), & - hwater(:,:,iblk), TbN (:,:,iblk), & - grid_location='N') + hwater(:,:,iblk), TbN (:,:,iblk)) enddo !$OMP END PARALLEL DO @@ -838,23 +791,40 @@ subroutine evp (dt) endif - call ice_timer_start(timer_evp) + if (evp_algorithm == "shared_mem_1d" ) then - if (grid_ice == "B") then + if (trim(grid_type) == 'tripole') then + call abort_ice(trim(subname)//' & + & Kernel not tested on tripole grid. Set evp_algorithm=standard_2d') + endif - if (evp_algorithm == "shared_mem_1d" ) then + call ice_timer_start(timer_evp_1d) + call ice_dyn_evp_1d_copyin( & + nx_block,ny_block,nblocks,nx_global+2*nghost,ny_global+2*nghost, & + iceTmask, iceUmask, & + cdn_ocnU,aiU,uocnU,vocnU,forcexU,forceyU,TbU, & + umassdti,fmU,uarear,tarear,strintxU,strintyU,uvel_init,vvel_init,& + strength,uvel,vvel,dxT,dyT, & + stressp_1 ,stressp_2, stressp_3, stressp_4, & + stressm_1 ,stressm_2, stressm_3, stressm_4, & + stress12_1,stress12_2,stress12_3,stress12_4 ) + call ice_dyn_evp_1d_kernel() + call ice_dyn_evp_1d_copyout( & + nx_block,ny_block,nblocks,nx_global+2*nghost,ny_global+2*nghost, & +!strocn uvel,vvel, strocnxU,strocnyU, strintxU,strintyU, & + uvel,vvel, strintxU,strintyU, & + stressp_1, stressp_2, stressp_3, stressp_4, & + stressm_1, stressm_2, stressm_3, stressm_4, & + stress12_1,stress12_2,stress12_3,stress12_4, & + divu,rdg_conv,rdg_shear,shear,taubxU,taubyU ) + call ice_timer_stop(timer_evp_1d) + + else ! 
evp_algorithm == standard_2d (Standard CICE) + + call ice_timer_start(timer_evp_2d) - call dyn_evp1d_run(stressp_1 , stressp_2, stressp_3 , stressp_4 , & - stressm_1 , stressm_2 , stressm_3 , stressm_4 , & - stress12_1, stress12_2, stress12_3, stress12_4, & - strength , & - cdn_ocnU , aiu , uocnU , vocnU , & - waterxU , wateryU , forcexU , forceyU , & - umassdti , fmU , strintxU , strintyU , & - Tbu , taubxU , taubyU , uvel , & - vvel , icetmask , iceUmask) + if (grid_ice == "B") then - else ! evp_algorithm == standard_2d (Standard CICE) do ksub = 1,ndte ! subcycling !$OMP PARALLEL DO PRIVATE(iblk,strtmp) SCHEDULE(runtime) @@ -879,7 +849,7 @@ subroutine evp (dt) stressm_3 (:,:,iblk), stressm_4 (:,:,iblk), & stress12_1(:,:,iblk), stress12_2(:,:,iblk), & stress12_3(:,:,iblk), stress12_4(:,:,iblk), & - strtmp (:,:,:)) + strtmp (:,:,:) ) !----------------------------------------------------------------- ! momentum equation @@ -909,406 +879,406 @@ subroutine evp (dt) uvel, vvel) enddo ! sub cycling - endif ! evp algorithm - - !----------------------------------------------------------------- - ! save quantities for mechanical redistribution - !----------------------------------------------------------------- - - !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) - do iblk = 1, nblocks - call deformations (nx_block , ny_block , & - icellT (iblk), & - indxTi (:,iblk), indxTj (:,iblk), & - uvel (:,:,iblk), vvel (:,:,iblk), & - dxT (:,:,iblk), dyT (:,:,iblk), & - dxU (:,:,iblk), dyU (:,:,iblk), & - cxp (:,:,iblk), cyp (:,:,iblk), & - cxm (:,:,iblk), cym (:,:,iblk), & - tarear (:,:,iblk), vort (:,:,iblk), & - shear (:,:,iblk), divu (:,:,iblk), & - rdg_conv(:,:,iblk), rdg_shear(:,:,iblk) ) - enddo - !$OMP END PARALLEL DO - - elseif (grid_ice == "C") then - - do ksub = 1,ndte ! subcycling - - !$OMP PARALLEL DO PRIVATE(iblk) - do iblk = 1, nblocks !----------------------------------------------------------------- - ! strain rates at U point - ! 
NOTE these are actually strain rates * area (m^2/s) + ! save quantities for mechanical redistribution !----------------------------------------------------------------- - call strain_rates_U (nx_block , ny_block , & - icellU (iblk), & - indxUi (:,iblk), indxUj (:,iblk), & - uvelE (:,:,iblk), vvelE (:,:,iblk), & - uvelN (:,:,iblk), vvelN (:,:,iblk), & - uvel (:,:,iblk), vvel (:,:,iblk), & - dxE (:,:,iblk), dyN (:,:,iblk), & - dxU (:,:,iblk), dyU (:,:,iblk), & - ratiodxN(:,:,iblk), ratiodxNr(:,:,iblk), & - ratiodyE(:,:,iblk), ratiodyEr(:,:,iblk), & - epm (:,:,iblk), npm (:,:,iblk), & - divergU (:,:,iblk), tensionU (:,:,iblk), & - shearU (:,:,iblk), deltaU (:,:,iblk) ) - - enddo ! iblk - !$OMP END PARALLEL DO - ! calls ice_haloUpdate, controls bundles and masks - call dyn_haloUpdate (halo_info, halo_info_mask, & - field_loc_NEcorner, field_type_scalar, & - shearU) - - !$OMP PARALLEL DO PRIVATE(iblk) - do iblk = 1, nblocks - call stressC_T (nx_block , ny_block , & - icellT (iblk), & - indxTi (:,iblk), indxTj (:,iblk), & - uvelE (:,:,iblk), vvelE (:,:,iblk), & - uvelN (:,:,iblk), vvelN (:,:,iblk), & - dxN (:,:,iblk), dyE (:,:,iblk), & - dxT (:,:,iblk), dyT (:,:,iblk), & - uarea (:,:,iblk), DminTarea (:,:,iblk), & - strength (:,:,iblk), shearU (:,:,iblk), & - zetax2T (:,:,iblk), etax2T (:,:,iblk), & - stresspT (:,:,iblk), stressmT (:,:,iblk), & - stress12T (:,:,iblk)) - - enddo - !$OMP END PARALLEL DO - - ! 
calls ice_haloUpdate, controls bundles and masks - call dyn_haloUpdate (halo_info, halo_info_mask, & - field_loc_center, field_type_scalar, & - zetax2T, etax2T, stresspT, stressmT) - - if (visc_method == 'avg_strength') then - call grid_average_X2Y('S', strength, 'T', strengthU, 'U') - elseif (visc_method == 'avg_zeta') then - call grid_average_X2Y('S', etax2T , 'T', etax2U , 'U') - endif - - !$OMP PARALLEL DO PRIVATE(iblk) + !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks - call stressC_U (nx_block , ny_block , & - icellU (iblk), & - indxUi (:,iblk), indxUj (:,iblk), & - uarea (:,:,iblk), & - etax2U (:,:,iblk), deltaU (:,:,iblk), & - strengthU (:,:,iblk), shearU (:,:,iblk), & - stress12U (:,:,iblk)) + call deformations (nx_block , ny_block , & + icellT (iblk), & + indxTi (:,iblk), indxTj (:,iblk), & + uvel (:,:,iblk), vvel (:,:,iblk), & + dxT (:,:,iblk), dyT (:,:,iblk), & + cxp (:,:,iblk), cyp (:,:,iblk), & + cxm (:,:,iblk), cym (:,:,iblk), & + tarear (:,:,iblk), & + shear (:,:,iblk), divu (:,:,iblk), & + rdg_conv(:,:,iblk), rdg_shear(:,:,iblk) ) enddo !$OMP END PARALLEL DO - ! calls ice_haloUpdate, controls bundles and masks - call dyn_haloUpdate (halo_info , halo_info_mask, & - field_loc_NEcorner, field_type_scalar, & - stress12U) - !$OMP PARALLEL DO PRIVATE(iblk) - do iblk = 1, nblocks + elseif (grid_ice == "C") then - call div_stress_Ex (nx_block , ny_block , & - icellE (iblk), & - indxEi (:,iblk), indxEj (:,iblk), & - dxE (:,:,iblk), dyE (:,:,iblk), & - dxU (:,:,iblk), dyT (:,:,iblk), & - earear (:,:,iblk) , & - stresspT (:,:,iblk), stressmT (:,:,iblk), & - stress12U (:,:,iblk), strintxE (:,:,iblk) ) - - call div_stress_Ny (nx_block , ny_block , & - icellN (iblk), & - indxNi (:,iblk), indxNj (:,iblk), & - dxN (:,:,iblk), dyN (:,:,iblk), & - dxT (:,:,iblk), dyU (:,:,iblk), & - narear (:,:,iblk) , & - stresspT (:,:,iblk), stressmT (:,:,iblk), & - stress12U (:,:,iblk), strintyN (:,:,iblk) ) + do ksub = 1,ndte ! 
subcycling - enddo - !$OMP END PARALLEL DO + !$OMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks - !$OMP PARALLEL DO PRIVATE(iblk) - do iblk = 1, nblocks + !----------------------------------------------------------------- + ! strain rates at U point + ! NOTE these are actually strain rates * area (m^2/s) + !----------------------------------------------------------------- + call strain_rates_U (nx_block , ny_block , & + icellU (iblk), & + indxUi (:,iblk), indxUj (:,iblk), & + uvelE (:,:,iblk), vvelE (:,:,iblk), & + uvelN (:,:,iblk), vvelN (:,:,iblk), & + uvel (:,:,iblk), vvel (:,:,iblk), & + dxE (:,:,iblk), dyN (:,:,iblk), & + dxU (:,:,iblk), dyU (:,:,iblk), & + ratiodxN(:,:,iblk), ratiodxNr(:,:,iblk), & + ratiodyE(:,:,iblk), ratiodyEr(:,:,iblk), & + epm (:,:,iblk), npm (:,:,iblk), & + divergU (:,:,iblk), tensionU (:,:,iblk), & + shearU (:,:,iblk), deltaU (:,:,iblk) ) - call stepu_C (nx_block , ny_block , & ! u, E point - icellE (iblk), Cdn_ocnE (:,:,iblk), & - indxEi (:,iblk), indxEj (:,iblk), & - aiE (:,:,iblk), & - uocnE (:,:,iblk), vocnE (:,:,iblk), & - waterxE (:,:,iblk), forcexE (:,:,iblk), & - emassdti (:,:,iblk), fmE (:,:,iblk), & - strintxE (:,:,iblk), taubxE (:,:,iblk), & - uvelE_init(:,:,iblk), & - uvelE (:,:,iblk), vvelE (:,:,iblk), & - TbE (:,:,iblk)) + enddo ! iblk + !$OMP END PARALLEL DO - call stepv_C (nx_block, ny_block, & ! v, N point - icellN (iblk), Cdn_ocnN (:,:,iblk), & - indxNi (:,iblk), indxNj (:,iblk), & - aiN (:,:,iblk), & - uocnN (:,:,iblk), vocnN (:,:,iblk), & - wateryN (:,:,iblk), forceyN (:,:,iblk), & - nmassdti (:,:,iblk), fmN (:,:,iblk), & - strintyN (:,:,iblk), taubyN (:,:,iblk), & - vvelN_init(:,:,iblk), & - uvelN (:,:,iblk), vvelN (:,:,iblk), & - TbN (:,:,iblk)) - enddo - !$OMP END PARALLEL DO + ! calls ice_haloUpdate, controls bundles and masks + call dyn_haloUpdate (halo_info, halo_info_mask, & + field_loc_NEcorner, field_type_scalar, & + shearU) - ! 
calls ice_haloUpdate, controls bundles and masks - call dyn_haloUpdate (halo_info, halo_info_mask, & - field_loc_Eface, field_type_vector, & - uvelE) - call dyn_haloUpdate (halo_info, halo_info_mask, & - field_loc_Nface, field_type_vector, & - vvelN) + !$OMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks + call stressC_T (nx_block , ny_block , & + icellT (iblk), & + indxTi (:,iblk), indxTj (:,iblk), & + uvelE (:,:,iblk), vvelE (:,:,iblk), & + uvelN (:,:,iblk), vvelN (:,:,iblk), & + dxN (:,:,iblk), dyE (:,:,iblk), & + dxT (:,:,iblk), dyT (:,:,iblk), & + uarea (:,:,iblk), DminTarea (:,:,iblk), & + strength (:,:,iblk), shearU (:,:,iblk), & + zetax2T (:,:,iblk), etax2T (:,:,iblk), & + stresspT (:,:,iblk), stressmT (:,:,iblk), & + stress12T (:,:,iblk)) - call grid_average_X2Y('A', uvelE, 'E', uvelN, 'N') - call grid_average_X2Y('A', vvelN, 'N', vvelE, 'E') - uvelN(:,:,:) = uvelN(:,:,:)*npm(:,:,:) - vvelE(:,:,:) = vvelE(:,:,:)*epm(:,:,:) + enddo + !$OMP END PARALLEL DO - ! calls ice_haloUpdate, controls bundles and masks - call dyn_haloUpdate (halo_info, halo_info_mask, & - field_loc_Nface, field_type_vector, & - uvelN) - call dyn_haloUpdate (halo_info, halo_info_mask, & - field_loc_Eface, field_type_vector, & - vvelE) + ! 
calls ice_haloUpdate, controls bundles and masks + call dyn_haloUpdate (halo_info, halo_info_mask, & + field_loc_center, field_type_scalar, & + zetax2T, etax2T, stresspT, stressmT) + + if (visc_method == 'avg_strength') then + call grid_average_X2Y('S', strength, 'T', strengthU, 'U') + elseif (visc_method == 'avg_zeta') then + call grid_average_X2Y('S', etax2T , 'T', etax2U , 'U') + endif - call grid_average_X2Y('S', uvelE, 'E', uvel, 'U') - call grid_average_X2Y('S', vvelN, 'N', vvel, 'U') + !$OMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks + call stressC_U (nx_block , ny_block , & + icellU (iblk), & + indxUi (:,iblk), indxUj (:,iblk), & + uarea (:,:,iblk), & + etax2U (:,:,iblk), deltaU (:,:,iblk), & + strengthU (:,:,iblk), shearU (:,:,iblk), & + stress12U (:,:,iblk)) + enddo + !$OMP END PARALLEL DO - uvel(:,:,:) = uvel(:,:,:)*uvm(:,:,:) - vvel(:,:,:) = vvel(:,:,:)*uvm(:,:,:) - ! U fields at NE corner - ! calls ice_haloUpdate, controls bundles and masks - call dyn_haloUpdate (halo_info, halo_info_mask, & - field_loc_NEcorner, field_type_vector, & - uvel, vvel) + ! calls ice_haloUpdate, controls bundles and masks + call dyn_haloUpdate (halo_info , halo_info_mask, & + field_loc_NEcorner, field_type_scalar, & + stress12U) - enddo ! subcycling + !$OMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks - !----------------------------------------------------------------- - ! 
save quantities for mechanical redistribution - !----------------------------------------------------------------- + call div_stress_Ex (nx_block , ny_block , & + icellE (iblk), & + indxEi (:,iblk), indxEj (:,iblk), & + dxE (:,:,iblk), dyE (:,:,iblk), & + dxU (:,:,iblk), dyT (:,:,iblk), & + earear (:,:,iblk) , & + stresspT (:,:,iblk), stressmT (:,:,iblk), & + stress12U (:,:,iblk), strintxE (:,:,iblk) ) + + call div_stress_Ny (nx_block , ny_block , & + icellN (iblk), & + indxNi (:,iblk), indxNj (:,iblk), & + dxN (:,:,iblk), dyN (:,:,iblk), & + dxT (:,:,iblk), dyU (:,:,iblk), & + narear (:,:,iblk) , & + stresspT (:,:,iblk), stressmT (:,:,iblk), & + stress12U (:,:,iblk), strintyN (:,:,iblk) ) - !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) - do iblk = 1, nblocks - call deformationsC_T (nx_block , ny_block , & - icellT (iblk), & - indxTi (:,iblk), indxTj (:,iblk), & - uvelE (:,:,iblk), vvelE (:,:,iblk), & - uvelN (:,:,iblk), vvelN (:,:,iblk), & - dxN (:,:,iblk), dyE (:,:,iblk), & - dxT (:,:,iblk), dyT (:,:,iblk), & - tarear (:,:,iblk), uarea (:,:,iblk), & - shearU (:,:,iblk), vort (:,:,iblk), & - shear (:,:,iblk), divu (:,:,iblk), & - rdg_conv(:,:,iblk), rdg_shear(:,:,iblk)) - enddo - !$OMP END PARALLEL DO + enddo + !$OMP END PARALLEL DO - elseif (grid_ice == "CD") then + !$OMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks - do ksub = 1,ndte ! subcycling + call stepu_C (nx_block , ny_block , & ! 
u, E point + icellE (iblk), Cdn_ocnE (:,:,iblk), & + indxEi (:,iblk), indxEj (:,iblk), & + aiE (:,:,iblk), & + uocnE (:,:,iblk), vocnE (:,:,iblk), & + waterxE (:,:,iblk), forcexE (:,:,iblk), & + emassdti (:,:,iblk), fmE (:,:,iblk), & + strintxE (:,:,iblk), taubxE (:,:,iblk), & + uvelE_init(:,:,iblk), & + uvelE (:,:,iblk), vvelE (:,:,iblk), & + TbE (:,:,iblk)) - !$OMP PARALLEL DO PRIVATE(iblk) - do iblk = 1, nblocks - call stressCD_T (nx_block , ny_block , & - icellT (iblk), & - indxTi (:,iblk), indxTj (:,iblk), & - uvelE (:,:,iblk), vvelE (:,:,iblk), & - uvelN (:,:,iblk), vvelN (:,:,iblk), & - dxN (:,:,iblk), dyE (:,:,iblk), & - dxT (:,:,iblk), dyT (:,:,iblk), & - DminTarea (:,:,iblk), & - strength (:,:,iblk), & - zetax2T (:,:,iblk), etax2T (:,:,iblk), & - stresspT (:,:,iblk), stressmT (:,:,iblk), & - stress12T (:,:,iblk) ) + call stepv_C (nx_block, ny_block, & ! v, N point + icellN (iblk), Cdn_ocnN (:,:,iblk), & + indxNi (:,iblk), indxNj (:,iblk), & + aiN (:,:,iblk), & + uocnN (:,:,iblk), vocnN (:,:,iblk), & + wateryN (:,:,iblk), forceyN (:,:,iblk), & + nmassdti (:,:,iblk), fmN (:,:,iblk), & + strintyN (:,:,iblk), taubyN (:,:,iblk), & + vvelN_init(:,:,iblk), & + uvelN (:,:,iblk), vvelN (:,:,iblk), & + TbN (:,:,iblk)) + enddo + !$OMP END PARALLEL DO - enddo - !$OMP END PARALLEL DO + ! calls ice_haloUpdate, controls bundles and masks + call dyn_haloUpdate (halo_info, halo_info_mask, & + field_loc_Eface, field_type_vector, & + uvelE) + call dyn_haloUpdate (halo_info, halo_info_mask, & + field_loc_Nface, field_type_vector, & + vvelN) + + call grid_average_X2Y('A', uvelE, 'E', uvelN, 'N') + call grid_average_X2Y('A', vvelN, 'N', vvelE, 'E') + uvelN(:,:,:) = uvelN(:,:,:)*npm(:,:,:) + vvelE(:,:,:) = vvelE(:,:,:)*epm(:,:,:) - ! calls ice_haloUpdate, controls bundles and masks - call dyn_haloUpdate (halo_info, halo_info_mask, & - field_loc_center, field_type_scalar, & - zetax2T, etax2T) + ! 
calls ice_haloUpdate, controls bundles and masks + call dyn_haloUpdate (halo_info, halo_info_mask, & + field_loc_Nface, field_type_vector, & + uvelN) + call dyn_haloUpdate (halo_info, halo_info_mask, & + field_loc_Eface, field_type_vector, & + vvelE) + + call grid_average_X2Y('S', uvelE, 'E', uvel, 'U') + call grid_average_X2Y('S', vvelN, 'N', vvel, 'U') + + uvel(:,:,:) = uvel(:,:,:)*uvm(:,:,:) + vvel(:,:,:) = vvel(:,:,:)*uvm(:,:,:) + ! U fields at NE corner + ! calls ice_haloUpdate, controls bundles and masks + call dyn_haloUpdate (halo_info, halo_info_mask, & + field_loc_NEcorner, field_type_vector, & + uvel, vvel) - if (visc_method == 'avg_strength') then - call grid_average_X2Y('S', strength, 'T', strengthU, 'U') - elseif (visc_method == 'avg_zeta') then - call grid_average_X2Y('S', zetax2T , 'T', zetax2U , 'U') - call grid_average_X2Y('S', etax2T , 'T', etax2U , 'U') - endif + enddo ! subcycling - !$OMP PARALLEL DO PRIVATE(iblk) - do iblk = 1, nblocks !----------------------------------------------------------------- - ! strain rates at U point - ! NOTE these are actually strain rates * area (m^2/s) + ! 
save quantities for mechanical redistribution !----------------------------------------------------------------- - call strain_rates_U (nx_block , ny_block , & - icellU (iblk), & - indxUi (:,iblk), indxUj (:,iblk), & - uvelE (:,:,iblk), vvelE (:,:,iblk), & - uvelN (:,:,iblk), vvelN (:,:,iblk), & - uvel (:,:,iblk), vvel (:,:,iblk), & - dxE (:,:,iblk), dyN (:,:,iblk), & - dxU (:,:,iblk), dyU (:,:,iblk), & - ratiodxN (:,:,iblk), ratiodxNr(:,:,iblk), & - ratiodyE (:,:,iblk), ratiodyEr(:,:,iblk), & - epm (:,:,iblk), npm (:,:,iblk), & - divergU (:,:,iblk), tensionU (:,:,iblk), & - shearU (:,:,iblk), DeltaU (:,:,iblk) ) - - call stressCD_U (nx_block , ny_block , & - icellU (iblk), & - indxUi (:,iblk), indxUj (:,iblk), & - uarea (:,:,iblk), & - zetax2U (:,:,iblk), etax2U (:,:,iblk), & - strengthU(:,:,iblk), & - divergU (:,:,iblk), tensionU (:,:,iblk), & - shearU (:,:,iblk), DeltaU (:,:,iblk), & - stresspU (:,:,iblk), stressmU (:,:,iblk), & - stress12U(:,:,iblk)) + + !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) + do iblk = 1, nblocks + call deformationsC_T (nx_block , ny_block , & + icellT (iblk), & + indxTi (:,iblk), indxTj (:,iblk), & + uvelE (:,:,iblk), vvelE (:,:,iblk), & + uvelN (:,:,iblk), vvelN (:,:,iblk), & + dxN (:,:,iblk), dyE (:,:,iblk), & + dxT (:,:,iblk), dyT (:,:,iblk), & + tarear (:,:,iblk), uarea (:,:,iblk), & + shearU (:,:,iblk), & + shear (:,:,iblk), divu (:,:,iblk), & + rdg_conv(:,:,iblk), rdg_shear(:,:,iblk)) enddo !$OMP END PARALLEL DO - ! calls ice_haloUpdate, controls bundles and masks - call dyn_haloUpdate (halo_info, halo_info_mask, & - field_loc_center, field_type_scalar, & - stresspT, stressmT, stress12T) - call dyn_haloUpdate (halo_info, halo_info_mask, & - field_loc_NEcorner,field_type_scalar, & - stresspU, stressmU, stress12U) + elseif (grid_ice == "CD") then - !$OMP PARALLEL DO PRIVATE(iblk) - do iblk = 1, nblocks + do ksub = 1,ndte ! 
subcycling - call div_stress_Ex (nx_block , ny_block , & - icellE (iblk), & - indxEi (:,iblk), indxEj (:,iblk), & - dxE (:,:,iblk), dyE (:,:,iblk), & - dxU (:,:,iblk), dyT (:,:,iblk), & - earear (:,:,iblk) , & - stresspT (:,:,iblk), stressmT (:,:,iblk), & - stress12U (:,:,iblk), strintxE (:,:,iblk) ) - - call div_stress_Ey (nx_block , ny_block , & - icellE (iblk), & - indxEi (:,iblk), indxEj (:,iblk), & - dxE (:,:,iblk), dyE (:,:,iblk), & - dxU (:,:,iblk), dyT (:,:,iblk), & - earear (:,:,iblk) , & - stresspU (:,:,iblk), stressmU (:,:,iblk), & - stress12T (:,:,iblk), strintyE (:,:,iblk) ) - - call div_stress_Nx (nx_block , ny_block , & - icellN (iblk), & - indxNi (:,iblk), indxNj (:,iblk), & - dxN (:,:,iblk), dyN (:,:,iblk), & - dxT (:,:,iblk), dyU (:,:,iblk), & - narear (:,:,iblk) , & - stresspU (:,:,iblk), stressmU (:,:,iblk), & - stress12T (:,:,iblk), strintxN (:,:,iblk) ) - - call div_stress_Ny (nx_block , ny_block , & - icellN (iblk), & - indxNi (:,iblk), indxNj (:,iblk), & - dxN (:,:,iblk), dyN (:,:,iblk), & - dxT (:,:,iblk), dyU (:,:,iblk), & - narear (:,:,iblk) , & + !$OMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks + call stressCD_T (nx_block , ny_block , & + icellT (iblk), & + indxTi (:,iblk), indxTj (:,iblk), & + uvelE (:,:,iblk), vvelE (:,:,iblk), & + uvelN (:,:,iblk), vvelN (:,:,iblk), & + dxN (:,:,iblk), dyE (:,:,iblk), & + dxT (:,:,iblk), dyT (:,:,iblk), & + DminTarea (:,:,iblk), & + strength (:,:,iblk), & + zetax2T (:,:,iblk), etax2T (:,:,iblk), & stresspT (:,:,iblk), stressmT (:,:,iblk), & - stress12U (:,:,iblk), strintyN (:,:,iblk) ) + stress12T (:,:,iblk) ) - enddo - !$OMP END PARALLEL DO + enddo + !$OMP END PARALLEL DO - !$OMP PARALLEL DO PRIVATE(iblk) - do iblk = 1, nblocks + ! 
calls ice_haloUpdate, controls bundles and masks + call dyn_haloUpdate (halo_info, halo_info_mask, & + field_loc_center, field_type_scalar, & + zetax2T, etax2T) + + if (visc_method == 'avg_strength') then + call grid_average_X2Y('S', strength, 'T', strengthU, 'U') + elseif (visc_method == 'avg_zeta') then + call grid_average_X2Y('S', zetax2T , 'T', zetax2U , 'U') + call grid_average_X2Y('S', etax2T , 'T', etax2U , 'U') + endif - call stepuv_CD (nx_block , ny_block , & ! E point - icellE (iblk), Cdn_ocnE (:,:,iblk), & - indxEi (:,iblk), indxEj (:,iblk), & - aiE (:,:,iblk), & - uocnE (:,:,iblk), vocnE (:,:,iblk), & - waterxE (:,:,iblk), wateryE (:,:,iblk), & - forcexE (:,:,iblk), forceyE (:,:,iblk), & - emassdti (:,:,iblk), fmE (:,:,iblk), & - strintxE (:,:,iblk), strintyE (:,:,iblk), & - taubxE (:,:,iblk), taubyE (:,:,iblk), & - uvelE_init(:,:,iblk), vvelE_init(:,:,iblk), & - uvelE (:,:,iblk), vvelE (:,:,iblk), & - TbE (:,:,iblk)) - - call stepuv_CD (nx_block , ny_block , & ! N point - icellN (iblk), Cdn_ocnN (:,:,iblk), & - indxNi (:,iblk), indxNj (:,iblk), & - aiN (:,:,iblk), & - uocnN (:,:,iblk), vocnN (:,:,iblk), & - waterxN (:,:,iblk), wateryN (:,:,iblk), & - forcexN (:,:,iblk), forceyN (:,:,iblk), & - nmassdti (:,:,iblk), fmN (:,:,iblk), & - strintxN (:,:,iblk), strintyN (:,:,iblk), & - taubxN (:,:,iblk), taubyN (:,:,iblk), & - uvelN_init(:,:,iblk), vvelN_init(:,:,iblk), & - uvelN (:,:,iblk), vvelN (:,:,iblk), & - TbN (:,:,iblk)) - enddo - !$OMP END PARALLEL DO + !$OMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks + !----------------------------------------------------------------- + ! strain rates at U point + ! 
NOTE these are actually strain rates * area (m^2/s) + !----------------------------------------------------------------- + call strain_rates_U (nx_block , ny_block , & + icellU (iblk), & + indxUi (:,iblk), indxUj (:,iblk), & + uvelE (:,:,iblk), vvelE (:,:,iblk), & + uvelN (:,:,iblk), vvelN (:,:,iblk), & + uvel (:,:,iblk), vvel (:,:,iblk), & + dxE (:,:,iblk), dyN (:,:,iblk), & + dxU (:,:,iblk), dyU (:,:,iblk), & + ratiodxN (:,:,iblk), ratiodxNr(:,:,iblk), & + ratiodyE (:,:,iblk), ratiodyEr(:,:,iblk), & + epm (:,:,iblk), npm (:,:,iblk), & + divergU (:,:,iblk), tensionU (:,:,iblk), & + shearU (:,:,iblk), DeltaU (:,:,iblk) ) + + call stressCD_U (nx_block , ny_block , & + icellU (iblk), & + indxUi (:,iblk), indxUj (:,iblk), & + uarea (:,:,iblk), & + zetax2U (:,:,iblk), etax2U (:,:,iblk), & + strengthU(:,:,iblk), & + divergU (:,:,iblk), tensionU (:,:,iblk), & + shearU (:,:,iblk), DeltaU (:,:,iblk), & + stresspU (:,:,iblk), stressmU (:,:,iblk), & + stress12U(:,:,iblk)) + enddo + !$OMP END PARALLEL DO + + ! 
calls ice_haloUpdate, controls bundles and masks + call dyn_haloUpdate (halo_info, halo_info_mask, & + field_loc_center, field_type_scalar, & + stresspT, stressmT, stress12T) + call dyn_haloUpdate (halo_info, halo_info_mask, & + field_loc_NEcorner,field_type_scalar, & + stresspU, stressmU, stress12U) + + !$OMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks + + call div_stress_Ex (nx_block , ny_block , & + icellE (iblk), & + indxEi (:,iblk), indxEj (:,iblk), & + dxE (:,:,iblk), dyE (:,:,iblk), & + dxU (:,:,iblk), dyT (:,:,iblk), & + earear (:,:,iblk) , & + stresspT (:,:,iblk), stressmT (:,:,iblk), & + stress12U (:,:,iblk), strintxE (:,:,iblk) ) + + call div_stress_Ey (nx_block , ny_block , & + icellE (iblk), & + indxEi (:,iblk), indxEj (:,iblk), & + dxE (:,:,iblk), dyE (:,:,iblk), & + dxU (:,:,iblk), dyT (:,:,iblk), & + earear (:,:,iblk) , & + stresspU (:,:,iblk), stressmU (:,:,iblk), & + stress12T (:,:,iblk), strintyE (:,:,iblk) ) + + call div_stress_Nx (nx_block , ny_block , & + icellN (iblk), & + indxNi (:,iblk), indxNj (:,iblk), & + dxN (:,:,iblk), dyN (:,:,iblk), & + dxT (:,:,iblk), dyU (:,:,iblk), & + narear (:,:,iblk) , & + stresspU (:,:,iblk), stressmU (:,:,iblk), & + stress12T (:,:,iblk), strintxN (:,:,iblk) ) + + call div_stress_Ny (nx_block , ny_block , & + icellN (iblk), & + indxNi (:,iblk), indxNj (:,iblk), & + dxN (:,:,iblk), dyN (:,:,iblk), & + dxT (:,:,iblk), dyU (:,:,iblk), & + narear (:,:,iblk) , & + stresspT (:,:,iblk), stressmT (:,:,iblk), & + stress12U (:,:,iblk), strintyN (:,:,iblk) ) + + enddo + !$OMP END PARALLEL DO - ! 
calls ice_haloUpdate, controls bundles and masks - call dyn_haloUpdate (halo_info, halo_info_mask, & - field_loc_Eface, field_type_vector, & - uvelE, vvelE) - call dyn_haloUpdate (halo_info, halo_info_mask, & - field_loc_Nface, field_type_vector, & - uvelN, vvelN) + !$OMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks - call grid_average_X2Y('S', uvelE, 'E', uvel, 'U') - call grid_average_X2Y('S', vvelN, 'N', vvel, 'U') + call stepuv_CD (nx_block , ny_block , & ! E point + icellE (iblk), Cdn_ocnE (:,:,iblk), & + indxEi (:,iblk), indxEj (:,iblk), & + aiE (:,:,iblk), & + uocnE (:,:,iblk), vocnE (:,:,iblk), & + waterxE (:,:,iblk), wateryE (:,:,iblk), & + forcexE (:,:,iblk), forceyE (:,:,iblk), & + emassdti (:,:,iblk), fmE (:,:,iblk), & + strintxE (:,:,iblk), strintyE (:,:,iblk), & + taubxE (:,:,iblk), taubyE (:,:,iblk), & + uvelE_init(:,:,iblk), vvelE_init(:,:,iblk), & + uvelE (:,:,iblk), vvelE (:,:,iblk), & + TbE (:,:,iblk)) + + call stepuv_CD (nx_block , ny_block , & ! N point + icellN (iblk), Cdn_ocnN (:,:,iblk), & + indxNi (:,iblk), indxNj (:,iblk), & + aiN (:,:,iblk), & + uocnN (:,:,iblk), vocnN (:,:,iblk), & + waterxN (:,:,iblk), wateryN (:,:,iblk), & + forcexN (:,:,iblk), forceyN (:,:,iblk), & + nmassdti (:,:,iblk), fmN (:,:,iblk), & + strintxN (:,:,iblk), strintyN (:,:,iblk), & + taubxN (:,:,iblk), taubyN (:,:,iblk), & + uvelN_init(:,:,iblk), vvelN_init(:,:,iblk), & + uvelN (:,:,iblk), vvelN (:,:,iblk), & + TbN (:,:,iblk)) + enddo + !$OMP END PARALLEL DO - uvel(:,:,:) = uvel(:,:,:)*uvm(:,:,:) - vvel(:,:,:) = vvel(:,:,:)*uvm(:,:,:) + ! 
calls ice_haloUpdate, controls bundles and masks + call dyn_haloUpdate (halo_info, halo_info_mask, & + field_loc_Eface, field_type_vector, & + uvelE, vvelE) + call dyn_haloUpdate (halo_info, halo_info_mask, & + field_loc_Nface, field_type_vector, & + uvelN, vvelN) + + call grid_average_X2Y('S', uvelE, 'E', uvel, 'U') + call grid_average_X2Y('S', vvelN, 'N', vvel, 'U') + + uvel(:,:,:) = uvel(:,:,:)*uvm(:,:,:) + vvel(:,:,:) = vvel(:,:,:)*uvm(:,:,:) ! U fields at NE corner ! calls ice_haloUpdate, controls bundles and masks - call dyn_haloUpdate (halo_info, halo_info_mask, & - field_loc_NEcorner, field_type_vector, & - uvel, vvel) + call dyn_haloUpdate (halo_info, halo_info_mask, & + field_loc_NEcorner, field_type_vector, & + uvel, vvel) - enddo ! subcycling + enddo ! subcycling - !----------------------------------------------------------------- - ! save quantities for mechanical redistribution - !----------------------------------------------------------------- + !----------------------------------------------------------------- + ! save quantities for mechanical redistribution + !----------------------------------------------------------------- - !$OMP PARALLEL DO PRIVATE(iblk) - do iblk = 1, nblocks - call deformationsCD_T (nx_block , ny_block , & - icellT (iblk), & - indxTi (:,iblk), indxTj (:,iblk), & - uvelE (:,:,iblk), vvelE (:,:,iblk), & - uvelN (:,:,iblk), vvelN (:,:,iblk), & - dxN (:,:,iblk), dyE (:,:,iblk), & - dxT (:,:,iblk), dyT (:,:,iblk), & - tarear (:,:,iblk), vort (:,:,iblk), & - shear (:,:,iblk), divu (:,:,iblk), & - rdg_conv(:,:,iblk), rdg_shear(:,:,iblk)) - enddo - !$OMP END PARALLEL DO - endif ! 
grid_ice + !$OMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks + call deformationsCD_T (nx_block , ny_block , & + icellT (iblk), & + indxTi (:,iblk), indxTj (:,iblk), & + uvelE (:,:,iblk), vvelE (:,:,iblk), & + uvelN (:,:,iblk), vvelN (:,:,iblk), & + dxN (:,:,iblk), dyE (:,:,iblk), & + dxT (:,:,iblk), dyT (:,:,iblk), & + tarear (:,:,iblk), & + shear (:,:,iblk), divu (:,:,iblk), & + rdg_conv(:,:,iblk), rdg_shear(:,:,iblk)) + enddo + !$OMP END PARALLEL DO + endif ! grid_ice - call ice_timer_stop(timer_evp) + call ice_timer_stop(timer_evp_2d) + endif ! evp_algorithm if (maskhalo_dyn) then call ice_HaloDestroy(halo_info_mask) @@ -1467,7 +1437,7 @@ subroutine stress (nx_block, ny_block, & stress12_3, stress12_4, & str ) - use ice_dyn_shared, only: strain_rates, visc_replpress + use ice_dyn_shared, only: strain_rates, visc_replpress, capping integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions @@ -1560,16 +1530,16 @@ subroutine stress (nx_block, ny_block, & !----------------------------------------------------------------- call visc_replpress (strength(i,j), DminTarea(i,j), Deltane, & - zetax2ne, etax2ne, rep_prsne) + zetax2ne, etax2ne, rep_prsne, capping) call visc_replpress (strength(i,j), DminTarea(i,j), Deltanw, & - zetax2nw, etax2nw, rep_prsnw) + zetax2nw, etax2nw, rep_prsnw, capping) call visc_replpress (strength(i,j), DminTarea(i,j), Deltasw, & - zetax2sw, etax2sw, rep_prssw) + zetax2sw, etax2sw, rep_prssw, capping) call visc_replpress (strength(i,j), DminTarea(i,j), Deltase, & - zetax2se, etax2se, rep_prsse) + zetax2se, etax2se, rep_prsse, capping) !----------------------------------------------------------------- ! the stresses ! kg/s^2 @@ -1577,6 +1547,7 @@ subroutine stress (nx_block, ny_block, & !----------------------------------------------------------------- ! NOTE: for comp. 
efficiency 2 x zeta and 2 x eta are used in the code + stressp_1 (i,j) = (stressp_1 (i,j)*(c1-arlx1i*revp) & + arlx1i*(zetax2ne*divune - rep_prsne)) * denom1 stressp_2 (i,j) = (stressp_2 (i,j)*(c1-arlx1i*revp) & @@ -1763,7 +1734,7 @@ subroutine stressC_T (nx_block, ny_block , & stresspT , stressmT , & stress12T) - use ice_dyn_shared, only: strain_rates_T, & + use ice_dyn_shared, only: strain_rates_T, capping, & visc_replpress, e_factor integer (kind=int_kind), intent(in) :: & @@ -1856,7 +1827,7 @@ subroutine stressC_T (nx_block, ny_block , & !----------------------------------------------------------------- call visc_replpress (strength(i,j), DminTarea(i,j), DeltaT, & - zetax2T (i,j), etax2T(i,j), rep_prsT) + zetax2T (i,j), etax2T(i,j), rep_prsT, capping) !----------------------------------------------------------------- ! the stresses ! kg/s^2 @@ -1899,7 +1870,7 @@ subroutine stressC_U (nx_block , ny_block ,& stress12U) use ice_dyn_shared, only: visc_replpress, & - visc_method, deltaminEVP + visc_method, deltaminEVP, capping integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions @@ -1955,7 +1926,7 @@ subroutine stressC_U (nx_block , ny_block ,& ! only need etax2U here, but other terms are calculated with etax2U ! 
minimal extra calculations here even though it seems like there is call visc_replpress (strengthU(i,j), DminUarea, deltaU(i,j), & - lzetax2U , letax2U , lrep_prsU) + lzetax2U , letax2U , lrep_prsU , capping) stress12U(i,j) = (stress12U(i,j)*(c1-arlx1i*revp) & + arlx1i*p5*letax2U*shearU(i,j)) * denom1 enddo @@ -1983,7 +1954,7 @@ subroutine stressCD_T (nx_block, ny_block , & stresspT, stressmT , & stress12T) - use ice_dyn_shared, only: strain_rates_T, & + use ice_dyn_shared, only: strain_rates_T, capping, & visc_replpress integer (kind=int_kind), intent(in) :: & @@ -2053,7 +2024,7 @@ subroutine stressCD_T (nx_block, ny_block , & !----------------------------------------------------------------- call visc_replpress (strength(i,j), DminTarea(i,j), DeltaT(i,j), & - zetax2T (i,j), etax2T(i,j), rep_prsT) + zetax2T (i,j), etax2T(i,j), rep_prsT , capping) !----------------------------------------------------------------- ! the stresses ! kg/s^2 @@ -2092,7 +2063,7 @@ subroutine stressCD_U (nx_block, ny_block, & stress12U) use ice_dyn_shared, only: visc_replpress, & - visc_method, deltaminEVP + visc_method, deltaminEVP, capping integer (kind=int_kind), intent(in) :: & nx_block, ny_block, & ! block dimensions @@ -2150,7 +2121,7 @@ subroutine stressCD_U (nx_block, ny_block, & ! only need etax2U here, but other terms are calculated with etax2U ! 
minimal extra calculations here even though it seems like there is call visc_replpress (strengthU(i,j), DminUarea, deltaU(i,j), & - lzetax2U , letax2U , lrep_prsU ) + lzetax2U , letax2U , lrep_prsU , capping) endif !----------------------------------------------------------------- diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp_1d.F90 b/cicecore/cicedyn/dynamics/ice_dyn_evp_1d.F90 similarity index 99% rename from cicecore/cicedynB/dynamics/ice_dyn_evp_1d.F90 rename to cicecore/cicedyn/dynamics/ice_dyn_evp_1d.F90 index e3432eaab..ba8bda63d 100755 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp_1d.F90 +++ b/cicecore/cicedyn/dynamics/ice_dyn_evp_1d.F90 @@ -901,7 +901,7 @@ subroutine evp1d_halo_update(NAVEL_len, lb, ub, uvel, vvel, & #ifdef _OPENACC !$acc parallel & - !$acc present(uvel, vvel) & + !$acc present(uvel, vvel) !$acc loop do iw = 1, NAVEL_len if (halo_parent(iw) == 0) cycle diff --git a/cicecore/cicedyn/dynamics/ice_dyn_shared.F90 b/cicecore/cicedyn/dynamics/ice_dyn_shared.F90 index 84edea237..a12e6fddd 100644 --- a/cicecore/cicedyn/dynamics/ice_dyn_shared.F90 +++ b/cicecore/cicedyn/dynamics/ice_dyn_shared.F90 @@ -11,7 +11,7 @@ module ice_dyn_shared use ice_kinds_mod use ice_communicate, only: my_task, master_task, get_num_procs - use ice_constants, only: c0, c1, c2, c3, c4, c6, c1p5 + use ice_constants, only: c0, c1, c2, c3, c4, c6 use ice_constants, only: omega, spval_dbl, p01, p001, p5 use ice_blocks, only: nx_block, ny_block use ice_domain_size, only: max_blocks @@ -59,7 +59,7 @@ module ice_dyn_shared yield_curve , & ! 'ellipse' ('teardrop' needs further testing) visc_method , & ! method for viscosity calc at U points (C, CD grids) seabed_stress_method ! method for seabed stress calculation - ! LKD: Lemieux et al. 2015, probabilistic: Dupont et al. 2022 + ! LKD: Lemieux et al. 2015, probabilistic: Dupont et al. in prep. real (kind=dbl_kind), parameter, public :: & u0 = 5e-5_dbl_kind, & ! 
residual velocity for seabed stress (m/s) @@ -119,14 +119,6 @@ module ice_dyn_shared real (kind=dbl_kind), allocatable, public :: & DminTarea(:,:,:) ! deltamin * tarea (m^2/s) - real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & - cyp , & ! 1.5*HTE(i,j)-0.5*HTW(i,j) = 1.5*HTE(i,j)-0.5*HTE(i-1,j) - cxp , & ! 1.5*HTN(i,j)-0.5*HTS(i,j) = 1.5*HTN(i,j)-0.5*HTN(i,j-1) - cym , & ! 0.5*HTE(i,j)-1.5*HTW(i,j) = 0.5*HTE(i,j)-1.5*HTE(i-1,j) - cxm , & ! 0.5*HTN(i,j)-1.5*HTS(i,j) = 0.5*HTN(i,j)-1.5*HTN(i,j-1) - dxhy , & ! 0.5*(HTE(i,j) - HTW(i,j)) = 0.5*(HTE(i,j) - HTE(i-1,j)) - dyhx ! 0.5*(HTN(i,j) - HTS(i,j)) = 0.5*(HTN(i,j) - HTN(i,j-1)) - ! ice isotropic tensile strength parameter real (kind=dbl_kind), public :: & Ktens ! T=Ktens*P (tensile strength: see Konig and Holland, 2010) @@ -200,22 +192,6 @@ subroutine alloc_dyn_shared stat=ierr) if (ierr/=0) call abort_ice(subname//': Out of memory') - allocate( & - cyp(nx_block,ny_block,max_blocks), & ! 1.5*HTE - 0.5*HTW - cxp(nx_block,ny_block,max_blocks), & ! 1.5*HTN - 0.5*HTS - cym(nx_block,ny_block,max_blocks), & ! 0.5*HTE - 1.5*HTW - cxm(nx_block,ny_block,max_blocks), & ! 0.5*HTN - 1.5*HTS - stat=ierr) - if (ierr/=0) call abort_ice(subname//': Out of memory') - - if (grid_ice == 'B' .and. evp_algorithm == "standard_2d") then - allocate( & - dxhy(nx_block,ny_block,max_blocks), & ! 0.5*(HTE - HTW) - dyhx(nx_block,ny_block,max_blocks), & ! 0.5*(HTN - HTS) - stat=ierr) - if (ierr/=0) call abort_ice(subname//': Out of memory') - endif - if (grid_ice == 'CD' .or. grid_ice == 'C') then allocate( & uvelE_init (nx_block,ny_block,max_blocks), & ! 
x-component of velocity (m/s), beginning of timestep @@ -238,9 +214,8 @@ end subroutine alloc_dyn_shared subroutine init_dyn_shared (dt) - use ice_blocks, only: block, get_block - use ice_boundary, only: ice_halo, ice_haloUpdate - use ice_domain, only: nblocks, halo_dynbundle, blocks_ice, halo_info + use ice_blocks, only: nx_block, ny_block + use ice_domain, only: nblocks, halo_dynbundle use ice_domain_size, only: max_blocks use ice_flux, only: & stressp_1, stressp_2, stressp_3, stressp_4, & @@ -249,8 +224,7 @@ subroutine init_dyn_shared (dt) stresspT, stressmT, stress12T, & stresspU, stressmU, stress12U use ice_state, only: uvel, vvel, uvelE, vvelE, uvelN, vvelN - use ice_grid, only: ULAT, NLAT, ELAT, tarea, HTE, HTN - use ice_constants, only: field_loc_center, field_type_vector + use ice_grid, only: ULAT, NLAT, ELAT, tarea real (kind=dbl_kind), intent(in) :: & dt ! time step @@ -258,13 +232,9 @@ subroutine init_dyn_shared (dt) ! local variables integer (kind=int_kind) :: & - i, j , & ! indices - ilo, ihi, jlo, jhi, & !min and max index for interior of blocks - nprocs, & ! number of processors - iblk ! block index - - type (block) :: & - this_block ! block information for current block + i, j , & ! indices + nprocs, & ! number of processors + iblk ! block index character(len=*), parameter :: subname = '(init_dyn_shared)' @@ -363,48 +333,6 @@ subroutine init_dyn_shared (dt) enddo ! iblk !$OMP END PARALLEL DO - if (grid_ice == 'B' .and. 
evp_algorithm == "standard_2d") then - do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi - - do j = jlo, jhi - do i = ilo, ihi - dxhy(i,j,iblk) = p5*(HTE(i,j,iblk) - HTE(i-1,j,iblk)) - dyhx(i,j,iblk) = p5*(HTN(i,j,iblk) - HTN(i,j-1,iblk)) - enddo - enddo - enddo - - call ice_HaloUpdate (dxhy, halo_info, & - field_loc_center, field_type_vector, & - fillValue=c1) - call ice_HaloUpdate (dyhx, halo_info, & - field_loc_center, field_type_vector, & - fillValue=c1) - - endif - - do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi - do j = jlo, jhi+1 - do i = ilo, ihi+1 - cyp(i,j,iblk) = (c1p5*HTE(i,j,iblk) - p5*HTE(i-1,j,iblk)) - cxp(i,j,iblk) = (c1p5*HTN(i,j,iblk) - p5*HTN(i,j-1,iblk)) - ! match order of operations in cyp, cxp for tripole grids - cym(i,j,iblk) = -(c1p5*HTE(i-1,j,iblk) - p5*HTE(i,j,iblk)) - cxm(i,j,iblk) = -(c1p5*HTN(i,j-1,iblk) - p5*HTN(i,j,iblk)) - enddo - enddo - enddo - end subroutine init_dyn_shared !======================================================================= @@ -1361,7 +1289,7 @@ subroutine seabed_stress_factor_LKD (nx_block, ny_block, & real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & TbU ! seabed stress factor at 'grid_location' (N/m^2) - character(len=*), optional, intent(in) :: & + character(len=*), optional, intent(inout) :: & grid_location ! grid location (U, E, N), U assumed if not present real (kind=dbl_kind) :: & @@ -1419,9 +1347,8 @@ end subroutine seabed_stress_factor_LKD ! a normal distribution with sigma_b = 2.5d0. An improvement would ! be to provide the distribution based on high resolution data. ! -! Dupont, F., D. Dumont, J.F. Lemieux, E. Dumas-Lefebvre, A. Caya (2022). -! A probabilistic seabed-ice keel interaction model, The Cryosphere, 16, -! 1963-1977. +! Dupont, F. 
Dumont, D., Lemieux, J.F., Dumas-Lefebvre, E., Caya, A. +! in prep. ! ! authors: D. Dumont, J.F. Lemieux, E. Dumas-Lefebvre, F. Dupont ! @@ -1523,10 +1450,10 @@ subroutine seabed_stress_factor_prob (nx_block, ny_block, & character(len=*), parameter :: subname = '(seabed_stress_factor_prob)' - call icepack_query_parameters(rhow_out=rhow, rhoi_out=rhoi,gravit_out=gravit,pi_out=pi,puny_out=puny) - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) + call icepack_query_parameters(rhow_out=rhow, rhoi_out=rhoi) + call icepack_query_parameters(gravit_out=gravit) + call icepack_query_parameters(pi_out=pi) + call icepack_query_parameters(puny_out=puny) Tbt=c0 @@ -1554,13 +1481,13 @@ subroutine seabed_stress_factor_prob (nx_block, ny_block, & do n =1, ncat v_i = v_i + vcat(n)**2 / (max(acat(n), puny)) enddo - v_i = max((v_i - m_i**2), puny) + v_i = v_i - m_i**2 mu_i = log(m_i/sqrt(c1 + v_i/m_i**2)) ! parameters for the log-normal sigma_i = sqrt(log(c1 + v_i/m_i**2)) ! max thickness associated with percentile of log-normal PDF - ! x_kmax=x997 was obtained from an optimization procedure (Dupont et al. 2022) + ! x_kmax=x997 was obtained from an optimization procedure (Dupont et al.) x_kmax = exp(mu_i + sqrt(c2*sigma_i)*1.9430d0) @@ -1711,10 +1638,9 @@ subroutine deformations (nx_block, ny_block, & indxTi, indxTj, & uvel, vvel, & dxT, dyT, & - dxU, dyU, & cxp, cyp, & cxm, cym, & - tarear, vort, & + tarear, & shear, divu, & rdg_conv, rdg_shear ) @@ -1733,8 +1659,6 @@ subroutine deformations (nx_block, ny_block, & vvel , & ! y-component of velocity (m/s) dxT , & ! width of T-cell through the middle (m) dyT , & ! height of T-cell through the middle (m) - dxU , & ! width of U-cell through the middle (m) - dyU , & ! height of U-cell through the middle (m) cyp , & ! 1.5*HTE - 0.5*HTW cxp , & ! 1.5*HTN - 0.5*HTS cym , & ! 
0.5*HTE - 1.5*HTW @@ -1742,7 +1666,6 @@ subroutine deformations (nx_block, ny_block, & tarear ! 1/tarea real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & - vort , & ! vorticity (1/s) shear , & ! strain rate II component (1/s) divu , & ! strain rate I component, velocity divergence (1/s) rdg_conv , & ! convergence term for ridging (1/s) @@ -1760,9 +1683,6 @@ subroutine deformations (nx_block, ny_block, & Deltane, Deltanw, Deltase, Deltasw , & ! Delta tmp ! useful combination - real (kind=dbl_kind) :: & ! at edges for vorticity calc : - dvdxn, dvdxs, dudye, dudyw ! dvdx and dudy terms on edges - character(len=*), parameter :: subname = '(deformations)' do ij = 1, icellT @@ -1801,13 +1721,6 @@ subroutine deformations (nx_block, ny_block, & (tensionne + tensionnw + tensionse + tensionsw)**2 + & (shearne + shearnw + shearse + shearsw )**2) - ! vorticity - dvdxn = dyU(i,j)*vvel(i,j) - dyU(i-1,j)*vvel(i-1,j) - dvdxs = dyU(i,j-1)*vvel(i,j-1) - dyU(i-1,j-1)*vvel(i-1,j-1) - dudye = dxU(i,j)*uvel(i,j) - dxU(i,j-1)*uvel(i,j-1) - dudyw = dxU(i-1,j)*uvel(i-1,j) - dxU(i-1,j-1)*uvel(i-1,j-1) - vort(i,j) = p5*tarear(i,j)*(dvdxn + dvdxs - dudye - dudyw) - enddo ! ij end subroutine deformations @@ -1825,7 +1738,7 @@ subroutine deformationsCD_T (nx_block, ny_block, & uvelN, vvelN, & dxN, dyE, & dxT, dyT, & - tarear, vort, & + tarear, & shear, divu, & rdg_conv, rdg_shear ) @@ -1851,7 +1764,6 @@ subroutine deformationsCD_T (nx_block, ny_block, & tarear ! 1/tarea real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & - vort , & ! vorticity (1/s) shear , & ! strain rate II component (1/s) divu , & ! strain rate I component, velocity divergence (1/s) rdg_conv , & ! convergence term for ridging (1/s) @@ -1903,9 +1815,6 @@ subroutine deformationsCD_T (nx_block, ny_block, & ! diagnostic only ! shear = sqrt(tension**2 + shearing**2) shear(i,j) = tarear(i,j)*sqrt( tensionT(i,j)**2 + shearT(i,j)**2 ) - ! 
vorticity - vort (i,j) = tarear(i,j)*( ( dyE(i,j)*vvelE(i,j) - dyE(i-1,j)*vvelE(i-1,j) ) & - - ( dxN(i,j)*uvelN(i,j) - dxN(i,j-1)*uvelN(i,j-1)) ) enddo ! ij @@ -1926,7 +1835,7 @@ subroutine deformationsC_T (nx_block, ny_block, & dxN, dyE, & dxT, dyT, & tarear, uarea, & - shearU, vort, & + shearU, & shear, divu, & rdg_conv, rdg_shear ) @@ -1954,7 +1863,6 @@ subroutine deformationsC_T (nx_block, ny_block, & shearU ! shearU real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & - vort , & ! vorticity (1/s) shear , & ! strain rate II component (1/s) divu , & ! strain rate I component, velocity divergence (1/s) rdg_conv , & ! convergence term for ridging (1/s) @@ -2018,9 +1926,6 @@ subroutine deformationsC_T (nx_block, ny_block, & ! diagnostic only...maybe we dont want to use shearTsqr here???? ! shear = sqrt(tension**2 + shearing**2) shear(i,j) = tarear(i,j)*sqrt( tensionT(i,j)**2 + shearT(i,j)**2 ) - ! vorticity - vort (i,j) = tarear(i,j)*( ( dyE(i,j)*vvelE(i,j) - dyE(i-1,j)*vvelE(i-1,j) ) & - - ( dxN(i,j)*uvelN(i,j) - dxN(i,j-1)*uvelN(i,j-1)) ) enddo ! ij @@ -2396,15 +2301,16 @@ end subroutine strain_rates_U ! by combining tensile strength and a parameterization for grounded ridges. ! J. Geophys. Res. Oceans, 121, 7354-7368. - subroutine visc_replpress(strength, DminArea, Delta, & - zetax2, etax2, rep_prs) + subroutine visc_replpress(strength, DminArea, Delta, & + zetax2, etax2, rep_prs, capping) real (kind=dbl_kind), intent(in):: & strength, & ! DminArea ! real (kind=dbl_kind), intent(in):: & - Delta + Delta , & ! + capping ! real (kind=dbl_kind), intent(out):: & zetax2 , & ! 
bulk viscosity diff --git a/cicecore/cicedyn/dynamics/ice_dyn_vp.F90 b/cicecore/cicedyn/dynamics/ice_dyn_vp.F90 index 0d04bf974..3915004b4 100644 --- a/cicecore/cicedyn/dynamics/ice_dyn_vp.F90 +++ b/cicecore/cicedyn/dynamics/ice_dyn_vp.F90 @@ -52,7 +52,7 @@ module ice_dyn_vp use ice_fileunits, only: nu_diag use ice_flux, only: fmU use ice_global_reductions, only: global_sum - use ice_grid, only: dxT, dyT, uarear + use ice_grid, only: dxT, dyT, dxhy, dyhx, cxp, cyp, cxm, cym, uarear use ice_exit, only: abort_ice use icepack_intfc, only: icepack_warnings_flush, icepack_warnings_aborted use icepack_intfc, only: icepack_ice_strength, icepack_query_parameters @@ -120,9 +120,19 @@ subroutine init_vp use ice_boundary, only: ice_HaloUpdate use ice_constants, only: c1, & field_loc_center, field_type_scalar - use ice_domain, only: blocks_ice + use ice_domain, only: blocks_ice, halo_info use ice_calendar, only: dt_dyn use ice_dyn_shared, only: init_dyn_shared +! use ice_grid, only: tarea + + ! local variables + + integer (kind=int_kind) :: & + i, j, iblk, & + ilo,ihi,jlo,jhi ! beginning and end of physical domain + + type (block) :: & + this_block ! 
block information for current block call init_dyn_shared(dt_dyn) @@ -157,8 +167,7 @@ subroutine implicit_solver (dt) use ice_blocks, only: block, get_block, nx_block, ny_block use ice_domain, only: blocks_ice, halo_info, maskhalo_dyn use ice_domain_size, only: max_blocks, ncat - use ice_dyn_shared, only: deformations, iceTmask, iceUmask, & - cxp, cyp, cxm, cym + use ice_dyn_shared, only: deformations, iceTmask, iceUmask use ice_flux, only: rdg_conv, rdg_shear, strairxT, strairyT, & strairxU, strairyU, uocn, vocn, ss_tltx, ss_tlty, fmU, & strtltxU, strtltyU, strocnxU, strocnyU, strintxU, strintyU, taubxU, taubyU, & @@ -167,10 +176,10 @@ subroutine implicit_solver (dt) stressp_1, stressp_2, stressp_3, stressp_4, & stressm_1, stressm_2, stressm_3, stressm_4, & stress12_1, stress12_2, stress12_3, stress12_4 - use ice_grid, only: tmask, umask, dxT, dyT, dxU, dyU, & + use ice_grid, only: tmask, umask, dxT, dyT, cxp, cyp, cxm, cym, & tarear, grid_type, grid_average_X2Y, & grid_atm_dynu, grid_atm_dynv, grid_ocn_dynu, grid_ocn_dynv - use ice_state, only: aice, aiU, vice, vsno, uvel, vvel, divu, shear, vort, & + use ice_state, only: aice, aiU, vice, vsno, uvel, vvel, divu, shear, & aice_init, aice0, aicen, vicen, strength use ice_timers, only: timer_dynamics, timer_bound, & ice_timer_start, ice_timer_stop @@ -521,10 +530,9 @@ subroutine implicit_solver (dt) indxTi (:,iblk), indxTj (:,iblk), & uvel (:,:,iblk), vvel (:,:,iblk), & dxT (:,:,iblk), dyT (:,:,iblk), & - dxU (:,:,iblk), dyU (:,:,iblk), & cxp (:,:,iblk), cyp (:,:,iblk), & cxm (:,:,iblk), cym (:,:,iblk), & - tarear (:,:,iblk), vort (:,:,iblk), & + tarear (:,:,iblk), & shear (:,:,iblk), divu (:,:,iblk), & rdg_conv (:,:,iblk), rdg_shear (:,:,iblk)) enddo @@ -678,8 +686,9 @@ subroutine anderson_solver (icellT , icellU , & use ice_domain, only: maskhalo_dyn, halo_info use ice_domain_size, only: max_blocks use ice_flux, only: fmU, TbU - use ice_grid, only: dxT, dyT, uarear - use ice_dyn_shared, only: DminTarea, dxhy, dyhx, 
cxp, cyp, cxm, cym + use ice_grid, only: dxT, dyT, dxhy, dyhx, cxp, cyp, cxm, cym, & + uarear + use ice_dyn_shared, only: DminTarea use ice_state, only: uvel, vvel, strength use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound @@ -1212,16 +1221,20 @@ subroutine calc_zeta_dPr (nx_block, ny_block, & call visc_replpress (strength(i,j) , DminTarea(i,j) , & Deltane , zetax2 (i,j,1), & - etax2 (i,j,1), rep_prs (i,j,1)) + etax2 (i,j,1), rep_prs (i,j,1), & + capping) call visc_replpress (strength(i,j) , DminTarea(i,j) , & Deltanw , zetax2 (i,j,2), & - etax2 (i,j,2), rep_prs (i,j,2)) + etax2 (i,j,2), rep_prs (i,j,2), & + capping) call visc_replpress (strength(i,j) , DminTarea(i,j) , & Deltasw , zetax2 (i,j,3), & - etax2 (i,j,3), rep_prs (i,j,3)) + etax2 (i,j,3), rep_prs (i,j,3), & + capping) call visc_replpress (strength(i,j) , DminTarea(i,j) , & Deltase , zetax2 (i,j,4), & - etax2 (i,j,4), rep_prs (i,j,4)) + etax2 (i,j,4), rep_prs (i,j,4), & + capping) !----------------------------------------------------------------- ! the stresses ! kg/s^2 @@ -2489,7 +2502,7 @@ function global_dot_product (nx_block , ny_block , & vector2_x , vector2_y) & result(dot_product) - use ice_domain, only: distrb_info, ns_boundary_type + use ice_domain, only: distrb_info use ice_domain_size, only: max_blocks use ice_fileunits, only: bfbflag @@ -2539,14 +2552,8 @@ function global_dot_product (nx_block , ny_block , & enddo !$OMP END PARALLEL DO - ! Use faster local summation result for several bfbflag settings. - ! The local implementation sums over each block, sums over local - ! blocks, and calls global_sum on a scalar and should be just as accurate as - ! bfbflag = 'off', 'lsum8', and 'lsum4' without the extra copies and overhead - ! in the more general array global_sum. But use the array global_sum - ! if bfbflag is more strict or for tripole grids (requires special masking) - if (ns_boundary_type /= 'tripole' .and. ns_boundary_type /= 'tripoleT' .and. & - (bfbflag == 'off' .or. 
bfbflag == 'lsum8' .or. bfbflag == 'lsum4')) then + ! Use local summation result unless bfbflag is active + if (bfbflag == 'off') then dot_product = global_sum(sum(dot), distrb_info) else dot_product = global_sum(prod, distrb_info, field_loc_NEcorner) @@ -2744,7 +2751,6 @@ subroutine fgmres (zetax2 , etax2 , & use ice_boundary, only: ice_HaloUpdate use ice_domain, only: maskhalo_dyn, halo_info use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound - use ice_dyn_shared, only: dxhy, dyhx, cxp, cyp, cxm, cym real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks,4), intent(in) :: & zetax2 , & ! zetax2 = 2*zeta (bulk viscosity) @@ -3114,7 +3120,7 @@ subroutine fgmres (zetax2 , etax2 , & j = indxUj(ij, iblk) workspace_x(i, j, iblk) = workspace_x(i, j, iblk) + rhs_hess(it) * arnoldi_basis_x(i, j, iblk, it) - workspace_y(i, j, iblk) = workspace_y(i, j, iblk) + rhs_hess(it) * arnoldi_basis_y(i, j, iblk, it) + workspace_y(i, j, iblk) = workspace_x(i, j, iblk) + rhs_hess(it) * arnoldi_basis_y(i, j, iblk, it) enddo ! ij enddo !$OMP END PARALLEL DO @@ -3145,8 +3151,8 @@ subroutine pgmres (zetax2 , etax2 , & use ice_boundary, only: ice_HaloUpdate use ice_domain, only: maskhalo_dyn, halo_info + use ice_fileunits, only: bfbflag use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound - use ice_dyn_shared, only: dyhx, dxhy, cxp, cyp, cxm, cym real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks,4), intent(in) :: & zetax2 , & ! zetax2 = 2*zeta (bulk viscosity) @@ -3337,17 +3343,21 @@ subroutine pgmres (zetax2 , etax2 , & workspace_x , workspace_y) ! Update workspace with boundary values - call stack_fields(workspace_x, workspace_y, fld2) - call ice_timer_start(timer_bound) - if (maskhalo_dyn) then - call ice_HaloUpdate (fld2, halo_info_mask, & - field_loc_NEcorner, field_type_vector) - else - call ice_HaloUpdate (fld2, halo_info, & - field_loc_NEcorner, field_type_vector) + ! NOTE: skipped for efficiency since this is just a preconditioner + ! 
unless bfbflag is active + if (bfbflag /= 'off') then + call stack_fields(workspace_x, workspace_y, fld2) + call ice_timer_start(timer_bound) + if (maskhalo_dyn) then + call ice_HaloUpdate (fld2, halo_info_mask, & + field_loc_NEcorner, field_type_vector) + else + call ice_HaloUpdate (fld2, halo_info, & + field_loc_NEcorner, field_type_vector) + endif + call ice_timer_stop(timer_bound) + call unstack_fields(fld2, workspace_x, workspace_y) endif - call ice_timer_stop(timer_bound) - call unstack_fields(fld2, workspace_x, workspace_y) !$OMP PARALLEL DO PRIVATE(iblk) do iblk = 1, nblocks @@ -3518,7 +3528,7 @@ subroutine pgmres (zetax2 , etax2 , & j = indxUj(ij, iblk) workspace_x(i, j, iblk) = workspace_x(i, j, iblk) + rhs_hess(it) * arnoldi_basis_x(i, j, iblk, it) - workspace_y(i, j, iblk) = workspace_y(i, j, iblk) + rhs_hess(it) * arnoldi_basis_y(i, j, iblk, it) + workspace_y(i, j, iblk) = workspace_x(i, j, iblk) + rhs_hess(it) * arnoldi_basis_y(i, j, iblk, it) enddo ! ij enddo !$OMP END PARALLEL DO diff --git a/cicecore/cicedyn/general/ice_forcing.F90 b/cicecore/cicedyn/general/ice_forcing.F90 index ff79778c5..541efb282 100755 --- a/cicecore/cicedyn/general/ice_forcing.F90 +++ b/cicecore/cicedyn/general/ice_forcing.F90 @@ -118,7 +118,7 @@ module ice_forcing real (kind=dbl_kind), dimension(:,:,:,:,:), allocatable :: & wave_spectrum_data ! field values at 2 temporal data points - + character(char_len), public :: & atm_data_format, & ! 'bin'=binary or 'nc'=netcdf ocn_data_format, & ! 
'bin'=binary or 'nc'=netcdf @@ -5650,7 +5650,7 @@ subroutine get_wave_spec file=__FILE__, line=__LINE__) else #ifdef USE_NETCDF - call wave_spec_data + call wave_spec_data #else write (nu_diag,*) "wave spectrum file not available, requires cpp USE_NETCDF" write (nu_diag,*) "wave spectrum file not available, using default profile" @@ -5682,9 +5682,9 @@ subroutine wave_spec_data use ice_grid, only: hm, tlon, tlat, tmask, umask use ice_calendar, only: days_per_year, use_leap_years - integer (kind=int_kind) :: & + integer (kind=int_kind) :: & ncid , & ! netcdf file id - i, j, freq , & + i, j, freq , & ixm,ixx,ixp , & ! record numbers for neighboring months recnum , & ! record number maxrec , & ! maximum record number @@ -5710,7 +5710,7 @@ subroutine wave_spec_data wave_spectrum_profile ! wave spectrum character(len=64) :: fieldname !netcdf field name - character(char_len_long) :: spec_file + character(char_len_long) :: spec_file character(char_len) :: wave_spec_type logical (kind=log_kind) :: wave_spec character(len=*), parameter :: subname = '(wave_spec_data)' @@ -5736,7 +5736,7 @@ subroutine wave_spec_data yr = fyear ! current year !------------------------------------------------------------------- ! 6-hourly data - ! + ! ! Assume that the 6-hourly value is located at the end of the ! 6-hour period. This is the convention for NCEP reanalysis data. ! E.g. record 1 gives conditions at 6 am GMT on 1 January. @@ -5785,9 +5785,9 @@ subroutine wave_spec_data call ice_read_nc_xyf(ncid,recnum,'efreq',wave_spectrum_data(:,:,:,2,:),debug_n_d, & field_loc=field_loc_center, & field_type=field_type_scalar) - call ice_close_nc(ncid) + call ice_close_nc(ncid) + - ! 
Interpolate call interpolate_wavespec_data (wave_spectrum_data, wave_spectrum) diff --git a/cicecore/cicedyn/general/ice_init.F90 b/cicecore/cicedyn/general/ice_init.F90 index a7f84e46e..d56ad002e 100644 --- a/cicecore/cicedyn/general/ice_init.F90 +++ b/cicecore/cicedyn/general/ice_init.F90 @@ -14,7 +14,7 @@ module ice_init use ice_kinds_mod use ice_communicate, only: my_task, master_task, ice_barrier - use ice_constants, only: c0, c1, c2, c3, c5, c12, p01, p2, p3, p5, p75, p166, & + use ice_constants, only: c0, c1, c2, c3, c5, c12, p2, p3, p5, p75, p166, & cm_to_m use ice_exit, only: abort_ice use ice_fileunits, only: nu_nml, nu_diag, nml_filename, diag_type, & @@ -59,44 +59,36 @@ module ice_init subroutine input_data use ice_broadcast, only: broadcast_scalar, broadcast_array - use ice_diagnostics, only: & - diag_file, print_global, print_points, latpnt, lonpnt, & - debug_model, debug_model_step, debug_model_task, & - debug_model_i, debug_model_j, debug_model_iblk + use ice_diagnostics, only: diag_file, print_global, print_points, latpnt, lonpnt, & + debug_model, debug_model_step, debug_model_task, & + debug_model_i, debug_model_j, debug_model_iblk use ice_domain, only: close_boundaries, orca_halogrid - use ice_domain_size, only: & - ncat, nilyr, nslyr, nblyr, nfsd, nfreq, & - n_iso, n_aero, n_zaero, n_algae, & - n_doc, n_dic, n_don, n_fed, n_fep, & - max_nstrm - use ice_calendar, only: & - year_init, month_init, day_init, sec_init, & - istep0, histfreq, histfreq_n, histfreq_base, & - dumpfreq, dumpfreq_n, diagfreq, dumpfreq_base, & - npt, dt, ndtd, days_per_year, use_leap_years, & - write_ic, dump_last, npt_unit + use ice_domain_size, only: ncat, nilyr, nslyr, nblyr, nfsd, nfreq, & + n_iso, n_aero, n_zaero, n_algae, & + n_doc, n_dic, n_don, n_fed, n_fep, & + max_nstrm + use ice_calendar, only: year_init, month_init, day_init, sec_init, & + istep0, histfreq, histfreq_n, histfreq_base, & + dumpfreq, dumpfreq_n, diagfreq, dumpfreq_base, & + npt, dt, ndtd, 
days_per_year, use_leap_years, & + write_ic, dump_last, npt_unit use ice_arrays_column, only: oceanmixed_ice - use ice_restart_column, only: & - restart_age, restart_FY, restart_lvl, & + use ice_restart_column, only: restart_age, restart_FY, restart_lvl, & restart_pond_lvl, restart_pond_topo, restart_aero, & restart_fsd, restart_iso, restart_snow use ice_restart_shared, only: & - restart, restart_ext, restart_coszen, use_restart_time, & - runtype, restart_file, restart_dir, runid, pointer_file, & - restart_format, restart_rearranger, restart_iotasks, restart_root, & - restart_stride, restart_deflate, restart_chunksize - use ice_history_shared, only: & - history_precision, hist_avg, history_format, history_file, incond_file, & - history_dir, incond_dir, version_name, history_rearranger, & - hist_suffix, history_iotasks, history_root, history_stride, & - history_deflate, history_chunksize, hist_time_axis - use ice_flux, only: update_ocn_f, cpl_frazil, l_mpond_fresh + restart, restart_ext, restart_coszen, restart_dir, restart_file, pointer_file, & + runid, runtype, use_restart_time, restart_format, lcdf64 + use ice_history_shared, only: hist_avg, history_dir, history_file, & + incond_dir, incond_file, version_name, & + history_precision, history_format + use ice_flux, only: update_ocn_f, l_mpond_fresh use ice_flux, only: default_season use ice_flux_bgc, only: cpl_bgc use ice_forcing, only: & ycycle, fyear_init, debug_forcing, & atm_data_type, atm_data_dir, precip_units, rotate_wind, & - atm_data_format, ocn_data_format, atm_data_version, & + atm_data_format, ocn_data_format, & bgc_data_type, & ocn_data_type, ocn_data_dir, wave_spec_file, & oceanmixed_file, restore_ocn, trestore, & @@ -105,37 +97,35 @@ subroutine input_data snw_tau_fname, snw_kappa_fname, snw_drdt0_fname, & snw_rhos_fname, snw_Tgrd_fname, snw_T_fname use ice_arrays_column, only: bgc_data_dir, fe_data_type - use ice_grid, only: & - grid_file, gridcpl_file, kmt_file, & - bathymetry_file, use_bathymetry, 
& - bathymetry_format, kmt_type, & - grid_type, grid_format, & - grid_ice, grid_ice_thrm, grid_ice_dynu, grid_ice_dynv, & - grid_ocn, grid_ocn_thrm, grid_ocn_dynu, grid_ocn_dynv, & - grid_atm, grid_atm_thrm, grid_atm_dynu, grid_atm_dynv, & - dxrect, dyrect, dxscale, dyscale, scale_dxdy, & - lonrefrect, latrefrect, save_ghte_ghtn - use ice_dyn_shared, only: & - ndte, kdyn, revised_evp, yield_curve, & - evp_algorithm, visc_method, & - seabed_stress, seabed_stress_method, & - k1, k2, alphab, threshold_hw, Ktens, & - e_yieldcurve, e_plasticpot, coriolis, & - ssh_stress, kridge, brlx, arlx, & - deltaminEVP, deltaminVP, capping, & - elasticDamp - use ice_dyn_vp, only: & - maxits_nonlin, precond, dim_fgmres, dim_pgmres, maxits_fgmres, & - maxits_pgmres, monitor_nonlin, monitor_fgmres, & - monitor_pgmres, reltol_nonlin, reltol_fgmres, reltol_pgmres, & - algo_nonlin, fpfunc_andacc, dim_andacc, reltol_andacc, & - damping_andacc, start_andacc, use_mean_vrel, ortho_type + use ice_grid, only: grid_file, gridcpl_file, kmt_file, & + bathymetry_file, use_bathymetry, & + bathymetry_format, kmt_type, & + grid_type, grid_format, & + grid_ice, grid_ice_thrm, grid_ice_dynu, grid_ice_dynv, & + grid_ocn, grid_ocn_thrm, grid_ocn_dynu, grid_ocn_dynv, & + grid_atm, grid_atm_thrm, grid_atm_dynu, grid_atm_dynv, & + dxrect, dyrect, dxscale, dyscale, scale_dxdy, & + lonrefrect, latrefrect, pgl_global_ext + use ice_dyn_shared, only: ndte, kdyn, revised_evp, yield_curve, & + evp_algorithm, visc_method, & + seabed_stress, seabed_stress_method, & + k1, k2, alphab, threshold_hw, Ktens, & + e_yieldcurve, e_plasticpot, coriolis, & + ssh_stress, kridge, brlx, arlx, & + deltaminEVP, deltaminVP, capping, & + elasticDamp + + use ice_dyn_vp, only: maxits_nonlin, precond, dim_fgmres, dim_pgmres, maxits_fgmres, & + maxits_pgmres, monitor_nonlin, monitor_fgmres, & + monitor_pgmres, reltol_nonlin, reltol_fgmres, reltol_pgmres, & + algo_nonlin, fpfunc_andacc, dim_andacc, reltol_andacc, & + damping_andacc, 
start_andacc, use_mean_vrel, ortho_type use ice_transport_driver, only: advection, conserv_check use ice_restoring, only: restore_ice use ice_timers, only: timer_stats use ice_memusage, only: memory_stats use ice_fileunits, only: goto_nml - + #ifdef CESMCOUPLED use shr_file_mod, only: shr_file_setIO #endif @@ -151,12 +141,12 @@ subroutine input_data #endif real (kind=dbl_kind) :: ustar_min, albicev, albicei, albsnowv, albsnowi, & - ahmax, R_ice, R_pnd, R_snw, dT_mlt, rsnw_mlt, emissivity, hi_min, & + ahmax, R_ice, R_pnd, R_snw, dT_mlt, rsnw_mlt, emissivity, & mu_rdg, hs0, dpscale, rfracmin, rfracmax, pndaspect, hs1, hp1, & a_rapid_mode, Rac_rapid_mode, aspect_rapid_mode, dSdt_slow_mode, & phi_c_slow_mode, phi_i_mushy, kalg, atmiter_conv, Pstar, Cstar, & sw_frac, sw_dtemp, floediam, hfrazilmin, iceruf, iceruf_ocn, & - rsnw_fall, rsnw_tmax, rhosnew, rhosmin, rhosmax, Tliquidus_max, & + rsnw_fall, rsnw_tmax, rhosnew, rhosmin, rhosmax, & windmin, drhosdwind, snwlvlfac integer (kind=int_kind) :: ktherm, kstrength, krdg_partic, krdg_redist, natmiter, & @@ -164,7 +154,7 @@ subroutine input_data character (len=char_len) :: shortwave, albedo_type, conduct, fbot_xfer_type, & tfrz_option, saltflux_option, frzpnd, atmbndy, wave_spec_type, snwredist, snw_aging_table, & - capping_method, snw_ssp_table + capping_method logical (kind=log_kind) :: calc_Tsfc, formdrag, highfreq, calc_strair, wave_spec, & sw_redist, calc_dragio, use_smliq_pnd, snwgrain @@ -173,14 +163,13 @@ subroutine input_data logical (kind=log_kind) :: tr_iso, tr_aero, tr_fsd, tr_snow logical (kind=log_kind) :: tr_pond_lvl, tr_pond_topo integer (kind=int_kind) :: numin, numax ! unit number limits - logical (kind=log_kind) :: lcdf64 ! deprecated, backwards compatibility integer (kind=int_kind) :: rplvl, rptopo - real (kind=dbl_kind) :: Cf, ksno, puny, ice_ref_salinity, Tocnfrz + real (kind=dbl_kind) :: Cf, ksno, puny, ice_ref_salinity character (len=char_len) :: abort_list character (len=char_len) :: nml_name ! 
namelist name - character (len=char_len_long) :: tmpstr2 + character (len=char_len_long) :: tmpstr2 character(len=*), parameter :: subname='(input_data)' @@ -194,15 +183,10 @@ subroutine input_data runtype, runid, bfbflag, numax, & ice_ic, restart, restart_dir, restart_file, & restart_ext, use_restart_time, restart_format, lcdf64, & - restart_root, restart_stride, restart_iotasks, restart_rearranger, & - restart_deflate, restart_chunksize, & pointer_file, dumpfreq, dumpfreq_n, dump_last, & diagfreq, diag_type, diag_file, history_format,& - history_root, history_stride, history_iotasks, history_rearranger, & - hist_time_axis, & print_global, print_points, latpnt, lonpnt, & debug_forcing, histfreq, histfreq_n, hist_avg, & - hist_suffix, history_deflate, history_chunksize, & history_dir, history_file, history_precision, cpl_bgc, & histfreq_base, dumpfreq_base, timer_stats, memory_stats, & conserv_check, debug_model, debug_model_step, & @@ -237,7 +221,7 @@ subroutine input_data kitd, ktherm, conduct, ksno, & a_rapid_mode, Rac_rapid_mode, aspect_rapid_mode, & dSdt_slow_mode, phi_c_slow_mode, phi_i_mushy, & - floediam, hfrazilmin, Tliquidus_max, hi_min + floediam, hfrazilmin namelist /dynamics_nml/ & kdyn, ndte, revised_evp, yield_curve, & @@ -257,7 +241,7 @@ subroutine input_data Cf, Pstar, Cstar, Ktens namelist /shortwave_nml/ & - shortwave, albedo_type, snw_ssp_table, & + shortwave, albedo_type, & albicev, albicei, albsnowv, albsnowi, & ahmax, R_ice, R_pnd, R_snw, & sw_redist, sw_frac, sw_dtemp, & @@ -280,7 +264,7 @@ subroutine input_data highfreq, natmiter, atmiter_conv, calc_dragio, & ustar_min, emissivity, iceruf, iceruf_ocn, & fbot_xfer_type, update_ocn_f, l_mpond_fresh, tfrz_option, & - saltflux_option,ice_ref_salinity,cpl_frazil, & + saltflux_option,ice_ref_salinity, & oceanmixed_ice, restore_ice, restore_ocn, trestore, & precip_units, default_season, wave_spec_type,nfreq, & atm_data_type, ocn_data_type, bgc_data_type, fe_data_type, & @@ -288,7 +272,7 @@ 
subroutine input_data fyear_init, ycycle, wave_spec_file,restart_coszen, & atm_data_dir, ocn_data_dir, bgc_data_dir, & atm_data_format, ocn_data_format, rotate_wind, & - oceanmixed_file, atm_data_version + oceanmixed_file !----------------------------------------------------------------- ! default values @@ -296,7 +280,7 @@ subroutine input_data abort_list = "" - call icepack_query_parameters(puny_out=puny,Tocnfrz_out=Tocnfrz) + call icepack_query_parameters(puny_out=puny) ! nu_diag not yet defined ! call icepack_warnings_flush(nu_diag) ! if (icepack_warnings_aborted()) call abort_ice(error_message=subname//'Icepack Abort0', & @@ -337,42 +321,26 @@ subroutine input_data histfreq(4) = 'm' ! output frequency option for different streams histfreq(5) = 'y' ! output frequency option for different streams histfreq_n(:) = 1 ! output frequency - histfreq_base(:) = 'zero' ! output frequency reference date - hist_avg(:) = .true. ! if true, write time-averages (not snapshots) - hist_suffix(:) = 'x' ! appended to 'history_file' in filename when not 'x' - history_format = 'cdf1'! history file format - history_root = -99 ! history iotasks, root, stride sets pes for pio - history_stride = -99 ! history iotasks, root, stride sets pes for pio - history_iotasks = -99 ! history iotasks, root, stride sets pes for pio - history_rearranger = 'default' ! history rearranger for pio - hist_time_axis = 'end' ! History file time axis averaging interval position + histfreq_base = 'zero' ! output frequency reference date + hist_avg = .true. ! if true, write time-averages (not snapshots) + history_format = 'default' ! history file format history_dir = './' ! write to executable dir for default history_file = 'iceh' ! history file name prefix history_precision = 4 ! precision of history files - history_deflate = 0 ! compression level for netcdf4 - history_chunksize(:) = 0 ! chunksize for netcdf4 write_ic = .false. ! write out initial condition cpl_bgc = .false. ! 
couple bgc thru driver incond_dir = history_dir ! write to history dir for default incond_file = 'iceh_ic'! file prefix - dumpfreq(:) = 'x' ! restart frequency option - dumpfreq_n(:) = 1 ! restart frequency - dumpfreq_base(:) = 'init' ! restart frequency reference date - dumpfreq(1) = 'y' ! restart frequency option - dumpfreq_n(1) = 1 ! restart frequency + dumpfreq='y' ! restart frequency option + dumpfreq_n = 1 ! restart frequency + dumpfreq_base = 'init' ! restart frequency reference date dump_last = .false. ! write restart on last time step restart_dir = './' ! write to executable dir for default restart_file = 'iced' ! restart file name prefix restart_ext = .false. ! if true, read/write ghost cells restart_coszen = .false. ! if true, read/write coszen pointer_file = 'ice.restart_file' - restart_format = 'cdf1' ! restart file format - restart_root = -99 ! restart iotasks, root, stride sets pes for pio - restart_stride = -99 ! restart iotasks, root, stride sets pes for pio - restart_iotasks = -99 ! restart iotasks, root, stride sets pes for pio - restart_rearranger = 'default' ! restart rearranger for pio - restart_deflate = 0 ! compression level for netcdf4 - restart_chunksize(:) = 0 ! chunksize for netcdf4 + restart_format = 'default' ! restart file format lcdf64 = .false. ! 64 bit offset for netCDF ice_ic = 'default' ! latitude and sst-dependent grid_format = 'bin' ! file format ('bin'=binary or 'nc'=netcdf) @@ -402,7 +370,7 @@ subroutine input_data ndte = 120 ! subcycles per dynamics timestep: ndte=dt_dyn/dte evp_algorithm = 'standard_2d' ! EVP kernel (standard_2d=standard cice evp; shared_mem_1d=1d shared memory and no mpi elasticDamp = 0.36_dbl_kind ! coefficient for calculating the parameter E - save_ghte_ghtn = .false. ! if true, save global hte and htn (global ext.) + pgl_global_ext = .false. ! if true, init primary grid lengths (global ext.) brlx = 300.0_dbl_kind ! revised_evp values. Otherwise overwritten in ice_dyn_shared arlx = 300.0_dbl_kind ! 
revised_evp values. Otherwise overwritten in ice_dyn_shared revised_evp = .false. ! if true, use revised procedure for evp dynamics @@ -424,7 +392,7 @@ subroutine input_data dyscale = 1.0_dbl_kind ! user defined rectgrid y-grid scale factor (e.g., 1.02) close_boundaries = .false. ! true = set land on edges of grid seabed_stress= .false. ! if true, seabed stress for landfast is on - seabed_stress_method = 'LKD'! LKD = Lemieux et al 2015, probabilistic = Dupont et al. 2022 + seabed_stress_method = 'LKD'! LKD = Lemieux et al 2015, probabilistic = Dupont et al. in prep k1 = 7.5_dbl_kind ! 1st free parameter for landfast parameterization k2 = 15.0_dbl_kind ! 2nd free parameter (N/m^3) for landfast parametrization alphab = 20.0_dbl_kind ! alphab=Cb factor in Lemieux et al 2015 @@ -461,7 +429,6 @@ subroutine input_data advection = 'remap' ! incremental remapping transport scheme conserv_check = .false. ! tracer conservation check shortwave = 'ccsm3' ! 'ccsm3' or 'dEdd' (delta-Eddington) - snw_ssp_table = 'test' ! 'test' or 'snicar' dEdd_snicar_ad table data albedo_type = 'ccsm3' ! 'ccsm3' or 'constant' ktherm = 1 ! -1 = OFF, 1 = BL99, 2 = mushy thermo conduct = 'bubbly' ! 'MU71' or 'bubbly' (Pringle et al 2007) @@ -471,9 +438,7 @@ subroutine input_data ktransport = 1 ! -1 = off, 1 = on calc_Tsfc = .true. ! calculate surface temperature update_ocn_f = .false. ! include fresh water and salt fluxes for frazil - cpl_frazil = 'fresh_ice_correction' ! type of coupling for frazil ice ustar_min = 0.005 ! minimum friction velocity for ocean heat flux (m/s) - hi_min = p01 ! minimum ice thickness allowed (m) iceruf = 0.0005_dbl_kind ! ice surface roughness at atmosphere interface (m) iceruf_ocn = 0.03_dbl_kind ! under-ice roughness (m) calc_dragio = .false. ! compute dragio from iceruf_ocn and thickness of first ocean level @@ -528,7 +493,6 @@ subroutine input_data atm_data_format = 'bin' ! 
file format ('bin'=binary or 'nc'=netcdf) atm_data_type = 'default' atm_data_dir = ' ' - atm_data_version = '_undef' ! date atm_data_file was generated. rotate_wind = .true. ! rotate wind/stress composants to computational grid orientation calc_strair = .true. ! calculate wind stress formdrag = .false. ! calculate form drag @@ -608,7 +572,6 @@ subroutine input_data dSdt_slow_mode = -1.5e-7_dbl_kind ! slow mode drainage strength (m s-1 K-1) phi_c_slow_mode = 0.05_dbl_kind ! critical liquid fraction porosity cutoff phi_i_mushy = 0.85_dbl_kind ! liquid fraction of congelation ice - Tliquidus_max = 0.00_dbl_kind ! maximum liquidus temperature of mush (C) floediam = 300.0_dbl_kind ! min thickness of new frazil ice (m) hfrazilmin = 0.05_dbl_kind ! effective floe diameter (m) @@ -646,7 +609,7 @@ subroutine input_data call abort_ice(subname//'ERROR: searching for '// trim(nml_name), & file=__FILE__, line=__LINE__) endif - + ! read namelist nml_error = 1 do while (nml_error > 0) @@ -694,7 +657,7 @@ subroutine input_data call abort_ice(subname//'ERROR: searching for '// trim(nml_name), & file=__FILE__, line=__LINE__) endif - + ! read namelist nml_error = 1 do while (nml_error > 0) @@ -718,7 +681,7 @@ subroutine input_data call abort_ice(subname//'ERROR: searching for '// trim(nml_name), & file=__FILE__, line=__LINE__) endif - + ! read namelist nml_error = 1 do while (nml_error > 0) @@ -736,7 +699,7 @@ subroutine input_data ! read dynamics_nml nml_name = 'dynamics_nml' write(nu_diag,*) subname,' Reading ', trim(nml_name) - + ! goto namelist in file call goto_nml(nu_nml,trim(nml_name),nml_error) if (nml_error /= 0) then @@ -761,7 +724,7 @@ subroutine input_data ! read shortwave_nml nml_name = 'shortwave_nml' write(nu_diag,*) subname,' Reading ', trim(nml_name) - + ! goto namelist in file call goto_nml(nu_nml,trim(nml_name),nml_error) if (nml_error /= 0) then @@ -786,14 +749,14 @@ subroutine input_data ! 
read ponds_nml nml_name = 'ponds_nml' write(nu_diag,*) subname,' Reading ', trim(nml_name) - + ! goto namelist in file call goto_nml(nu_nml,trim(nml_name),nml_error) if (nml_error /= 0) then call abort_ice(subname//'ERROR: searching for '// trim(nml_name), & file=__FILE__, line=__LINE__) endif - + ! read namelist nml_error = 1 do while (nml_error > 0) @@ -811,14 +774,14 @@ subroutine input_data ! read snow_nml nml_name = 'snow_nml' write(nu_diag,*) subname,' Reading ', trim(nml_name) - + ! goto namelist in file call goto_nml(nu_nml,trim(nml_name),nml_error) if (nml_error /= 0) then call abort_ice(subname//'ERROR: searching for '// trim(nml_name), & file=__FILE__, line=__LINE__) endif - + ! read namelist nml_error = 1 do while (nml_error > 0) @@ -858,7 +821,7 @@ subroutine input_data endif end do - ! done reading namelist. + ! done reading namelist. close(nu_nml) call release_fileunit(nu_nml) endif @@ -935,29 +898,21 @@ subroutine input_data call broadcast_scalar(diag_file, master_task) do n = 1, max_nstrm call broadcast_scalar(histfreq(n), master_task) - call broadcast_scalar(histfreq_base(n), master_task) - call broadcast_scalar(dumpfreq(n), master_task) - call broadcast_scalar(dumpfreq_base(n), master_task) - call broadcast_scalar(hist_suffix(n), master_task) enddo - call broadcast_array(hist_avg, master_task) call broadcast_array(histfreq_n, master_task) - call broadcast_array(dumpfreq_n, master_task) + call broadcast_scalar(histfreq_base, master_task) + call broadcast_scalar(hist_avg, master_task) call broadcast_scalar(history_dir, master_task) call broadcast_scalar(history_file, master_task) call broadcast_scalar(history_precision, master_task) call broadcast_scalar(history_format, master_task) - call broadcast_scalar(history_iotasks, master_task) - call broadcast_scalar(history_root, master_task) - call broadcast_scalar(history_stride, master_task) - call broadcast_scalar(history_rearranger, master_task) - call broadcast_scalar(hist_time_axis, master_task) - 
call broadcast_scalar(history_deflate, master_task) - call broadcast_array(history_chunksize, master_task) call broadcast_scalar(write_ic, master_task) call broadcast_scalar(cpl_bgc, master_task) call broadcast_scalar(incond_dir, master_task) call broadcast_scalar(incond_file, master_task) + call broadcast_scalar(dumpfreq, master_task) + call broadcast_scalar(dumpfreq_n, master_task) + call broadcast_scalar(dumpfreq_base, master_task) call broadcast_scalar(dump_last, master_task) call broadcast_scalar(restart_file, master_task) call broadcast_scalar(restart, master_task) @@ -966,12 +921,6 @@ subroutine input_data call broadcast_scalar(restart_coszen, master_task) call broadcast_scalar(use_restart_time, master_task) call broadcast_scalar(restart_format, master_task) - call broadcast_scalar(restart_iotasks, master_task) - call broadcast_scalar(restart_root, master_task) - call broadcast_scalar(restart_stride, master_task) - call broadcast_scalar(restart_rearranger, master_task) - call broadcast_scalar(restart_deflate, master_task) - call broadcast_array(restart_chunksize, master_task) call broadcast_scalar(lcdf64, master_task) call broadcast_scalar(pointer_file, master_task) call broadcast_scalar(ice_ic, master_task) @@ -1003,6 +952,7 @@ subroutine input_data call broadcast_scalar(ndte, master_task) call broadcast_scalar(evp_algorithm, master_task) call broadcast_scalar(elasticDamp, master_task) + call broadcast_scalar(pgl_global_ext, master_task) call broadcast_scalar(brlx, master_task) call broadcast_scalar(arlx, master_task) call broadcast_scalar(revised_evp, master_task) @@ -1031,7 +981,6 @@ subroutine input_data call broadcast_scalar(advection, master_task) call broadcast_scalar(conserv_check, master_task) call broadcast_scalar(shortwave, master_task) - call broadcast_scalar(snw_ssp_table, master_task) call broadcast_scalar(albedo_type, master_task) call broadcast_scalar(ktherm, master_task) call broadcast_scalar(coriolis, master_task) @@ -1104,7 +1053,6 @@ 
subroutine input_data call broadcast_scalar(atm_data_format, master_task) call broadcast_scalar(atm_data_type, master_task) call broadcast_scalar(atm_data_dir, master_task) - call broadcast_scalar(atm_data_version, master_task) call broadcast_scalar(rotate_wind, master_task) call broadcast_scalar(calc_strair, master_task) call broadcast_scalar(calc_Tsfc, master_task) @@ -1113,10 +1061,8 @@ subroutine input_data call broadcast_scalar(natmiter, master_task) call broadcast_scalar(atmiter_conv, master_task) call broadcast_scalar(update_ocn_f, master_task) - call broadcast_scalar(cpl_frazil, master_task) call broadcast_scalar(l_mpond_fresh, master_task) call broadcast_scalar(ustar_min, master_task) - call broadcast_scalar(hi_min, master_task) call broadcast_scalar(iceruf, master_task) call broadcast_scalar(iceruf_ocn, master_task) call broadcast_scalar(calc_dragio, master_task) @@ -1191,7 +1137,6 @@ subroutine input_data call broadcast_scalar(dSdt_slow_mode, master_task) call broadcast_scalar(phi_c_slow_mode, master_task) call broadcast_scalar(phi_i_mushy, master_task) - call broadcast_scalar(Tliquidus_max, master_task) call broadcast_scalar(sw_redist, master_task) call broadcast_scalar(sw_frac, master_task) call broadcast_scalar(sw_dtemp, master_task) @@ -1269,95 +1214,6 @@ subroutine input_data abort_list = trim(abort_list)//":1" endif - if (history_format /= 'cdf1' .and. & - history_format /= 'cdf2' .and. & - history_format /= 'cdf5' .and. & - history_format /= 'hdf5' .and. & - history_format /= 'pnetcdf1' .and. & - history_format /= 'pnetcdf2' .and. & - history_format /= 'pnetcdf5' .and. & - history_format /= 'pio_netcdf' .and. & ! backwards compatibility - history_format /= 'pio_pnetcdf' .and. & ! backwards compatibility - history_format /= 'binary' .and. & - history_format /= 'default') then ! 
backwards compatibility - if (my_task == master_task) then - write(nu_diag,*) subname//' ERROR: history_format unknown = ',trim(history_format) - endif - abort_list = trim(abort_list)//":50" - endif - - if (restart_format /= 'cdf1' .and. & - restart_format /= 'cdf2' .and. & - restart_format /= 'cdf5' .and. & - restart_format /= 'hdf5' .and. & - restart_format /= 'pnetcdf1' .and. & - restart_format /= 'pnetcdf2' .and. & - restart_format /= 'pnetcdf5' .and. & - restart_format /= 'pio_netcdf' .and. & ! backwards compatibility - restart_format /= 'pio_pnetcdf' .and. & ! backwards compatibility - restart_format /= 'binary' .and. & - restart_format /= 'default') then ! backwards compatibility - if (my_task == master_task) then - write(nu_diag,*) subname//' ERROR: restart_format unknown = ',trim(restart_format) - endif - abort_list = trim(abort_list)//":51" - endif - - ! backwards compatibility for history and restart formats, lcdf64 - - if (history_format == 'pio_pnetcdf' .or. history_format == 'pio_netcdf') then - if (my_task == master_task) then - write(nu_diag,*) subname//' WARNING: history_format='//trim(history_format)// & - ' is deprecated, please update namelist settings' - endif - endif - if (restart_format == 'pio_pnetcdf' .or. restart_format == 'pio_netcdf') then - if (my_task == master_task) then - write(nu_diag,*) subname//' WARNING: restart_format='//trim(restart_format)// & - ' is deprecated, please update namelist settings' - endif - endif - - if (lcdf64) then - if (my_task == master_task) then - write(nu_diag,*) subname//' WARNING: lcdf64 is deprecated, please update namelist settings' - endif - - if (history_format == 'default' .or. 
history_format == 'pio_netcdf') then - history_format = 'cdf2' - elseif (history_format == 'pio_pnetcdf') then - history_format = 'pnetcdf2' - else - if (my_task == master_task) then - write(nu_diag,*) subname//' ERROR: lcdf64 is T and history_format not supported for '//trim(history_format) - endif - abort_list = trim(abort_list)//":52" - endif - - if (restart_format == 'default' .or. restart_format == 'pio_netcdf') then - restart_format = 'cdf2' - elseif (restart_format == 'pio_pnetcdf') then - restart_format = 'pnetcdf2' - else - if (my_task == master_task) then - write(nu_diag,*) subname//' ERROR: lcdf64 is T and restart_format not supported for '//trim(restart_format) - endif - abort_list = trim(abort_list)//":53" - endif - else - if (history_format == 'default' .or. history_format == 'pio_netcdf') then - history_format = 'cdf1' - elseif (history_format == 'pio_pnetcdf') then - history_format = 'pnetcdf1' - endif - - if (restart_format == 'default' .or. restart_format == 'pio_netcdf') then - restart_format = 'cdf1' - elseif (restart_format == 'pio_pnetcdf') then - restart_format = 'pnetcdf1' - endif - endif - if (ktransport <= 0) then advection = 'none' endif @@ -1386,10 +1242,6 @@ subroutine input_data abort_list = trim(abort_list)//":5" endif - if (kdyn == 1 .and. evp_algorithm == 'shared_mem_1d') then - save_ghte_ghtn = .true. - endif - if (kdyn == 2 .and. revised_evp) then if (my_task == master_task) then write(nu_diag,*) subname//' WARNING: revised_evp = T with EAP dynamics' @@ -1411,7 +1263,7 @@ subroutine input_data write(nu_diag,*) subname//' ERROR: invalid seabed stress method' write(nu_diag,*) subname//' ERROR: seabed_stress_method should be LKD or probabilistic' endif - abort_list = trim(abort_list)//":48" + abort_list = trim(abort_list)//":34" endif endif @@ -1428,10 +1280,10 @@ subroutine input_data endif if (grid_ice == 'C' .or. grid_ice == 'CD') then - if (kdyn > 1 .or. (kdyn == 1 .and. 
evp_algorithm /= 'standard_2d')) then + if (kdyn > 1) then if (my_task == master_task) then - write(nu_diag,*) subname//' ERROR: grid_ice = C | CD only supported with kdyn=1 and evp_algorithm=standard_2d' - write(nu_diag,*) subname//' ERROR: kdyn and/or evp_algorithm and grid_ice inconsistency' + write(nu_diag,*) subname//' ERROR: grid_ice = C | CD only supported with kdyn<=1 (evp or off)' + write(nu_diag,*) subname//' ERROR: kdyn and grid_ice inconsistency' endif abort_list = trim(abort_list)//":46" endif @@ -1444,15 +1296,6 @@ subroutine input_data endif endif - if (evp_algorithm == 'shared_mem_1d' .and. & - grid_type == 'tripole') then - if (my_task == master_task) then - write(nu_diag,*) subname//' ERROR: evp_algorithm=shared_mem_1d is not tested for gridtype=tripole' - write(nu_diag,*) subname//' ERROR: change evp_algorithm to standard_2d' - endif - abort_list = trim(abort_list)//":49" - endif - capping = -9.99e30 if (kdyn == 1 .or. kdyn == 3) then if (capping_method == 'max') then @@ -1501,15 +1344,15 @@ subroutine input_data abort_list = trim(abort_list)//":7" endif - if (shortwave(1:4) /= 'dEdd' .and. tr_pond .and. calc_tsfc) then + if (trim(shortwave) /= 'dEdd' .and. tr_pond .and. calc_tsfc) then if (my_task == master_task) then write(nu_diag,*) subname//' ERROR: tr_pond=T, calc_tsfc=T, invalid shortwave' - write(nu_diag,*) subname//' ERROR: Must use shortwave=dEdd or dEdd_snicar_ad' + write(nu_diag,*) subname//' ERROR: Must use shortwave=dEdd' endif abort_list = trim(abort_list)//":8" endif - if (snwredist(1:3) == 'ITD' .and. .not. tr_snow) then + if (snwredist(1:4) /= 'none' .and. .not. tr_snow) then if (my_task == master_task) then write (nu_diag,*) 'ERROR: snwredist on but tr_snow=F' write (nu_diag,*) 'ERROR: Use tr_snow=T for snow redistribution' @@ -1617,20 +1460,19 @@ subroutine input_data abort_list = trim(abort_list)//":36" endif - if (shortwave(1:4) /= 'dEdd' .and. tr_aero) then + if (trim(shortwave) /= 'dEdd' .and. 
tr_aero) then if (my_task == master_task) then write(nu_diag,*) subname//' ERROR: tr_aero=T, invalid shortwave' - write(nu_diag,*) subname//' ERROR: Must use shortwave=dEdd or dEdd_snicar_ad' + write(nu_diag,*) subname//' ERROR: Must use shortwave=dEdd' endif abort_list = trim(abort_list)//":10" endif - if (shortwave(1:4) /= 'dEdd' .and. snwgrain) then + if (trim(shortwave) /= 'dEdd' .and. snwgrain) then if (my_task == master_task) then - write (nu_diag,*) subname//' ERROR: snow grain radius is activated' - write (nu_diag,*) subname//' ERROR: Must use shortwave=dEdd or dEdd_snicar_ad' + write (nu_diag,*) 'WARNING: snow grain radius activated but' + write (nu_diag,*) 'WARNING: dEdd shortwave is not.' endif - abort_list = trim(abort_list)//":17" endif if ((rfracmin < -puny .or. rfracmin > c1+puny) .or. & @@ -1655,7 +1497,8 @@ subroutine input_data abort_list = trim(abort_list)//":13" endif -! ech: allow inconsistency for testing sensitivities. It's not recommended for science runs +! tcraig, is it really OK for users to run inconsistently? +! ech: yes, for testing sensitivities. It's not recommended for science runs if (ktherm == 1 .and. trim(tfrz_option) /= 'linear_salt') then if (my_task == master_task) then write(nu_diag,*) subname//' WARNING: ktherm = 1 and tfrz_option = ',trim(tfrz_option) @@ -1674,6 +1517,7 @@ subroutine input_data write(nu_diag,*) subname//' WARNING: For consistency, set saltflux_option = constant' endif endif +!tcraig if (ktherm == 1 .and. .not.sw_redist) then if (my_task == master_task) then write(nu_diag,*) subname//' WARNING: ktherm = 1 and sw_redist = ',sw_redist @@ -1716,88 +1560,32 @@ subroutine input_data abort_list = trim(abort_list)//":19" endif - if (history_precision .ne. 4 .and. history_precision .ne. 8) then + if(history_precision .ne. 4 .and. history_precision .ne. 
8) then write (nu_diag,*) subname//' ERROR: bad value for history_precision, allowed values: 4, 8' abort_list = trim(abort_list)//":22" endif - do n = 1,max_nstrm - if (histfreq_base(n) /= 'init' .and. histfreq_base(n) /= 'zero') then - write (nu_diag,*) subname//' ERROR: bad value for histfreq_base, allowed values: init, zero: '//trim(histfreq_base(n)) - abort_list = trim(abort_list)//":24" - endif - - if (dumpfreq_base(n) /= 'init' .and. dumpfreq_base(n) /= 'zero') then - write (nu_diag,*) subname//' ERROR: bad value for dumpfreq_base, allowed values: init, zero: '//trim(dumpfreq_base(n)) - abort_list = trim(abort_list)//":25" - endif - - if (.not.(scan(dumpfreq(n)(1:1),'ymdhx1YMDHX') == 1 .and. (dumpfreq(n)(2:2) == '1' .or. dumpfreq(n)(2:2) == ' '))) then - if (my_task == master_task) then - write(nu_diag,*) subname//' WARNING: unrecognized dumpfreq=', trim(dumpfreq(n)) - write(nu_diag,*) subname//' WARNING: No restarts files will be written for this stream' - write(nu_diag,*) subname//' WARNING: Allowed values : y,m,d,h,x,1 followed by an optional 1' - endif - dumpfreq(n) = 'x' - endif - enddo - - if (trim(hist_time_axis) /= 'begin' .and. trim(hist_time_axis) /= 'middle' .and. trim(hist_time_axis) /= 'end') then - write (nu_diag,*) subname//' ERROR: hist_time_axis value not valid = '//trim(hist_time_axis) - abort_list = trim(abort_list)//":29" - endif - -#ifdef USE_PIO1 - if (history_deflate/=0 .or. restart_deflate/=0 .or. & - history_chunksize(1)/=0 .or. history_chunksize(2)/=0 .or. & - restart_chunksize(1)/=0 .or. restart_chunksize(2)/=0) then - if (my_task == master_task) write (nu_diag,*) subname//' ERROR: _deflate and _chunksize not compatible with PIO1' - abort_list = trim(abort_list)//":54" - endif -#else -#ifndef CESMCOUPLED - ! history_format not used by nuopc driver - if (history_format/='hdf5' .and. 
history_deflate/=0) then - if (my_task == master_task) then - write (nu_diag,*) subname//' WARNING: history_deflate not compatible with '//history_format - write (nu_diag,*) subname//' WARNING: netcdf compression only possible with history_type="hdf5" ' - endif - endif - - if (history_format/='hdf5' .and. (history_chunksize(1)/=0 .or. history_chunksize(2)/=0)) then - if (my_task == master_task) then - write (nu_diag,*) subname//' WARNING: history_chunksize not compatible with '//history_format - write (nu_diag,*) subname//' WARNING: netcdf chunking only possible with history_type="hdf5" ' - endif + if(histfreq_base /= 'init' .and. histfreq_base /= 'zero') then + write (nu_diag,*) subname//' ERROR: bad value for histfreq_base, allowed values: init, zero' + abort_list = trim(abort_list)//":24" endif - if (restart_format/='hdf5' .and. restart_deflate/=0) then - if (my_task == master_task) then - write (nu_diag,*) subname//' WARNING: restart_deflate not compatible with '//restart_format - write (nu_diag,*) subname//' WARNING: netcdf compression only possible with restart_type="hdf5" ' - endif + if(dumpfreq_base /= 'init' .and. dumpfreq_base /= 'zero') then + write (nu_diag,*) subname//' ERROR: bad value for dumpfreq_base, allowed values: init, zero' + abort_list = trim(abort_list)//":25" endif - if (restart_format/='hdf5' .and. (restart_chunksize(1)/=0 .or. restart_chunksize(2)/=0)) then + if (.not.(trim(dumpfreq) == 'y' .or. trim(dumpfreq) == 'Y' .or. & + trim(dumpfreq) == 'm' .or. trim(dumpfreq) == 'M' .or. & + trim(dumpfreq) == 'd' .or. trim(dumpfreq) == 'D' .or. & + trim(dumpfreq) == 'h' .or. trim(dumpfreq) == 'H' .or. 
& + trim(dumpfreq) == '1' )) then if (my_task == master_task) then - write (nu_diag,*) subname//' WARNING: restart_chunksize not compatible with '//restart_format - write (nu_diag,*) subname//' WARNING: netcdf chunking only possible with restart_type="hdf5" ' + write(nu_diag,*) subname//' WARNING: unrecognized dumpfreq=', trim(dumpfreq) + write(nu_diag,*) subname//' WARNING: No restarts files will be written' + write(nu_diag,*) subname//' WARNING: Allowed values : ''y'', ''m'', ''d'', ''h'', ''1''' endif endif -#endif - - if (history_deflate<0 .or. history_deflate>9) then - if (my_task == master_task) write (nu_diag,*) subname//& - ' ERROR: history_deflate value not valid. Allowed range: integers from 0 to 9 ' - abort_list = trim(abort_list)//":55" - endif - - if (restart_deflate<0 .or. restart_deflate>9) then - if (my_task == master_task) write (nu_diag,*) subname//& - ' ERROR: restart_deflate value not valid. Allowed range: integers from 0 to 9 ' - abort_list = trim(abort_list)//":56" - endif -#endif ! Implicit solver input validation if (kdyn == 3) then @@ -1966,7 +1754,7 @@ subroutine input_data write(nu_diag,1020) ' nilyr = ', nilyr, ' : number of ice layers (equal thickness)' write(nu_diag,1020) ' nslyr = ', nslyr, ' : number of snow layers (equal thickness)' write(nu_diag,1020) ' nblyr = ', nblyr, ' : number of bio layers (equal thickness)' - if (shortwave(1:4) == 'dEdd') & + if (trim(shortwave) == 'dEdd') & write(nu_diag,*) 'dEdd interior and sfc scattering layers are used in both ice, snow (unequal)' write(nu_diag,1020) ' ncat = ', ncat, ' : number of ice categories' if (kcatbound == 0) then @@ -2026,6 +1814,7 @@ subroutine input_data tmpstr2 = ' : standard 2d EVP solver' elseif (evp_algorithm == 'shared_mem_1d') then tmpstr2 = ' : vectorized 1d EVP solver' + pgl_global_ext = .true. 
else tmpstr2 = ' : unknown value' endif @@ -2201,7 +1990,6 @@ subroutine input_data write(nu_diag,1009) ' dSdt_slow_mode = ', dSdt_slow_mode,' : drainage strength parameter' write(nu_diag,1002) ' phi_c_slow_mode = ', phi_c_slow_mode,' : critical liquid fraction' write(nu_diag,1002) ' phi_i_mushy = ', phi_i_mushy,' : solid fraction at lower boundary' - write(nu_diag,1002) ' Tliquidus_max = ', Tliquidus_max,' : max mush liquidus temperature' endif write(nu_diag,1002) ' hfrazilmin = ', hfrazilmin,' : minimum new frazil ice thickness' @@ -2210,24 +1998,19 @@ subroutine input_data write(nu_diag,*) '--------------------------------' if (trim(shortwave) == 'dEdd') then tmpstr2 = ' : delta-Eddington multiple-scattering method' - elseif (trim(shortwave) == 'dEdd_snicar_ad') then - tmpstr2 = ' : delta-Eddington multiple-scattering method with SNICAR AD' elseif (trim(shortwave) == 'ccsm3') then tmpstr2 = ' : NCAR CCSM3 distribution method' else tmpstr2 = ' : unknown value' endif write(nu_diag,1030) ' shortwave = ', trim(shortwave),trim(tmpstr2) - if (shortwave(1:4) == 'dEdd') then + if (trim(shortwave) == 'dEdd') then write(nu_diag,1002) ' R_ice = ', R_ice,' : tuning parameter for sea ice albedo' write(nu_diag,1002) ' R_pnd = ', R_pnd,' : tuning parameter for ponded sea ice albedo' write(nu_diag,1002) ' R_snw = ', R_snw,' : tuning parameter for snow broadband albedo' write(nu_diag,1002) ' dT_mlt = ', dT_mlt,' : change in temperature per change in snow grain radius' write(nu_diag,1002) ' rsnw_mlt = ', rsnw_mlt,' : maximum melting snow grain radius' write(nu_diag,1002) ' kalg = ', kalg,' : absorption coefficient for algae' - if (trim(shortwave) == 'dEdd_snicar_ad') then - write(nu_diag,1030) ' snw_ssp_table = ', trim(snw_ssp_table) - endif else if (trim(albedo_type) == 'ccsm3') then tmpstr2 = ' : NCAR CCSM3 albedos' @@ -2298,10 +2081,8 @@ subroutine input_data if (trim(saltflux_option) == 'constant') then write(nu_diag,1002) ' ice_ref_salinity = ',ice_ref_salinity endif - if 
(trim(tfrz_option) == 'constant') then - tmpstr2 = ' : constant ocean freezing temperature (Tocnfrz)' - elseif (trim(tfrz_option) == 'minus1p8') then - tmpstr2 = ' : constant ocean freezing temperature (-1.8C) (to be deprecated)' + if (trim(tfrz_option) == 'minus1p8') then + tmpstr2 = ' : constant ocean freezing temperature (-1.8C)' elseif (trim(tfrz_option) == 'linear_salt') then tmpstr2 = ' : linear function of salinity (use with ktherm=1)' elseif (trim(tfrz_option) == 'mushy') then @@ -2310,16 +2091,12 @@ subroutine input_data tmpstr2 = ' : unknown value' endif write(nu_diag,1030) ' tfrz_option = ', trim(tfrz_option),trim(tmpstr2) - if (trim(tfrz_option) == 'constant') then - write(nu_diag,1002) ' Tocnfrz = ', Tocnfrz - endif if (update_ocn_f) then tmpstr2 = ' : frazil water/salt fluxes included in ocean fluxes' else tmpstr2 = ' : frazil water/salt fluxes not included in ocean fluxes' endif write(nu_diag,1010) ' update_ocn_f = ', update_ocn_f,trim(tmpstr2) - write(nu_diag,1030) ' cpl_frazil = ', trim(cpl_frazil) if (l_mpond_fresh .and. 
tr_pond_topo) then tmpstr2 = ' : retain (topo) pond water until ponds drain' else @@ -2335,14 +2112,13 @@ subroutine input_data endif write(nu_diag,1030) ' fbot_xfer_type = ', trim(fbot_xfer_type),trim(tmpstr2) write(nu_diag,1000) ' ustar_min = ', ustar_min,' : minimum value of ocean friction velocity' - write(nu_diag,1000) ' hi_min = ', hi_min,' : minimum ice thickness allowed (m)' if (calc_dragio) then tmpstr2 = ' : dragio computed from iceruf_ocn' else tmpstr2 = ' : dragio hard-coded' endif write(nu_diag,1010) ' calc_dragio = ', calc_dragio,trim(tmpstr2) - if (calc_dragio) then + if(calc_dragio) then write(nu_diag,1002) ' iceruf_ocn = ', iceruf_ocn,' : under-ice roughness length' endif @@ -2405,7 +2181,7 @@ subroutine input_data write(nu_diag,*) 'Using default dEdd melt pond scheme for testing only' endif - if (shortwave(1:4) == 'dEdd') then + if (trim(shortwave) == 'dEdd') then write(nu_diag,1002) ' hs0 = ', hs0,' : snow depth of transition to bare sea ice' endif @@ -2534,40 +2310,27 @@ subroutine input_data write(nu_diag,1021) ' numax = ', numax write(nu_diag,1033) ' histfreq = ', histfreq(:) write(nu_diag,1023) ' histfreq_n = ', histfreq_n(:) - write(nu_diag,1033) ' histfreq_base = ', histfreq_base(:) - write(nu_diag,1013) ' hist_avg = ', hist_avg(:) - write(nu_diag,1033) ' hist_suffix = ', hist_suffix(:) + write(nu_diag,1031) ' histfreq_base = ', trim(histfreq_base) + write(nu_diag,1011) ' hist_avg = ', hist_avg + if (.not. 
hist_avg) write(nu_diag,1039) ' History data will be snapshots' write(nu_diag,1031) ' history_dir = ', trim(history_dir) write(nu_diag,1031) ' history_file = ', trim(history_file) write(nu_diag,1021) ' history_precision= ', history_precision write(nu_diag,1031) ' history_format = ', trim(history_format) - write(nu_diag,1031) ' history_rearranger = ', trim(history_rearranger) - write(nu_diag,1021) ' history_iotasks = ', history_iotasks - write(nu_diag,1021) ' history_root = ', history_root - write(nu_diag,1021) ' history_stride = ', history_stride - write(nu_diag,1031) ' hist_time_axis = ', trim(hist_time_axis) - write(nu_diag,1021) ' history_deflate = ', history_deflate - write(nu_diag,1023) ' history_chunksize= ', history_chunksize if (write_ic) then write(nu_diag,1039) ' Initial condition will be written in ', & trim(incond_dir) endif - write(nu_diag,1033) ' dumpfreq = ', dumpfreq(:) - write(nu_diag,1023) ' dumpfreq_n = ', dumpfreq_n(:) - write(nu_diag,1033) ' dumpfreq_base = ', dumpfreq_base(:) + write(nu_diag,1031) ' dumpfreq = ', trim(dumpfreq) + write(nu_diag,1021) ' dumpfreq_n = ', dumpfreq_n + write(nu_diag,1031) ' dumpfreq_base = ', trim(dumpfreq_base) write(nu_diag,1011) ' dump_last = ', dump_last write(nu_diag,1011) ' restart = ', restart write(nu_diag,1031) ' restart_dir = ', trim(restart_dir) write(nu_diag,1011) ' restart_ext = ', restart_ext write(nu_diag,1011) ' restart_coszen = ', restart_coszen write(nu_diag,1031) ' restart_format = ', trim(restart_format) - write(nu_diag,1021) ' restart_deflate = ', restart_deflate - write(nu_diag,1023) ' restart_chunksize= ', restart_chunksize -! write(nu_diag,1011) ' lcdf64 = ', lcdf64 ! 
deprecated - write(nu_diag,1031) ' restart_rearranger = ', trim(restart_rearranger) - write(nu_diag,1021) ' restart_iotasks = ', restart_iotasks - write(nu_diag,1021) ' restart_root = ', restart_root - write(nu_diag,1021) ' restart_stride = ', restart_stride + write(nu_diag,1011) ' lcdf64 = ', lcdf64 write(nu_diag,1031) ' restart_file = ', trim(restart_file) write(nu_diag,1031) ' pointer_file = ', trim(pointer_file) write(nu_diag,1011) ' use_restart_time = ', use_restart_time @@ -2587,12 +2350,10 @@ subroutine input_data write(nu_diag,1021) ' fyear_init = ', fyear_init write(nu_diag,1021) ' ycycle = ', ycycle write(nu_diag,1031) ' atm_data_type = ', trim(atm_data_type) - write(nu_diag,1031) ' atm_data_version = ', trim(atm_data_version) - if (trim(atm_data_type) /= 'default') then write(nu_diag,1031) ' atm_data_dir = ', trim(atm_data_dir) write(nu_diag,1031) ' precip_units = ', trim(precip_units) - elseif (trim(atm_data_type) == 'default') then + elseif (trim(atm_data_type)=='default') then write(nu_diag,1031) ' default_season = ', trim(default_season) endif @@ -2675,8 +2436,6 @@ subroutine input_data if (kmt_type /= 'file' .and. & kmt_type /= 'channel' .and. & - kmt_type /= 'channel_oneeast' .and. & - kmt_type /= 'channel_onenorth' .and. & kmt_type /= 'wall' .and. & kmt_type /= 'default' .and. 
& kmt_type /= 'boxislands') then @@ -2710,7 +2469,7 @@ subroutine input_data call icepack_init_parameters(ustar_min_in=ustar_min, albicev_in=albicev, albicei_in=albicei, & albsnowv_in=albsnowv, albsnowi_in=albsnowi, natmiter_in=natmiter, atmiter_conv_in=atmiter_conv, & - emissivity_in=emissivity, snw_ssp_table_in=snw_ssp_table, hi_min_in=hi_min, & + emissivity_in=emissivity, & ahmax_in=ahmax, shortwave_in=shortwave, albedo_type_in=albedo_type, R_ice_in=R_ice, R_pnd_in=R_pnd, & R_snw_in=R_snw, dT_mlt_in=dT_mlt, rsnw_mlt_in=rsnw_mlt, & kstrength_in=kstrength, krdg_partic_in=krdg_partic, krdg_redist_in=krdg_redist, mu_rdg_in=mu_rdg, & @@ -2719,11 +2478,11 @@ subroutine input_data rfracmin_in=rfracmin, rfracmax_in=rfracmax, pndaspect_in=pndaspect, hs1_in=hs1, hp1_in=hp1, & ktherm_in=ktherm, calc_Tsfc_in=calc_Tsfc, conduct_in=conduct, & a_rapid_mode_in=a_rapid_mode, Rac_rapid_mode_in=Rac_rapid_mode, & - floediam_in=floediam, hfrazilmin_in=hfrazilmin, Tliquidus_max_in=Tliquidus_max, & + floediam_in=floediam, hfrazilmin_in=hfrazilmin, & aspect_rapid_mode_in=aspect_rapid_mode, dSdt_slow_mode_in=dSdt_slow_mode, & phi_c_slow_mode_in=phi_c_slow_mode, phi_i_mushy_in=phi_i_mushy, conserv_check_in=conserv_check, & - wave_spec_type_in = wave_spec_type, wave_spec_in=wave_spec, nfreq_in=nfreq, & - update_ocn_f_in=update_ocn_f, cpl_frazil_in=cpl_frazil, & + wave_spec_type_in = wave_spec_type, & + wave_spec_in=wave_spec, nfreq_in=nfreq, & tfrz_option_in=tfrz_option, kalg_in=kalg, fbot_xfer_type_in=fbot_xfer_type, & saltflux_option_in=saltflux_option, ice_ref_salinity_in=ice_ref_salinity, & Pstar_in=Pstar, Cstar_in=Cstar, iceruf_in=iceruf, iceruf_ocn_in=iceruf_ocn, calc_dragio_in=calc_dragio, & @@ -2750,7 +2509,6 @@ subroutine input_data 1009 format (a20,1x,d13.6,1x,a) 1010 format (a20,8x,l6,1x,a) ! logical 1011 format (a20,1x,l6) - 1013 format (a20,1x,6l3) 1020 format (a20,8x,i6,1x,a) ! 
integer 1021 format (a20,1x,i6) 1022 format (a20,1x,i12) @@ -2775,7 +2533,7 @@ subroutine init_state use ice_domain, only: nblocks, blocks_ice, halo_info use ice_domain_size, only: ncat, nilyr, nslyr, n_iso, n_aero, nfsd use ice_flux, only: sst, Tf, Tair, salinz, Tmltz - use ice_grid, only: tmask, umask, ULON, TLAT, grid_ice, grid_average_X2Y + use ice_grid, only: tmask, ULON, TLAT, grid_ice, grid_average_X2Y use ice_boundary, only: ice_HaloUpdate use ice_constants, only: field_loc_Nface, field_loc_Eface, field_type_scalar use ice_state, only: trcr_depend, aicen, trcrn, vicen, vsnon, & @@ -2963,7 +2721,6 @@ subroutine init_state ilo, ihi, jlo, jhi, & iglob, jglob, & ice_ic, tmask(:,:, iblk), & - umask(:,:, iblk), & ULON (:,:, iblk), & TLAT (:,:, iblk), & Tair (:,:, iblk), sst (:,:, iblk), & @@ -2986,10 +2743,10 @@ subroutine init_state if (grid_ice == 'CD' .or. grid_ice == 'C') then - call grid_average_X2Y('A',uvel,'U',uvelN,'N') - call grid_average_X2Y('A',vvel,'U',vvelN,'N') - call grid_average_X2Y('A',uvel,'U',uvelE,'E') - call grid_average_X2Y('A',vvel,'U',vvelE,'E') + call grid_average_X2Y('S',uvel,'U',uvelN,'N') + call grid_average_X2Y('S',vvel,'U',vvelN,'N') + call grid_average_X2Y('S',uvel,'U',uvelE,'E') + call grid_average_X2Y('S',vvel,'U',vvelE,'E') ! Halo update on North, East faces call ice_HaloUpdate(uvelN, halo_info, & @@ -3004,6 +2761,7 @@ subroutine init_state endif + !----------------------------------------------------------------- ! 
compute aggregate ice state and open water area !----------------------------------------------------------------- @@ -3035,8 +2793,7 @@ subroutine init_state trcr_depend = trcr_depend(:), & trcr_base = trcr_base(:,:), & n_trcr_strata = n_trcr_strata(:), & - nt_strata = nt_strata(:,:), & - Tf = Tf(i,j,iblk)) + nt_strata = nt_strata(:,:)) aice_init(i,j,iblk) = aice(i,j,iblk) @@ -3063,9 +2820,8 @@ subroutine set_state_var (nx_block, ny_block, & ilo, ihi, jlo, jhi, & iglob, jglob, & ice_ic, tmask, & - umask, & - ULON, & - TLAT, & + ULON, & + TLAT, & Tair, sst, & Tf, & salinz, Tmltz, & @@ -3090,8 +2846,7 @@ subroutine set_state_var (nx_block, ny_block, & ice_ic ! method of ice cover initialization logical (kind=log_kind), dimension (nx_block,ny_block), intent(in) :: & - tmask , & ! true for ice/ocean cells - umask ! for U points + tmask ! true for ice/ocean cells real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & ULON , & ! longitude of velocity pts (radians) @@ -3145,7 +2900,7 @@ subroutine set_state_var (nx_block, ny_block, & indxi, indxj ! compressed indices for cells with aicen > puny real (kind=dbl_kind) :: & - Tsfc, asum, hbar, abar, puny, rhos, Lfresh, rad_to_deg, rsnw_fall, dist_ratio, Tffresh + Tsfc, sum, hbar, abar, puny, rhos, Lfresh, rad_to_deg, rsnw_fall, dist_ratio, Tffresh real (kind=dbl_kind), dimension(ncat) :: & ainit, hinit ! initial area, thickness @@ -3281,7 +3036,7 @@ subroutine set_state_var (nx_block, ny_block, & ! Note: the resulting average ice thickness ! tends to be less than hbar due to the ! nonlinear distribution of ice thicknesses - asum = c0 + sum = c0 do n = 1, ncat if (n < ncat) then hinit(n) = p5*(hin_max(n-1) + hin_max(n)) ! m @@ -3290,10 +3045,10 @@ subroutine set_state_var (nx_block, ny_block, & endif ! parabola, max at h=hbar, zero at h=0, 2*hbar ainit(n) = max(c0, (c2*hbar*hinit(n) - hinit(n)**2)) - asum = asum + ainit(n) + sum = sum + ainit(n) enddo do n = 1, ncat - ainit(n) = ainit(n) / (asum + puny/ncat) ! 
normalize + ainit(n) = ainit(n) / (sum + puny/ncat) ! normalize enddo else @@ -3368,7 +3123,7 @@ subroutine set_state_var (nx_block, ny_block, & enddo enddo - elseif (ice_data_type(1:7) == 'channel') then + elseif (trim(ice_data_type) == 'channel') then ! channel ice in center of domain in i direction icells = 0 do j = jlo, jhi @@ -3539,19 +3294,13 @@ subroutine set_state_var (nx_block, ny_block, & domain_length = dxrect*cm_to_m*nx_global period = c12*secday ! 12 days rotational period max_vel = pi*domain_length/period - do j = 1, ny_block do i = 1, nx_block - if (umask(i,j)) then - uvel(i,j) = c2*max_vel*(real(jglob(j), kind=dbl_kind) - p5) & - / real(ny_global - 1, kind=dbl_kind) - max_vel - vvel(i,j) = -c2*max_vel*(real(iglob(i), kind=dbl_kind) - p5) & - / real(nx_global - 1, kind=dbl_kind) + max_vel - else - uvel(i,j) = c0 - vvel(i,j) = c0 - endif + uvel(i,j) = c2*max_vel*(real(jglob(j), kind=dbl_kind) - p5) & + / real(ny_global - 1, kind=dbl_kind) - max_vel + vvel(i,j) = -c2*max_vel*(real(iglob(i), kind=dbl_kind) - p5) & + / real(nx_global - 1, kind=dbl_kind) + max_vel enddo ! j enddo ! i else diff --git a/cicecore/cicedyn/infrastructure/comm/mpi/ice_boundary.F90 b/cicecore/cicedyn/infrastructure/comm/mpi/ice_boundary.F90 index fe8432a2d..f69c21ebd 100644 --- a/cicecore/cicedyn/infrastructure/comm/mpi/ice_boundary.F90 +++ b/cicecore/cicedyn/infrastructure/comm/mpi/ice_boundary.F90 @@ -6880,9 +6880,6 @@ subroutine primary_grid_lengths_global_ext( & ! This subroutine adds ghost cells to global primary grid lengths array ! ARRAY_I and outputs result to array ARRAY_O -! Note duplicate implementation of this subroutine in: -! 
cicecore/cicedynB/infrastructure/comm/serial/ice_boundary.F90 - use ice_constants, only: c0 use ice_domain_size, only: nx_global, ny_global diff --git a/cicecore/cicedyn/infrastructure/comm/serial/ice_boundary.F90 b/cicecore/cicedyn/infrastructure/comm/serial/ice_boundary.F90 index cca7ea849..757d69d2e 100644 --- a/cicecore/cicedyn/infrastructure/comm/serial/ice_boundary.F90 +++ b/cicecore/cicedyn/infrastructure/comm/serial/ice_boundary.F90 @@ -4660,9 +4660,6 @@ subroutine primary_grid_lengths_global_ext( & ! This subroutine adds ghost cells to global primary grid lengths array ! ARRAY_I and outputs result to array ARRAY_O -! Note duplicate implementation of this subroutine in: -! cicecore/cicedynB/infrastructure/comm/mpi/ice_boundary.F90 - use ice_constants, only: c0 use ice_domain_size, only: nx_global, ny_global diff --git a/cicecore/cicedyn/infrastructure/ice_domain.F90 b/cicecore/cicedyn/infrastructure/ice_domain.F90 index 10254aa93..ff1fac723 100644 --- a/cicecore/cicedyn/infrastructure/ice_domain.F90 +++ b/cicecore/cicedyn/infrastructure/ice_domain.F90 @@ -104,7 +104,7 @@ subroutine init_domain_blocks use ice_distribution, only: processor_shape use ice_domain_size, only: ncat, nilyr, nslyr, max_blocks, & nx_global, ny_global, block_size_x, block_size_y - + use ice_fileunits, only: goto_nml !---------------------------------------------------------------------- ! ! local variables @@ -114,6 +114,9 @@ subroutine init_domain_blocks integer (int_kind) :: & nml_error ! namelist read error flag + character(len=char_len) :: nml_name ! text namelist name + character(len=char_len_long) :: tmpstr2 ! for namelist check + character(len=*), parameter :: subname = '(init_domain_blocks)' !---------------------------------------------------------------------- @@ -167,26 +170,39 @@ subroutine init_domain_blocks landblockelim = .true. ! 
on by default if (my_task == master_task) then - write(nu_diag,*) subname,' Reading domain_nml' - + nml_name = 'domain_nml' + write(nu_diag,*) subname,' Reading ', trim(nml_name) + call get_fileunit(nu_nml) open (nu_nml, file=trim(nml_filename), status='old',iostat=nml_error) if (nml_error /= 0) then call abort_ice(subname//'ERROR: domain_nml open file '// & - trim(nml_filename), & - file=__FILE__, line=__LINE__) + trim(nml_filename), & + file=__FILE__, line=__LINE__) endif + call goto_nml(nu_nml,trim(nml_name),nml_error) + if (nml_error /= 0) then + call abort_ice(subname//'ERROR: searching for '// trim(nml_name), & + file=__FILE__, line=__LINE__) + endif + nml_error = 1 do while (nml_error > 0) read(nu_nml, nml=domain_nml,iostat=nml_error) + ! check if error + if (nml_error /= 0) then + ! backspace and re-read erroneous line + backspace(nu_nml) + read(nu_nml,fmt='(A)') tmpstr2 + call abort_ice(subname//'ERROR: ' // trim(nml_name) // ' reading ' // & + trim(tmpstr2), file=__FILE__, line=__LINE__) + endif end do - if (nml_error /= 0) then - call abort_ice(subname//'ERROR: domain_nml reading ', & - file=__FILE__, line=__LINE__) - endif + close(nu_nml) call release_fileunit(nu_nml) + endif call broadcast_scalar(nprocs, master_task) diff --git a/cicecore/cicedyn/infrastructure/ice_grid.F90 b/cicecore/cicedyn/infrastructure/ice_grid.F90 index c43b7989c..b775c21f2 100644 --- a/cicecore/cicedyn/infrastructure/ice_grid.F90 +++ b/cicecore/cicedyn/infrastructure/ice_grid.F90 @@ -24,22 +24,18 @@ module ice_grid use ice_kinds_mod use ice_broadcast, only: broadcast_scalar, broadcast_array - use ice_boundary, only: ice_HaloUpdate, ice_HaloExtrapolate - use ice_constants, only: c0, c1, c1p5, c2, c4, c20, c360, & - p5, p25, radius, cm_to_m, m_to_cm, & - field_loc_center, field_loc_NEcorner, field_loc_Nface, field_loc_Eface, & - field_type_scalar, field_type_vector, field_type_angle + use ice_boundary, only: ice_HaloUpdate, ice_HaloExtrapolate, & + primary_grid_lengths_global_ext 
use ice_communicate, only: my_task, master_task use ice_blocks, only: block, get_block, nx_block, ny_block, nghost use ice_domain_size, only: nx_global, ny_global, max_blocks use ice_domain, only: blocks_ice, nblocks, halo_info, distrb_info, & - ew_boundary_type, ns_boundary_type, init_domain_distribution, & - close_boundaries + ew_boundary_type, ns_boundary_type, init_domain_distribution use ice_fileunits, only: nu_diag, nu_grid, nu_kmt, & get_fileunit, release_fileunit, flush_fileunit use ice_gather_scatter, only: gather_global, scatter_global use ice_read_write, only: ice_read, ice_read_nc, ice_read_global, & - ice_read_global_nc, ice_open, ice_open_nc, ice_close_nc, ice_check_nc + ice_read_global_nc, ice_open, ice_open_nc, ice_close_nc use ice_timers, only: timer_bound, ice_timer_start, ice_timer_stop use ice_exit, only: abort_ice use ice_global_reductions, only: global_minval, global_maxval @@ -48,9 +44,8 @@ module ice_grid implicit none private - public :: init_grid1, init_grid2, grid_average_X2Y, makemask, & - alloc_grid, dealloc_grid, & - grid_neighbor_min, grid_neighbor_max + public :: init_grid1, init_grid2, grid_average_X2Y, & + alloc_grid, makemask, grid_neighbor_min, grid_neighbor_max character (len=char_len_long), public :: & grid_format , & ! file format ('bin'=binary or 'nc'=netcdf) @@ -87,14 +82,14 @@ module ice_grid dyE , & ! height of E-cell through the middle (m) HTE , & ! length of eastern edge of T-cell (m) HTN , & ! length of northern edge of T-cell (m) - tarea , & ! area of T-cell (m^2), valid in halo - uarea , & ! area of U-cell (m^2), valid in halo - narea , & ! area of N-cell (m^2), valid in halo - earea , & ! area of E-cell (m^2), valid in halo - tarear , & ! 1/tarea, valid in halo - uarear , & ! 1/uarea, valid in halo - narear , & ! 1/narea, valid in halo - earear , & ! 1/earea, valid in halo + tarea , & ! area of T-cell (m^2) + uarea , & ! area of U-cell (m^2) + narea , & ! area of N-cell (m^2) + earea , & ! 
area of E-cell (m^2) + tarear , & ! 1/tarea + uarear , & ! 1/uarea + narear , & ! 1/narea + earear , & ! 1/earea tarean , & ! area of NH T-cells tareas , & ! area of SH T-cells ULON , & ! longitude of velocity pts, NE corner of T pts (radians) @@ -106,7 +101,7 @@ module ice_grid ELON , & ! longitude of center of east face of T pts (radians) ELAT , & ! latitude of center of east face of T pts (radians) ANGLE , & ! for conversions between POP grid and lat/lon - ANGLET , & ! ANGLE converted to T-cells, valid in halo + ANGLET , & ! ANGLE converted to T-cells bathymetry , & ! ocean depth, for grounding keels and bergs (m) ocn_gridcell_frac ! only relevant for lat-lon grids ! gridcell value of [1 - (land fraction)] (T-cell) @@ -115,6 +110,20 @@ module ice_grid G_HTE , & ! length of eastern edge of T-cell (global ext.) G_HTN ! length of northern edge of T-cell (global ext.) + real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & + cyp , & ! 1.5*HTE(i,j)-0.5*HTW(i,j) = 1.5*HTE(i,j)-0.5*HTE(i-1,j) + cxp , & ! 1.5*HTN(i,j)-0.5*HTS(i,j) = 1.5*HTN(i,j)-0.5*HTN(i,j-1) + cym , & ! 0.5*HTE(i,j)-1.5*HTW(i,j) = 0.5*HTE(i,j)-1.5*HTE(i-1,j) + cxm , & ! 0.5*HTN(i,j)-1.5*HTS(i,j) = 0.5*HTN(i,j)-1.5*HTN(i,j-1) + dxhy , & ! 0.5*(HTE(i,j) - HTW(i,j)) = 0.5*(HTE(i,j) - HTE(i-1,j)) + dyhx ! 0.5*(HTN(i,j) - HTS(i,j)) = 0.5*(HTN(i,j) - HTN(i,j-1)) + + real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & + ratiodxN , & ! - dxN(i+1,j) / dxN(i,j) + ratiodyE , & ! - dyE(i ,j+1) / dyE(i,j) + ratiodxNr , & ! 1 / ratiodxN + ratiodyEr ! 1 / ratiodyE + ! grid dimensions for rectangular grid real (kind=dbl_kind), public :: & dxrect, & ! user_specified spacing (cm) in x-direction (uniform HTN) @@ -140,6 +149,26 @@ module ice_grid lone_bounds, & ! longitude of gridbox corners for E point late_bounds ! latitude of gridbox corners for E point + ! geometric quantities used for remapping transport + real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & + xav , & ! 
mean T-cell value of x + yav , & ! mean T-cell value of y + xxav , & ! mean T-cell value of xx +! xyav , & ! mean T-cell value of xy +! yyav , & ! mean T-cell value of yy + yyav ! mean T-cell value of yy +! xxxav, & ! mean T-cell value of xxx +! xxyav, & ! mean T-cell value of xxy +! xyyav, & ! mean T-cell value of xyy +! yyyav ! mean T-cell value of yyy + + real (kind=dbl_kind), & + dimension (:,:,:,:,:), allocatable, public :: & + mne, & ! matrices used for coordinate transformations in remapping + mnw, & ! ne = northeast corner, nw = northwest, etc. + mse, & + msw + ! masks real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & hm , & ! land/boundary mask, thickness (T-cell) @@ -151,7 +180,7 @@ module ice_grid logical (kind=log_kind), public :: & use_bathymetry, & ! flag for reading in bathymetry_file - save_ghte_ghtn, & ! flag for saving global hte and htn during initialization + pgl_global_ext, & ! flag for init primary grid lengths (global ext.) scale_dxdy ! flag to apply scale factor to vary dx/dy in rectgrid logical (kind=log_kind), dimension (:,:,:), allocatable, public :: & @@ -222,9 +251,19 @@ subroutine alloc_grid ANGLET (nx_block,ny_block,max_blocks), & ! ANGLE converted to T-cells bathymetry(nx_block,ny_block,max_blocks),& ! ocean depth, for grounding keels and bergs (m) ocn_gridcell_frac(nx_block,ny_block,max_blocks),& ! only relevant for lat-lon grids + cyp (nx_block,ny_block,max_blocks), & ! 1.5*HTE - 0.5*HTW + cxp (nx_block,ny_block,max_blocks), & ! 1.5*HTN - 0.5*HTS + cym (nx_block,ny_block,max_blocks), & ! 0.5*HTE - 1.5*HTW + cxm (nx_block,ny_block,max_blocks), & ! 0.5*HTN - 1.5*HTS + dxhy (nx_block,ny_block,max_blocks), & ! 0.5*(HTE - HTW) + dyhx (nx_block,ny_block,max_blocks), & ! 0.5*(HTN - HTS) + xav (nx_block,ny_block,max_blocks), & ! mean T-cell value of x + yav (nx_block,ny_block,max_blocks), & ! mean T-cell value of y + xxav (nx_block,ny_block,max_blocks), & ! mean T-cell value of xx + yyav (nx_block,ny_block,max_blocks), & ! 
mean T-cell value of yy hm (nx_block,ny_block,max_blocks), & ! land/boundary mask, thickness (T-cell) bm (nx_block,ny_block,max_blocks), & ! task/block id - uvm (nx_block,ny_block,max_blocks), & ! land/boundary mask, velocity (U-cell) + uvm (nx_block,ny_block,max_blocks), & ! land/boundary mask, velocity (U-cell) - water in case of all water point npm (nx_block,ny_block,max_blocks), & ! land/boundary mask (N-cell) epm (nx_block,ny_block,max_blocks), & ! land/boundary mask (E-cell) kmt (nx_block,ny_block,max_blocks), & ! ocean topography mask for bathymetry (T-cell) @@ -244,43 +283,32 @@ subroutine alloc_grid latn_bounds(4,nx_block,ny_block,max_blocks), & ! latitude of gridbox corners for N point lone_bounds(4,nx_block,ny_block,max_blocks), & ! longitude of gridbox corners for E point late_bounds(4,nx_block,ny_block,max_blocks), & ! latitude of gridbox corners for E point + mne (2,2,nx_block,ny_block,max_blocks), & ! matrices used for coordinate transformations in remapping + mnw (2,2,nx_block,ny_block,max_blocks), & ! ne = northeast corner, nw = northwest, etc. + mse (2,2,nx_block,ny_block,max_blocks), & + msw (2,2,nx_block,ny_block,max_blocks), & stat=ierr) - if (ierr/=0) call abort_ice(subname//' ERROR: Out of memory1', file=__FILE__, line=__LINE__) - - if (save_ghte_ghtn) then - if (my_task == master_task) then - allocate( & - G_HTE(nx_global+2*nghost, ny_global+2*nghost), & ! length of eastern edge of T-cell (global ext.) - G_HTN(nx_global+2*nghost, ny_global+2*nghost), & ! length of northern edge of T-cell (global ext.) - stat=ierr) - else - allocate( & - G_HTE(1,1), & ! needed for debug checks - G_HTN(1,1), & ! never used in code - stat=ierr) - endif - if (ierr/=0) call abort_ice(subname//' ERROR: Out of memory3', file=__FILE__, line=__LINE__) + if (ierr/=0) call abort_ice(subname//'ERROR: Out of memory') + + if (grid_ice == 'CD' .or. 
grid_ice == 'C') then + allocate( & + ratiodxN (nx_block,ny_block,max_blocks), & + ratiodyE (nx_block,ny_block,max_blocks), & + ratiodxNr(nx_block,ny_block,max_blocks), & + ratiodyEr(nx_block,ny_block,max_blocks), & + stat=ierr) + if (ierr/=0) call abort_ice(subname//'ERROR: Out of memory') endif - end subroutine alloc_grid - -!======================================================================= - -! -! DeAllocate space for variables no longer needed after initialization -! - subroutine dealloc_grid - - integer (int_kind) :: ierr - - character(len=*), parameter :: subname = '(dealloc_grid)' - - if (save_ghte_ghtn) then - deallocate(G_HTE, G_HTN, stat=ierr) - if (ierr/=0) call abort_ice(subname//' ERROR: Dealloc error1', file=__FILE__, line=__LINE__) + if (pgl_global_ext) then + allocate( & + G_HTE(nx_global+2*nghost, ny_global+2*nghost), & ! length of eastern edge of T-cell (global ext.) + G_HTN(nx_global+2*nghost, ny_global+2*nghost), & ! length of northern edge of T-cell (global ext.) + stat=ierr) + if (ierr/=0) call abort_ice(subname//'ERROR: Out of memory') endif - end subroutine dealloc_grid + end subroutine alloc_grid !======================================================================= @@ -291,6 +319,10 @@ end subroutine dealloc_grid subroutine init_grid1 + use ice_blocks, only: nx_block, ny_block + use ice_broadcast, only: broadcast_array + use ice_constants, only: c1 + integer (kind=int_kind) :: & fid_grid, & ! file id for netCDF grid file fid_kmt ! file id for netCDF kmt file @@ -324,12 +356,7 @@ subroutine init_grid1 if (grid_type == 'tripole' .and. ns_boundary_type /= 'tripole' .and. & ns_boundary_type /= 'tripoleT') then - call abort_ice(subname//' ERROR: grid_type tripole needs tripole ns_boundary_type', & - file=__FILE__, line=__LINE__) - endif - - if (grid_type == 'tripole' .and. 
(mod(nx_global,2)/=0)) then - call abort_ice(subname//' ERROR: grid_type tripole requires even nx_global number', & + call abort_ice(subname//'ERROR: grid_type tripole needs tripole ns_boundary_type', & file=__FILE__, line=__LINE__) endif @@ -413,6 +440,11 @@ end subroutine init_grid1 subroutine init_grid2 + use ice_blocks, only: get_block, block, nx_block, ny_block + use ice_constants, only: c0, c1, c2, p5, p25, c1p5, & + field_loc_center, field_loc_NEcorner, field_loc_Nface, field_loc_Eface, & + field_type_scalar, field_type_vector, field_type_angle + use ice_domain_size, only: max_blocks #if defined (_OPENMP) use OMP_LIB #endif @@ -475,6 +507,7 @@ subroutine init_grid2 ! Diagnose OpenMP thread schedule, force order in output !----------------------------------------------------------------- +! This code does not work in CESM. Needs to be investigated further. #if defined (_OPENMP) !$OMP PARALLEL DO ORDERED PRIVATE(iblk) SCHEDULE(runtime) do iblk = 1, nblocks @@ -541,6 +574,34 @@ subroutine init_grid2 enddo enddo + do j = jlo, jhi + do i = ilo, ihi + dxhy(i,j,iblk) = p5*(HTE(i,j,iblk) - HTE(i-1,j,iblk)) + dyhx(i,j,iblk) = p5*(HTN(i,j,iblk) - HTN(i,j-1,iblk)) + enddo + enddo + + do j = jlo, jhi+1 + do i = ilo, ihi+1 + cyp(i,j,iblk) = (c1p5*HTE(i,j,iblk) - p5*HTE(i-1,j,iblk)) + cxp(i,j,iblk) = (c1p5*HTN(i,j,iblk) - p5*HTN(i,j-1,iblk)) + ! match order of operations in cyp, cxp for tripole grids + cym(i,j,iblk) = -(c1p5*HTE(i-1,j,iblk) - p5*HTE(i,j,iblk)) + cxm(i,j,iblk) = -(c1p5*HTN(i,j-1,iblk) - p5*HTN(i,j,iblk)) + enddo + enddo + + if (grid_ice == 'CD' .or. grid_ice == 'C') then + do j = jlo, jhi + do i = ilo, ihi + ratiodxN (i,j,iblk) = - dxN(i+1,j ,iblk) / dxN(i,j,iblk) + ratiodyE (i,j,iblk) = - dyE(i ,j+1,iblk) / dyE(i,j,iblk) + ratiodxNr(i,j,iblk) = c1 / ratiodxN(i,j,iblk) + ratiodyEr(i,j,iblk) = c1 / ratiodyE(i,j,iblk) + enddo + enddo + endif + enddo ! 
iblk !$OMP END PARALLEL DO @@ -556,6 +617,13 @@ subroutine init_grid2 call ice_timer_start(timer_bound) + call ice_HaloUpdate (dxhy, halo_info, & + field_loc_center, field_type_vector, & + fillValue=c1) + call ice_HaloUpdate (dyhx, halo_info, & + field_loc_center, field_type_vector, & + fillValue=c1) + ! Update just on the tripole seam to ensure bit-for-bit symmetry across seam call ice_HaloUpdate (tarea, halo_info, & field_loc_center, field_type_scalar, & @@ -563,24 +631,12 @@ subroutine init_grid2 call ice_HaloUpdate (uarea, halo_info, & field_loc_NEcorner, field_type_scalar, & fillValue=c1, tripoleOnly=.true.) - call ice_HaloUpdate (narea, halo_info, & - field_loc_Nface, field_type_scalar, & - fillValue=c1, tripoleOnly=.true.) - call ice_HaloUpdate (earea, halo_info, & - field_loc_Eface, field_type_scalar, & - fillValue=c1, tripoleOnly=.true.) call ice_HaloUpdate (tarear, halo_info, & field_loc_center, field_type_scalar, & fillValue=c1, tripoleOnly=.true.) call ice_HaloUpdate (uarear, halo_info, & field_loc_NEcorner, field_type_scalar, & fillValue=c1, tripoleOnly=.true.) - call ice_HaloUpdate (narear, halo_info, & - field_loc_Nface, field_type_scalar, & - fillValue=c1, tripoleOnly=.true.) - call ice_HaloUpdate (earear, halo_info, & - field_loc_Eface, field_type_scalar, & - fillValue=c1, tripoleOnly=.true.) call ice_timer_stop(timer_bound) @@ -603,37 +659,36 @@ subroutine init_grid2 if (trim(grid_type) == 'cpom_grid') then ANGLET(:,:,:) = ANGLE(:,:,:) else if (.not. 
(l_readCenter)) then - ANGLET = c0 + ANGLET = c0 - !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block, & - !$OMP angle_0,angle_w,angle_s,angle_sw) - do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi + !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block, & + !$OMP angle_0,angle_w,angle_s,angle_sw) + do iblk = 1, nblocks + this_block = get_block(blocks_ice(iblk),iblk) + ilo = this_block%ilo + ihi = this_block%ihi + jlo = this_block%jlo + jhi = this_block%jhi - do j = jlo, jhi - do i = ilo, ihi - angle_0 = ANGLE(i ,j ,iblk) ! w----0 - angle_w = ANGLE(i-1,j ,iblk) ! | | - angle_s = ANGLE(i, j-1,iblk) ! | | - angle_sw = ANGLE(i-1,j-1,iblk) ! sw---s - ANGLET(i,j,iblk) = atan2(p25*(sin(angle_0)+ & - sin(angle_w)+ & - sin(angle_s)+ & - sin(angle_sw)),& - p25*(cos(angle_0)+ & - cos(angle_w)+ & - cos(angle_s)+ & - cos(angle_sw))) - enddo - enddo + do j = jlo, jhi + do i = ilo, ihi + angle_0 = ANGLE(i ,j ,iblk) ! w----0 + angle_w = ANGLE(i-1,j ,iblk) ! | | + angle_s = ANGLE(i, j-1,iblk) ! | | + angle_sw = ANGLE(i-1,j-1,iblk) ! sw---s + ANGLET(i,j,iblk) = atan2(p25*(sin(angle_0)+ & + sin(angle_w)+ & + sin(angle_s)+ & + sin(angle_sw)),& + p25*(cos(angle_0)+ & + cos(angle_w)+ & + cos(angle_s)+ & + cos(angle_sw))) enddo - !$OMP END PARALLEL DO + enddo + enddo + !$OMP END PARALLEL DO endif ! cpom_grid - if (trim(grid_type) == 'regional' .and. & (.not. (l_readCenter))) then ! for W boundary extrapolate from interior @@ -663,10 +718,8 @@ subroutine init_grid2 call makemask ! velocity mask, hemisphere masks if (.not. (l_readCenter)) then - call Tlatlon ! get lat, lon on the T grid + call Tlatlon ! get lat, lon on the T grid endif - call NElatlon ! get lat, lon on the N, E grid - !----------------------------------------------------------------- ! 
bathymetry !----------------------------------------------------------------- @@ -676,7 +729,7 @@ subroutine init_grid2 elseif (trim(bathymetry_format) == 'pop') then call get_bathymetry_popfile else - call abort_ice(subname//' ERROR: bathymetry_format value must be default or pop', & + call abort_ice(subname//'ERROR: bathymetry_format value must be default or pop', & file=__FILE__, line=__LINE__) endif @@ -728,6 +781,12 @@ end subroutine init_grid2 subroutine popgrid + use ice_blocks, only: nx_block, ny_block + use ice_constants, only: c0, c1, p5, & + field_loc_center, field_loc_NEcorner, & + field_type_scalar, field_type_angle + use ice_domain_size, only: max_blocks + integer (kind=int_kind) :: & i, j, iblk, & ilo,ihi,jlo,jhi ! beginning and end of physical domain @@ -841,6 +900,11 @@ end subroutine popgrid subroutine popgrid_nc + use ice_blocks, only: nx_block, ny_block + use ice_constants, only: c0, c1, & + field_loc_center, field_loc_NEcorner, & + field_type_scalar, field_type_angle + use ice_domain_size, only: max_blocks #ifdef USE_NETCDF use netcdf #endif @@ -991,7 +1055,7 @@ subroutine popgrid_nc call ice_close_nc(fid_kmt) endif #else - call abort_ice(subname//' ERROR: USE_NETCDF cpp not defined', & + call abort_ice(subname//'ERROR: USE_NETCDF cpp not defined', & file=__FILE__, line=__LINE__) #endif @@ -1007,7 +1071,11 @@ end subroutine popgrid_nc subroutine latlongrid +! 
use ice_boundary + use ice_domain_size use ice_scam, only : scmlat, scmlon, single_column + use ice_constants, only: c0, c1, p5, p25, & + field_loc_center, field_type_scalar, radius #ifdef USE_NETCDF use netcdf #endif @@ -1071,13 +1139,9 @@ subroutine latlongrid call ice_open_nc(kmt_file, ncid) status = nf90_inq_dimid (ncid, 'ni', dimid) - call ice_check_nc(status, subname//' ERROR: inq_dimid ni', file=__FILE__, line=__LINE__) status = nf90_inquire_dimension(ncid, dimid, len=ni) - call ice_check_nc(status, subname//' ERROR: inq dim ni', file=__FILE__, line=__LINE__) status = nf90_inq_dimid (ncid, 'nj', dimid) - call ice_check_nc(status, subname//' ERROR: inq_dimid nj', file=__FILE__, line=__LINE__) status = nf90_inquire_dimension(ncid, dimid, len=nj) - call ice_check_nc(status, subname//' ERROR: inq dim nj', file=__FILE__, line=__LINE__) end if ! Determine start/count to read in for either single column or global lat-lon grid @@ -1090,7 +1154,7 @@ subroutine latlongrid write(nu_diag,*) 'Because you have selected the column model flag' write(nu_diag,*) 'Please set nx_global=ny_global=1 in file' write(nu_diag,*) 'ice_domain_size.F and recompile' - call abort_ice (subname//' ERROR: check nx_global, ny_global', file=__FILE__, line=__LINE__) + call abort_ice (subname//'ERROR: check nx_global, ny_global') endif end if @@ -1103,17 +1167,17 @@ subroutine latlongrid start3=(/1,1,1/) count3=(/ni,nj,1/) status = nf90_inq_varid(ncid, 'xc' , varid) - call ice_check_nc(status, subname//' ERROR: inq_varid xc', file=__FILE__, line=__LINE__) + if (status /= nf90_noerr) call abort_ice (subname//' inq_varid xc') status = nf90_get_var(ncid, varid, glob_grid, start3, count3) - call ice_check_nc(status, subname//' ERROR: get_var xc', file=__FILE__, line=__LINE__) + if (status /= nf90_noerr) call abort_ice (subname//' get_var xc') do i = 1,ni lons(i) = glob_grid(i,1) end do status = nf90_inq_varid(ncid, 'yc' , varid) - call ice_check_nc(status, subname//' ERROR: inq_varid yc', 
file=__FILE__, line=__LINE__) + if (status /= nf90_noerr) call abort_ice (subname//' inq_varid yc') status = nf90_get_var(ncid, varid, glob_grid, start3, count3) - call ice_check_nc(status, subname//' ERROR: get_var yc', file=__FILE__, line=__LINE__) + if (status /= nf90_noerr) call abort_ice (subname//' get_var yc') do j = 1,nj lats(j) = glob_grid(1,j) end do @@ -1132,29 +1196,29 @@ subroutine latlongrid deallocate(glob_grid) status = nf90_inq_varid(ncid, 'xc' , varid) - call ice_check_nc(status, subname//' ERROR: inq_varid xc', file=__FILE__, line=__LINE__) + if (status /= nf90_noerr) call abort_ice (subname//' inq_varid xc') status = nf90_get_var(ncid, varid, scamdata, start) - call ice_check_nc(status, subname//' ERROR: get_var xc', file=__FILE__, line=__LINE__) + if (status /= nf90_noerr) call abort_ice (subname//' get_var xc') TLON = scamdata status = nf90_inq_varid(ncid, 'yc' , varid) - call ice_check_nc(status, subname//' ERROR: inq_varid yc', file=__FILE__, line=__LINE__) + if (status /= nf90_noerr) call abort_ice (subname//' inq_varid yc') status = nf90_get_var(ncid, varid, scamdata, start) - call ice_check_nc(status, subname//' ERROR: get_var yc', file=__FILE__, line=__LINE__) + if (status /= nf90_noerr) call abort_ice (subname//' get_var yc') TLAT = scamdata status = nf90_inq_varid(ncid, 'area' , varid) - call ice_check_nc(status, subname//' ERROR: inq_varid area', file=__FILE__, line=__LINE__) + if (status /= nf90_noerr) call abort_ice (subname//' inq_varid area') status = nf90_get_var(ncid, varid, scamdata, start) - call ice_check_nc(status, subname//' ERROR: get_var are', file=__FILE__, line=__LINE__) + if (status /= nf90_noerr) call abort_ice (subname//' get_var are') tarea = scamdata status = nf90_inq_varid(ncid, 'mask' , varid) - call ice_check_nc(status, subname//' ERROR: inq_varid mask', file=__FILE__, line=__LINE__) + if (status /= nf90_noerr) call abort_ice (subname//' inq_varid mask') status = nf90_get_var(ncid, varid, scamdata, start) - call 
ice_check_nc(status, subname//' ERROR: get_var mask', file=__FILE__, line=__LINE__) + if (status /= nf90_noerr) call abort_ice (subname//' get_var mask') hm = scamdata status = nf90_inq_varid(ncid, 'frac' , varid) - call ice_check_nc(status, subname//' ERROR: inq_varid frac', file=__FILE__, line=__LINE__) + if (status /= nf90_noerr) call abort_ice (subname//' inq_varid frac') status = nf90_get_var(ncid, varid, scamdata, start) - call ice_check_nc(status, subname//' ERROR: get_var frac', file=__FILE__, line=__LINE__) + if (status /= nf90_noerr) call abort_ice (subname//' get_var frac') ocn_gridcell_frac = scamdata else ! Check for consistency @@ -1162,8 +1226,7 @@ subroutine latlongrid if (nx_global /= ni .and. ny_global /= nj) then write(nu_diag,*) 'latlongrid: ni,nj = ',ni,nj write(nu_diag,*) 'latlongrid: nx_g,ny_g = ',nx_global, ny_global - call abort_ice (subname//' ERROR: ni,nj not equal to nx_global,ny_global', & - file=__FILE__, line=__LINE__) + call abort_ice (subname//'ERROR: ni,nj not equal to nx_global,ny_global') end if end if @@ -1265,6 +1328,12 @@ subroutine latlongrid dyN (i,j,iblk) = 1.e36_dbl_kind dxE (i,j,iblk) = 1.e36_dbl_kind dyE (i,j,iblk) = 1.e36_dbl_kind + dxhy (i,j,iblk) = 1.e36_dbl_kind + dyhx (i,j,iblk) = 1.e36_dbl_kind + cyp (i,j,iblk) = 1.e36_dbl_kind + cxp (i,j,iblk) = 1.e36_dbl_kind + cym (i,j,iblk) = 1.e36_dbl_kind + cxm (i,j,iblk) = 1.e36_dbl_kind enddo enddo enddo @@ -1272,7 +1341,7 @@ subroutine latlongrid call makemask #else - call abort_ice(subname//' ERROR: USE_NETCDF cpp not defined', & + call abort_ice(subname//'ERROR: USE_NETCDF cpp not defined', & file=__FILE__, line=__LINE__) #endif @@ -1286,6 +1355,10 @@ end subroutine latlongrid subroutine rectgrid + use ice_constants, only: c0, c1, c2, radius, cm_to_m, & + field_loc_center, field_loc_NEcorner, field_type_scalar + use ice_domain, only: close_boundaries + integer (kind=int_kind) :: & i, j, & imid, jmid @@ -1400,22 +1473,6 @@ subroutine rectgrid enddo enddo - elseif 
(trim(kmt_type) == 'channel_oneeast') then - - do j = ny_global/2,ny_global/2 ! one channel wide - do i = 1,nx_global ! open sides - work_g1(i,j) = c1 ! NOTE nx_global > 5 - enddo - enddo - - elseif (trim(kmt_type) == 'channel_onenorth') then - - do j = 1,ny_global ! open sides - do i = nx_global/2,nx_global/2 ! one channel wide - work_g1(i,j) = c1 ! NOTE nx_global > 5 - enddo - enddo - elseif (trim(kmt_type) == 'wall') then do j = 1,ny_global ! open except @@ -1455,8 +1512,7 @@ subroutine rectgrid else - call abort_ice(subname//' ERROR: unknown kmt_type '//trim(kmt_type), & - file=__FILE__, line=__LINE__) + call abort_ice(subname//'ERROR: unknown kmt_type '//trim(kmt_type)) endif ! kmt_type @@ -1482,6 +1538,8 @@ subroutine rectgrid_scale_dxdy ! generate a variable spaced rectangluar grid. ! extend spacing from center of grid outward. + use ice_constants, only: c0, c1, c2, radius, cm_to_m, & + field_loc_center, field_loc_NEcorner, field_type_scalar integer (kind=int_kind) :: & i, j, iblk, & @@ -1645,6 +1703,8 @@ end subroutine rectgrid_scale_dxdy subroutine grid_boxislands_kmt (work) + use ice_constants, only: c0, c1, c20 + real (kind=dbl_kind), dimension(:,:), intent(inout) :: work integer (kind=int_kind) :: & @@ -1658,8 +1718,7 @@ subroutine grid_boxislands_kmt (work) nyb = int(real(ny_global, dbl_kind) / c20, int_kind) if (nxb < 1 .or. nyb < 1) & - call abort_ice(subname//' ERROR: requires larger grid size', & - file=__FILE__, line=__LINE__) + call abort_ice(subname//'ERROR: requires larger grid size') ! initialize work area as all ocean (c1). work(:,:) = c1 @@ -1779,6 +1838,11 @@ end subroutine grid_boxislands_kmt subroutine cpomgrid + use ice_blocks, only: nx_block, ny_block + use ice_constants, only: c0, c1, m_to_cm, & + field_loc_NEcorner, field_type_scalar + use ice_domain_size, only: max_blocks + integer (kind=int_kind) :: & i, j, iblk, & ilo,ihi,jlo,jhi ! 
beginning and end of physical domain @@ -1865,8 +1929,8 @@ subroutine cpomgrid close (nu_kmt) endif - write(nu_diag,*) subname," min/max HTN: ", minval(HTN), maxval(HTN) - write(nu_diag,*) subname," min/max HTE: ", minval(HTE), maxval(HTE) + write(nu_diag,*) "min/max HTN: ", minval(HTN), maxval(HTN) + write(nu_diag,*) "min/max HTE: ", minval(HTE), maxval(HTE) end subroutine cpomgrid @@ -1880,6 +1944,10 @@ end subroutine cpomgrid subroutine primary_grid_lengths_HTN(work_g) + use ice_constants, only: p25, p5, c2, cm_to_m, & + field_loc_center, field_loc_NEcorner, & + field_loc_Nface, field_type_scalar + real (kind=dbl_kind), dimension(:,:) :: work_g ! global array holding HTN ! local variables @@ -1915,14 +1983,10 @@ subroutine primary_grid_lengths_HTN(work_g) work_g2(i,j) = p5*(work_g(i,j) + work_g(ip1,j)) ! dxU enddo enddo - if (save_ghte_ghtn) then - do j = 1, ny_global - do i = 1,nx_global - G_HTN(i+nghost,j+nghost) = work_g(i,j) - enddo - enddo - call global_ext_halo(G_HTN) - endif + endif + if (pgl_global_ext) then + call primary_grid_lengths_global_ext( & + G_HTN, work_g, ew_boundary_type, ns_boundary_type) endif call scatter_global(HTN, work_g, master_task, distrb_info, & field_loc_Nface, field_type_scalar) @@ -1985,6 +2049,10 @@ end subroutine primary_grid_lengths_HTN subroutine primary_grid_lengths_HTE(work_g) + use ice_constants, only: p25, p5, c2, cm_to_m, & + field_loc_center, field_loc_NEcorner, & + field_loc_Eface, field_type_scalar + real (kind=dbl_kind), dimension(:,:) :: work_g ! global array holding HTE ! local variables @@ -2023,14 +2091,10 @@ subroutine primary_grid_lengths_HTE(work_g) work_g2(i,ny_global) = c2*work_g(i,ny_global-1) - work_g(i,ny_global-2) ! 
dyU enddo endif - if (save_ghte_ghtn) then - do j = 1, ny_global - do i = 1, nx_global - G_HTE(i+nghost,j+nghost) = work_g(i,j) - enddo - enddo - call global_ext_halo(G_HTE) - endif + endif + if (pgl_global_ext) then + call primary_grid_lengths_global_ext( & + G_HTE, work_g, ew_boundary_type, ns_boundary_type) endif call scatter_global(HTE, work_g, master_task, distrb_info, & field_loc_Eface, field_type_scalar) @@ -2087,48 +2151,6 @@ end subroutine primary_grid_lengths_HTE !======================================================================= -! This subroutine fills ghost cells in global extended grid - - subroutine global_ext_halo(array) - - real (kind=dbl_kind), dimension(:,:), intent(inout) :: & - array ! extended global grid size nx+2*nghost, ny+2*nghost - ! nghost+1:nghost+nx_global and nghost+1:nghost+ny_global filled on entry - - integer (kind=int_kind) :: n - - character(len=*), parameter :: subname = '(global_ext_halo)' - - do n = 1,nghost - if (ns_boundary_type =='cyclic') then - array(:,n) = array(:,ny_global+n) - array(:,ny_global+nghost+n) = array(:,nghost+n) - elseif (ns_boundary_type == 'open') then - array(:,n) = array(:,nghost+1) - array(:,ny_global+nghost+n) = array(:,ny_global+nghost) - else - array(:,n) = c0 - array(:,ny_global+nghost+n) = c0 - endif - enddo - - do n = 1,nghost - if (ew_boundary_type =='cyclic') then - array(n ,:) = array(nx_global+n,:) - array(nx_global+nghost+n,:) = array(nghost+n ,:) - elseif (ew_boundary_type == 'open') then - array(n ,:) = array(nghost+1 ,:) - array(nx_global+nghost+n,:) = array(nx_global+nghost,:) - else - array(n ,:) = c0 - array(nx_global+nghost+n,:) = c0 - endif - enddo - - end subroutine global_ext_halo - -!======================================================================= - ! Sets the boundary values for the T cell land mask (hm) and ! makes the logical land masks for T and U cells (tmask, umask) ! and N and E cells (nmask, emask). 
@@ -2138,6 +2160,10 @@ end subroutine global_ext_halo subroutine makemask + use ice_constants, only: c0, p5, c1p5, & + field_loc_center, field_loc_NEcorner, field_type_scalar, & + field_loc_Nface, field_loc_Eface + integer (kind=int_kind) :: & i, j, iblk, & ilo,ihi,jlo,jhi ! beginning and end of physical domain @@ -2288,6 +2314,10 @@ end subroutine makemask subroutine Tlatlon + use ice_constants, only: c0, c1, c1p5, c2, c4, p5, & + field_loc_center, field_loc_Nface, field_loc_Eface, & + field_type_scalar + integer (kind=int_kind) :: & i, j, iblk , & ! horizontal indices ilo,ihi,jlo,jhi ! beginning and end of physical domain @@ -2301,10 +2331,6 @@ subroutine Tlatlon character(len=*), parameter :: subname = '(Tlatlon)' - if (my_task==master_task) then - write(nu_diag,*) subname,' called' - endif - call icepack_query_parameters(rad_to_deg_out=rad_to_deg) call icepack_warnings_flush(nu_diag) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & @@ -2312,6 +2338,10 @@ subroutine Tlatlon TLAT(:,:,:) = c0 TLON(:,:,:) = c0 + NLAT(:,:,:) = c0 + NLON(:,:,:) = c0 + ELAT(:,:,:) = c0 + ELON(:,:,:) = c0 !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block, & !$OMP x1,y1,z1,x2,y2,z2,x3,y3,z3,x4,y4,z4, & @@ -2364,87 +2394,15 @@ subroutine Tlatlon ! TLAT in radians North TLAT(i,j,iblk) = asin(tz) +! these two loops should be merged to save cos/sin calculations, +! but atan2 is not bit-for-bit. This suggests the result for atan2 depends on +! the prior atan2 call ??? not sure what's going on. +#if (1 == 1) enddo ! i enddo ! j enddo ! iblk !$OMP END PARALLEL DO - if (trim(grid_type) == 'regional') then - ! 
for W boundary extrapolate from interior - !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) - do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi - - i = ilo - if (this_block%i_glob(i) == 1) then - do j = jlo, jhi - TLON(i,j,iblk) = c2*TLON(i+1,j,iblk) - & - TLON(i+2,j,iblk) - TLAT(i,j,iblk) = c2*TLAT(i+1,j,iblk) - & - TLAT(i+2,j,iblk) - enddo - endif - enddo - !$OMP END PARALLEL DO - endif ! regional - - call ice_timer_start(timer_bound) - call ice_HaloUpdate (TLON, halo_info, & - field_loc_center, field_type_scalar, & - fillValue=c1) - call ice_HaloUpdate (TLAT, halo_info, & - field_loc_center, field_type_scalar, & - fillValue=c1) - call ice_HaloExtrapolate(TLON, distrb_info, & - ew_boundary_type, ns_boundary_type) - call ice_HaloExtrapolate(TLAT, distrb_info, & - ew_boundary_type, ns_boundary_type) - - end subroutine Tlatlon - -!======================================================================= - -! Initializes latitude and longitude on N, E grid -! -! author: T. Craig from Tlatlon - - subroutine NElatlon - - use ice_constants, only: c0, c1, c1p5, c2, c4, p5, & - field_loc_center, field_loc_Nface, field_loc_Eface, & - field_type_scalar - - integer (kind=int_kind) :: & - i, j, iblk , & ! horizontal indices - ilo,ihi,jlo,jhi ! beginning and end of physical domain - - real (kind=dbl_kind) :: & - z1,x1,y1,z2,x2,y2,z3,x3,y3,z4,x4,y4,tx,ty,tz,da, & - rad_to_deg - - type (block) :: & - this_block ! 
block information for current block - - character(len=*), parameter :: subname = '(NElatlon)' - - if (my_task==master_task) then - write(nu_diag,*) subname,' called' - endif - - call icepack_query_parameters(rad_to_deg_out=rad_to_deg) - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) - - NLAT(:,:,:) = c0 - NLON(:,:,:) = c0 - ELAT(:,:,:) = c0 - ELON(:,:,:) = c0 - !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block, & !$OMP x1,y1,z1,x2,y2,z2,x3,y3,z3,x4,y4,z4, & !$OMP tx,ty,tz,da) @@ -2477,7 +2435,7 @@ subroutine NElatlon x4 = cos(ULON(i,j,iblk))*z4 y4 = sin(ULON(i,j,iblk))*z4 z4 = sin(ULAT(i,j,iblk)) - +#endif ! --------- ! NLON/NLAT 2 pt computation (pts 3, 4) ! --------- @@ -2532,6 +2490,10 @@ subroutine NElatlon i = ilo if (this_block%i_glob(i) == 1) then do j = jlo, jhi + TLON(i,j,iblk) = c2*TLON(i+1,j,iblk) - & + TLON(i+2,j,iblk) + TLAT(i,j,iblk) = c2*TLAT(i+1,j,iblk) - & + TLAT(i+2,j,iblk) NLON(i,j,iblk) = c1p5*TLON(i+1,j,iblk) - & p5*TLON(i+2,j,iblk) NLAT(i,j,iblk) = c1p5*TLAT(i+1,j,iblk) - & @@ -2543,6 +2505,12 @@ subroutine NElatlon endif ! 
regional call ice_timer_start(timer_bound) + call ice_HaloUpdate (TLON, halo_info, & + field_loc_center, field_type_scalar, & + fillValue=c1) + call ice_HaloUpdate (TLAT, halo_info, & + field_loc_center, field_type_scalar, & + fillValue=c1) call ice_HaloUpdate (NLON, halo_info, & field_loc_Nface, field_type_scalar, & fillValue=c1) @@ -2555,6 +2523,10 @@ subroutine NElatlon call ice_HaloUpdate (ELAT, halo_info, & field_loc_Eface, field_type_scalar, & fillValue=c1) + call ice_HaloExtrapolate(TLON, distrb_info, & + ew_boundary_type, ns_boundary_type) + call ice_HaloExtrapolate(TLAT, distrb_info, & + ew_boundary_type, ns_boundary_type) call ice_HaloExtrapolate(NLON, distrb_info, & ew_boundary_type, ns_boundary_type) call ice_HaloExtrapolate(NLAT, distrb_info, & @@ -2577,10 +2549,12 @@ subroutine NElatlon if (my_task==master_task) then write(nu_diag,*) ' ' - write(nu_diag,*) subname,' min/max ULON:', y1*rad_to_deg, y2*rad_to_deg - write(nu_diag,*) subname,' min/max ULAT:', y3*rad_to_deg, y4*rad_to_deg - write(nu_diag,*) subname,' min/max TLON:', x1*rad_to_deg, x2*rad_to_deg - write(nu_diag,*) subname,' min/max TLAT:', x3*rad_to_deg, x4*rad_to_deg +! if (nx_block > 5+2*nghost .and. ny_block > 5+2*nghost) then + write(nu_diag,*) 'min/max ULON:', y1*rad_to_deg, y2*rad_to_deg + write(nu_diag,*) 'min/max ULAT:', y3*rad_to_deg, y4*rad_to_deg +! endif + write(nu_diag,*) 'min/max TLON:', x1*rad_to_deg, x2*rad_to_deg + write(nu_diag,*) 'min/max TLAT:', x3*rad_to_deg, x4*rad_to_deg endif ! my_task x1 = global_minval(NLON, distrb_info, nmask) @@ -2595,13 +2569,15 @@ subroutine NElatlon if (my_task==master_task) then write(nu_diag,*) ' ' - write(nu_diag,*) subname,' min/max NLON:', x1*rad_to_deg, x2*rad_to_deg - write(nu_diag,*) subname,' min/max NLAT:', x3*rad_to_deg, x4*rad_to_deg - write(nu_diag,*) subname,' min/max ELON:', y1*rad_to_deg, y2*rad_to_deg - write(nu_diag,*) subname,' min/max ELAT:', y3*rad_to_deg, y4*rad_to_deg +! if (nx_block > 5+2*nghost .and. 
ny_block > 5+2*nghost) then + write(nu_diag,*) 'min/max NLON:', x1*rad_to_deg, x2*rad_to_deg + write(nu_diag,*) 'min/max NLAT:', x3*rad_to_deg, x4*rad_to_deg + write(nu_diag,*) 'min/max ELON:', y1*rad_to_deg, y2*rad_to_deg + write(nu_diag,*) 'min/max ELAT:', y3*rad_to_deg, y4*rad_to_deg +! endif endif ! my_task - end subroutine NElatlon + end subroutine Tlatlon !======================================================================= @@ -2724,7 +2700,7 @@ subroutine grid_average_X2Y_NEversion(type,work1a,grid1a,work1b,grid1b,work2,gri call grid_average_X2Y_2('NE2TA',work1b,narea,npm,work1a,earea,epm,work2) case default - call abort_ice(subname//' ERROR: unknown X2Y '//trim(X2Y), file=__FILE__, line=__LINE__) + call abort_ice(subname//'ERROR: unknown X2Y '//trim(X2Y)) end select end subroutine grid_average_X2Y_NEversion @@ -2833,7 +2809,7 @@ subroutine grid_average_X2Y_1(X2Y,work1,work2) call grid_average_X2YA('SE',work1,narea,work2) case default - call abort_ice(subname//' ERROR: unknown X2Y '//trim(X2Y), file=__FILE__, line=__LINE__) + call abort_ice(subname//'ERROR: unknown X2Y '//trim(X2Y)) end select end subroutine grid_average_X2Y_1 @@ -2945,7 +2921,7 @@ subroutine grid_average_X2Y_1f(X2Y,work1,wght1,mask1,work2) call grid_average_X2YA('SE',work1,wght1,work2) case default - call abort_ice(subname//' ERROR: unknown X2Y '//trim(X2Y), file=__FILE__, line=__LINE__) + call abort_ice(subname//'ERROR: unknown X2Y '//trim(X2Y)) end select end subroutine grid_average_X2Y_1f @@ -2960,6 +2936,8 @@ end subroutine grid_average_X2Y_1f subroutine grid_average_X2YS(dir,work1,wght1,mask1,work2) + use ice_constants, only: c0 + character(len=*) , intent(in) :: & dir @@ -3174,7 +3152,7 @@ subroutine grid_average_X2YS(dir,work1,wght1,mask1,work2) !$OMP END PARALLEL DO case default - call abort_ice(subname//' ERROR: unknown option '//trim(dir), file=__FILE__, line=__LINE__) + call abort_ice(subname//'ERROR: unknown option '//trim(dir)) end select end subroutine grid_average_X2YS @@ 
-3189,6 +3167,8 @@ end subroutine grid_average_X2YS subroutine grid_average_X2YA(dir,work1,wght1,work2) + use ice_constants, only: c0 + character(len=*) , intent(in) :: & dir @@ -3402,7 +3382,7 @@ subroutine grid_average_X2YA(dir,work1,wght1,work2) !$OMP END PARALLEL DO case default - call abort_ice(subname//' ERROR: unknown option '//trim(dir), file=__FILE__, line=__LINE__) + call abort_ice(subname//'ERROR: unknown option '//trim(dir)) end select end subroutine grid_average_X2YA @@ -3417,6 +3397,8 @@ end subroutine grid_average_X2YA subroutine grid_average_X2YF(dir,work1,wght1,work2,wght2) + use ice_constants, only: c0, p25, p5 + character(len=*) , intent(in) :: & dir @@ -3604,7 +3586,7 @@ subroutine grid_average_X2YF(dir,work1,wght1,work2,wght2) !$OMP END PARALLEL DO case default - call abort_ice(subname//' ERROR: unknown option '//trim(dir), file=__FILE__, line=__LINE__) + call abort_ice(subname//'ERROR: unknown option '//trim(dir)) end select end subroutine grid_average_X2YF @@ -3619,6 +3601,8 @@ end subroutine grid_average_X2YF subroutine grid_average_X2Y_2(dir,work1a,wght1a,mask1a,work1b,wght1b,mask1b,work2) + use ice_constants, only: c0 + character(len=*) , intent(in) :: & dir @@ -3749,7 +3733,7 @@ subroutine grid_average_X2Y_2(dir,work1a,wght1a,mask1a,work1b,wght1b,mask1b,work !$OMP END PARALLEL DO case default - call abort_ice(subname//' ERROR: unknown option '//trim(dir), file=__FILE__, line=__LINE__) + call abort_ice(subname//'ERROR: unknown option '//trim(dir)) end select end subroutine grid_average_X2Y_2 @@ -3779,7 +3763,7 @@ real(kind=dbl_kind) function grid_neighbor_min(field, i, j, grid_location) resul case('N') mini = min(field(i,j), field(i,j+1)) case default - call abort_ice(subname // ' unknown grid_location: ' // grid_location, file=__FILE__, line=__LINE__) + call abort_ice(subname // ' unknown grid_location: ' // grid_location) end select end function grid_neighbor_min @@ -3810,7 +3794,7 @@ real(kind=dbl_kind) function grid_neighbor_max(field, 
i, j, grid_location) resul case('N') maxi = max(field(i,j), field(i,j+1)) case default - call abort_ice(subname // ' unknown grid_location: ' // grid_location, file=__FILE__, line=__LINE__) + call abort_ice(subname // ' unknown grid_location: ' // grid_location) end select end function grid_neighbor_max @@ -3829,6 +3813,11 @@ end function grid_neighbor_max subroutine gridbox_corners + use ice_blocks, only: nx_block, ny_block + use ice_constants, only: c0, c2, c360, & + field_loc_NEcorner, field_type_scalar + use ice_domain_size, only: max_blocks + integer (kind=int_kind) :: & i,j,iblk,icorner,& ! index counters ilo,ihi,jlo,jhi ! beginning and end of physical domain @@ -4020,6 +4009,11 @@ end subroutine gridbox_corners subroutine gridbox_edges + use ice_blocks, only: nx_block, ny_block + use ice_constants, only: c0, c2, c360, & + field_loc_NEcorner, field_type_scalar + use ice_domain_size, only: max_blocks + integer (kind=int_kind) :: & i,j,iblk,icorner,& ! index counters ilo,ihi,jlo,jhi ! beginning and end of physical domain @@ -4315,6 +4309,11 @@ end subroutine gridbox_edges subroutine gridbox_verts(work_g,vbounds) + use ice_blocks, only: nx_block, ny_block + use ice_constants, only: c0, c2, & + field_loc_NEcorner, field_type_scalar + use ice_domain_size, only: max_blocks + real (kind=dbl_kind), dimension(:,:), intent(in) :: & work_g @@ -4429,6 +4428,8 @@ end subroutine gridbox_verts subroutine get_bathymetry + use ice_constants, only: c0 + integer (kind=int_kind) :: & i, j, k, iblk ! 
loop indices @@ -4485,8 +4486,7 @@ subroutine get_bathymetry do j = 1, ny_block do i = 1, nx_block k = min(nint(kmt(i,j,iblk)),nlevel) - if (k > nlevel) call abort_ice(subname//' kmt gt nlevel error', & - file=__FILE__, line=__LINE__) + if (k > nlevel) call abort_ice(subname//' kmt gt nlevel error') if (k > 0) bathymetry(i,j,iblk) = depth(k) enddo enddo @@ -4545,10 +4545,10 @@ subroutine get_bathymetry_popfile if (my_task == master_task) then call get_fileunit(fid) open(fid,file=bathymetry_file,form='formatted',iostat=ierr) - if (ierr/=0) call abort_ice(subname//' open error', file=__FILE__, line=__LINE__) + if (ierr/=0) call abort_ice(subname//' open error') do k = 1,nlevel read(fid,*,iostat=ierr) thick(k) - if (ierr/=0) call abort_ice(subname//' read error', file=__FILE__, line=__LINE__) + if (ierr/=0) call abort_ice(subname//' read error') enddo call release_fileunit(fid) endif @@ -4575,7 +4575,7 @@ subroutine get_bathymetry_popfile depth(1) = thick(1) do k = 2, nlevel depth(k) = depth(k-1) + thick(k) - if (depth(k) < 0.) call abort_ice(subname//' negative depth error', file=__FILE__, line=__LINE__) + if (depth(k) < 0.) call abort_ice(subname//' negative depth error') enddo if (my_task==master_task) then @@ -4589,7 +4589,7 @@ subroutine get_bathymetry_popfile do j = 1, ny_block do i = 1, nx_block k = nint(kmt(i,j,iblk)) - if (k > nlevel) call abort_ice(subname//' kmt gt nlevel error', file=__FILE__, line=__LINE__) + if (k > nlevel) call abort_ice(subname//' kmt gt nlevel error') if (k > 0) bathymetry(i,j,iblk) = depth(k) enddo enddo @@ -4621,6 +4621,7 @@ subroutine read_seabedstress_bathy ! use module use ice_read_write + use ice_constants, only: field_loc_center, field_type_scalar ! 
local variables integer (kind=int_kind) :: & @@ -4644,7 +4645,7 @@ subroutine read_seabedstress_bathy fieldname='Bathymetry' if (my_task == master_task) then - write(nu_diag,*) subname,' reading ',TRIM(fieldname) + write(nu_diag,*) 'reading ',TRIM(fieldname) call icepack_warnings_flush(nu_diag) endif call ice_read_nc(fid_init,1,fieldname,bathymetry,diag, & @@ -4654,7 +4655,7 @@ subroutine read_seabedstress_bathy call ice_close_nc(fid_init) if (my_task == master_task) then - write(nu_diag,*) subname,' closing file ',TRIM(bathymetry_file) + write(nu_diag,*) 'closing file ',TRIM(bathymetry_file) call icepack_warnings_flush(nu_diag) endif diff --git a/cicecore/cicedyn/infrastructure/ice_memusage_gptl.c b/cicecore/cicedyn/infrastructure/ice_memusage_gptl.c index 309c8824b..32b31171d 100644 --- a/cicecore/cicedyn/infrastructure/ice_memusage_gptl.c +++ b/cicecore/cicedyn/infrastructure/ice_memusage_gptl.c @@ -196,7 +196,7 @@ int ice_memusage_gptl (int *size, int *rss, int *share, int *text, int *datastac */ ret = fscanf (fd, "%d %d %d %d %d %d %d", - size, rss, share, text, datastack, &dum, &dum); + size, rss, share, text, datastack, &dum, &dum); ret = fclose (fd); return 0; diff --git a/cicecore/cicedyn/infrastructure/ice_read_write.F90 b/cicecore/cicedyn/infrastructure/ice_read_write.F90 index 4613843b5..6332980f0 100644 --- a/cicecore/cicedyn/infrastructure/ice_read_write.F90 +++ b/cicecore/cicedyn/infrastructure/ice_read_write.F90 @@ -33,8 +33,8 @@ module ice_read_write private integer (kind=int_kind), parameter, private :: & - bits_per_byte = 8 ! number of bits per byte. - ! used to determine RecSize in ice_open + bits_per_byte = 8 ! number of bits per byte. + ! 
used to determine RecSize in ice_open public :: ice_open, & ice_open_ext, & @@ -51,33 +51,32 @@ module ice_read_write ice_write_ext, & ice_read_vec_nc, & ice_get_ncvarsize, & - ice_check_nc, & ice_close_nc interface ice_write - module procedure ice_write_xyt, & - ice_write_xyzt + module procedure ice_write_xyt, & + ice_write_xyzt end interface interface ice_read - module procedure ice_read_xyt, & - ice_read_xyzt + module procedure ice_read_xyt, & + ice_read_xyzt end interface interface ice_read_nc - module procedure ice_read_nc_xy, & - ice_read_nc_xyz, & - !ice_read_nc_xyf, & - ice_read_nc_point, & - ice_read_nc_1D, & - ice_read_nc_2D, & - ice_read_nc_3D, & - ice_read_nc_z + module procedure ice_read_nc_xy, & + ice_read_nc_xyz, & + !ice_read_nc_xyf, & + ice_read_nc_point, & + ice_read_nc_1D, & + ice_read_nc_2D, & + ice_read_nc_3D, & + ice_read_nc_z end interface interface ice_write_nc - module procedure ice_write_nc_xy, & - ice_write_nc_xyz + module procedure ice_write_nc_xy, & + ice_write_nc_xyz end interface !======================================================================= @@ -94,8 +93,8 @@ module ice_read_write subroutine ice_open(nu, filename, nbits, algn) integer (kind=int_kind), intent(in) :: & - nu , & ! unit number - nbits ! no. of bits per variable (0 for sequential access) + nu , & ! unit number + nbits ! no. of bits per variable (0 for sequential access) integer (kind=int_kind), intent(in), optional :: algn integer (kind=int_kind) :: RecSize, Remnant, nbytes @@ -147,15 +146,15 @@ end subroutine ice_open subroutine ice_open_ext(nu, filename, nbits) integer (kind=int_kind), intent(in) :: & - nu , & ! unit number - nbits ! no. of bits per variable (0 for sequential access) + nu , & ! unit number + nbits ! no. of bits per variable (0 for sequential access) integer (kind=int_kind) :: RecSize, nbytes character (*) :: filename integer (kind=int_kind) :: & - nx, ny ! grid dimensions including ghost cells + nx, ny ! 
grid dimensions including ghost cells character(len=*), parameter :: subname = '(ice_open_ext)' @@ -201,22 +200,22 @@ subroutine ice_read_xyt(nu, nrec, work, atype, diag, & use ice_gather_scatter, only: scatter_global integer (kind=int_kind), intent(in) :: & - nu , & ! unit number - nrec ! record number (0 for sequential access) + nu , & ! unit number + nrec ! record number (0 for sequential access) real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks), intent(out) :: & - work ! output array (real, 8-byte) + work ! output array (real, 8-byte) character (len=4), intent(in) :: & - atype ! format for input array - ! (real/integer, 4-byte/8-byte) + atype ! format for input array + ! (real/integer, 4-byte/8-byte) logical (kind=log_kind), intent(in) :: & - diag ! if true, write diagnostic output + diag ! if true, write diagnostic output integer (kind=int_kind), optional, intent(in) :: & - field_loc, & ! location of field on staggered grid - field_type ! type of field (scalar, vector, angle) + field_loc, & ! location of field on staggered grid + field_type ! type of field (scalar, vector, angle) logical (kind=log_kind), optional, intent(in) :: ignore_eof logical (kind=log_kind), optional, intent(out) :: hit_eof @@ -226,7 +225,7 @@ subroutine ice_read_xyt(nu, nrec, work, atype, diag, & integer (kind=int_kind) :: i, j, ios real (kind=dbl_kind) :: & - amin, amax, asum ! min, max values and sum of input array + amin, amax, asum ! min, max values and sum of input array logical (kind=log_kind) :: ignore_eof_use @@ -252,10 +251,9 @@ subroutine ice_read_xyt(nu, nrec, work, atype, diag, & if (my_task == master_task) then - !------------------------------------------------------------------- - ! Read global array according to format atype - !------------------------------------------------------------------- - + !------------------------------------------------------------------- + ! 
Read global array according to format atype + !------------------------------------------------------------------- if (present(hit_eof)) hit_eof = .false. if (atype == 'ida4') then @@ -282,7 +280,7 @@ subroutine ice_read_xyt(nu, nrec, work, atype, diag, & ignore_eof_use = .false. endif if (ignore_eof_use) then - ! Read line from file, checking for end-of-file + ! Read line from file, checking for end-of-file read(nu, iostat=ios) ((work_g1(i,j),i=1,nx_global), & j=1,ny_global) if (present(hit_eof)) hit_eof = ios < 0 @@ -302,10 +300,9 @@ subroutine ice_read_xyt(nu, nrec, work, atype, diag, & endif endif - !------------------------------------------------------------------- - ! optional diagnostics - !------------------------------------------------------------------- - + !------------------------------------------------------------------- + ! optional diagnostics + !------------------------------------------------------------------- if (my_task==master_task .and. diag) then amin = minval(work_g1) amax = maxval(work_g1, mask = work_g1 /= spval_dbl) @@ -313,10 +310,10 @@ subroutine ice_read_xyt(nu, nrec, work, atype, diag, & write(nu_diag,*) subname,' read_global ',nu, nrec, amin, amax, asum endif - !------------------------------------------------------------------- - ! Scatter data to individual processors. - ! NOTE: Ghost cells are not updated unless field_loc is present. - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Scatter data to individual processors. + ! NOTE: Ghost cells are not updated unless field_loc is present. + !------------------------------------------------------------------- if (present(field_loc)) then call scatter_global(work, work_g1, master_task, distrb_info, & @@ -348,22 +345,22 @@ subroutine ice_read_xyzt(nu, nrec, work, atype, diag, & use ice_domain_size, only: nblyr integer (kind=int_kind), intent(in) :: & - nu , & ! unit number - nrec ! 
record number (0 for sequential access) + nu , & ! unit number + nrec ! record number (0 for sequential access) real (kind=dbl_kind), dimension(nx_block,ny_block,nblyr+2,max_blocks), intent(out) :: & - work ! output array (real, 8-byte) + work ! output array (real, 8-byte) character (len=4), intent(in) :: & - atype ! format for input array - ! (real/integer, 4-byte/8-byte) + atype ! format for input array + ! (real/integer, 4-byte/8-byte) logical (kind=log_kind), intent(in) :: & - diag ! if true, write diagnostic output + diag ! if true, write diagnostic output integer (kind=int_kind), optional, intent(in) :: & - field_loc, & ! location of field on staggered grid - field_type ! type of field (scalar, vector, angle) + field_loc, & ! location of field on staggered grid + field_type ! type of field (scalar, vector, angle) logical (kind=log_kind), optional, intent(in) :: ignore_eof logical (kind=log_kind), optional, intent(out) :: hit_eof @@ -373,7 +370,7 @@ subroutine ice_read_xyzt(nu, nrec, work, atype, diag, & integer (kind=int_kind) :: i, j, k, ios real (kind=dbl_kind) :: & - amin, amax, asum ! min, max values and sum of input array + amin, amax, asum ! min, max values and sum of input array logical (kind=log_kind) :: ignore_eof_use @@ -400,10 +397,9 @@ subroutine ice_read_xyzt(nu, nrec, work, atype, diag, & if (my_task == master_task) then - !------------------------------------------------------------------- - ! Read global array according to format atype - !------------------------------------------------------------------- - + !------------------------------------------------------------------- + ! Read global array according to format atype + !------------------------------------------------------------------- if (present(hit_eof)) hit_eof = .false. if (atype == 'ida4') then @@ -430,7 +426,7 @@ subroutine ice_read_xyzt(nu, nrec, work, atype, diag, & ignore_eof_use = .false. endif if (ignore_eof_use) then - ! Read line from file, checking for end-of-file + ! 
Read line from file, checking for end-of-file read(nu, iostat=ios) (((work_g4(i,j,k),i=1,nx_global), & j=1,ny_global), & k=1,nblyr+2) @@ -452,10 +448,9 @@ subroutine ice_read_xyzt(nu, nrec, work, atype, diag, & endif endif - !------------------------------------------------------------------- - ! optional diagnostics - !------------------------------------------------------------------- - + !------------------------------------------------------------------- + ! optional diagnostics + !------------------------------------------------------------------- if (my_task==master_task .and. diag) then amin = minval(work_g4) amax = maxval(work_g4, mask = work_g4 /= spval_dbl) @@ -463,27 +458,27 @@ subroutine ice_read_xyzt(nu, nrec, work, atype, diag, & write(nu_diag,*) subname,' read_global ',nu, nrec, amin, amax, asum endif - !------------------------------------------------------------------- - ! Scatter data to individual processors. - ! NOTE: Ghost cells are not updated unless field_loc is present. - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Scatter data to individual processors. + ! NOTE: Ghost cells are not updated unless field_loc is present. 
+ !------------------------------------------------------------------- - do k = 1, nblyr+2 + do k = 1, nblyr+2 - if (present(field_loc)) then - call scatter_global(work(:,:,k,:), work_g4(:,:,k), master_task, distrb_info, & - field_loc, field_type) + if (present(field_loc)) then + call scatter_global(work(:,:,k,:), work_g4(:,:,k), master_task, distrb_info, & + field_loc, field_type) - else + else - call scatter_global(work(:,:,k,:), work_g4(:,:,k), master_task, distrb_info, & - field_loc_noupdate, field_type_noupdate) - endif + call scatter_global(work(:,:,k,:), work_g4(:,:,k), master_task, distrb_info, & + field_loc_noupdate, field_type_noupdate) + endif - enddo !k - deallocate(work_g4) + enddo !k + deallocate(work_g4) - end subroutine ice_read_xyzt + end subroutine ice_read_xyzt !======================================================================= @@ -497,18 +492,18 @@ subroutine ice_read_global (nu, nrec, work_g, atype, diag, & ignore_eof, hit_eof) integer (kind=int_kind), intent(in) :: & - nu , & ! unit number - nrec ! record number (0 for sequential access) + nu , & ! unit number + nrec ! record number (0 for sequential access) real (kind=dbl_kind), dimension(nx_global,ny_global), intent(out) :: & - work_g ! output array (real, 8-byte) + work_g ! output array (real, 8-byte) character (len=4) :: & - atype ! format for input array - ! (real/integer, 4-byte/8-byte) + atype ! format for input array + ! (real/integer, 4-byte/8-byte) logical (kind=log_kind) :: & - diag ! if true, write diagnostic output + diag ! if true, write diagnostic output logical (kind=log_kind), optional, intent(in) :: ignore_eof logical (kind=log_kind), optional, intent(out) :: hit_eof @@ -518,7 +513,7 @@ subroutine ice_read_global (nu, nrec, work_g, atype, diag, & integer (kind=int_kind) :: i, j, ios real (kind=dbl_kind) :: & - amin, amax, asum ! min, max values and sum of input array + amin, amax, asum ! 
min, max values and sum of input array logical (kind=log_kind) :: ignore_eof_use @@ -537,10 +532,9 @@ subroutine ice_read_global (nu, nrec, work_g, atype, diag, & if (my_task == master_task) then - !------------------------------------------------------------------- - ! Read global array according to format atype - !------------------------------------------------------------------- - + !------------------------------------------------------------------- + ! Read global array according to format atype + !------------------------------------------------------------------- if (present(hit_eof)) hit_eof = .false. if (atype == 'ida4') then @@ -584,10 +578,9 @@ subroutine ice_read_global (nu, nrec, work_g, atype, diag, & if (hit_eof) return endif - !------------------------------------------------------------------- - ! optional diagnostics - !------------------------------------------------------------------- - + !------------------------------------------------------------------- + ! optional diagnostics + !------------------------------------------------------------------- if (my_task == master_task .and. diag) then amin = minval(work_g) amax = maxval(work_g, mask = work_g /= spval_dbl) @@ -609,18 +602,18 @@ subroutine ice_read_ext(nu, nrec, work, atype, diag, & use ice_gather_scatter, only: scatter_global_ext integer (kind=int_kind), intent(in) :: & - nu , & ! unit number - nrec ! record number (0 for sequential access) + nu , & ! unit number + nrec ! record number (0 for sequential access) real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks), intent(out) :: & - work ! output array (real, 8-byte) + work ! output array (real, 8-byte) character (len=4), intent(in) :: & - atype ! format for input array - ! (real/integer, 4-byte/8-byte) + atype ! format for input array + ! (real/integer, 4-byte/8-byte) logical (kind=log_kind), intent(in) :: & - diag ! if true, write diagnostic output + diag ! 
if true, write diagnostic output logical (kind=log_kind), optional, intent(in) :: ignore_eof logical (kind=log_kind), optional, intent(out) :: hit_eof @@ -630,7 +623,7 @@ subroutine ice_read_ext(nu, nrec, work, atype, diag, & integer (kind=int_kind) :: i, j, ios, nx, ny real (kind=dbl_kind) :: & - amin, amax, asum ! min, max values and sum of input array + amin, amax, asum ! min, max values and sum of input array logical (kind=log_kind) :: ignore_eof_use @@ -659,10 +652,9 @@ subroutine ice_read_ext(nu, nrec, work, atype, diag, & if (my_task == master_task) then - !------------------------------------------------------------------- - ! Read global array according to format atype - !------------------------------------------------------------------- - + !------------------------------------------------------------------- + ! Read global array according to format atype + !------------------------------------------------------------------- if (present(hit_eof)) hit_eof = .false. if (atype == 'ida4') then @@ -689,7 +681,7 @@ subroutine ice_read_ext(nu, nrec, work, atype, diag, & ignore_eof_use = .false. endif if (ignore_eof_use) then - ! Read line from file, checking for end-of-file + ! Read line from file, checking for end-of-file read(nu, iostat=ios) ((work_g1(i,j),i=1,nx), & j=1,ny) if (present(hit_eof)) hit_eof = ios < 0 @@ -709,10 +701,9 @@ subroutine ice_read_ext(nu, nrec, work, atype, diag, & endif endif - !------------------------------------------------------------------- - ! optional diagnostics - !------------------------------------------------------------------- - + !------------------------------------------------------------------- + ! optional diagnostics + !------------------------------------------------------------------- if (my_task==master_task .and. 
diag) then amin = minval(work_g1) amax = maxval(work_g1, mask = work_g1 /= spval_dbl) @@ -720,10 +711,10 @@ subroutine ice_read_ext(nu, nrec, work, atype, diag, & write(nu_diag,*) subname,' read_global ',nu, nrec, amin, amax, asum endif - !------------------------------------------------------------------- - ! Scatter data to individual processors. - ! NOTE: Ghost cells are always updated - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Scatter data to individual processors. + ! NOTE: Ghost cells are always updated + !------------------------------------------------------------------- call scatter_global_ext(work, work_g1, master_task, distrb_info) @@ -741,25 +732,25 @@ subroutine ice_write_xyt(nu, nrec, work, atype, diag) use ice_gather_scatter, only: gather_global integer (kind=int_kind), intent(in) :: & - nu , & ! unit number - nrec ! record number (0 for sequential access) + nu , & ! unit number + nrec ! record number (0 for sequential access) real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks), intent(in) :: & - work ! input array (real, 8-byte) + work ! input array (real, 8-byte) character (len=4), intent(in) :: & - atype ! format for output array - ! (real/integer, 4-byte/8-byte) + atype ! format for output array + ! (real/integer, 4-byte/8-byte) logical (kind=log_kind), intent(in) :: & - diag ! if true, write diagnostic output + diag ! if true, write diagnostic output ! local variables integer (kind=int_kind) :: i, j real (kind=dbl_kind) :: & - amin, amax, asum ! min, max values and sum of input array + amin, amax, asum ! min, max values and sum of input array real (kind=dbl_kind), dimension(:,:), allocatable :: & work_g1 @@ -775,9 +766,9 @@ subroutine ice_write_xyt(nu, nrec, work, atype, diag) character(len=*), parameter :: subname = '(ice_write_xyt)' - !------------------------------------------------------------------- - ! 
Gather data from individual processors - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Gather data from individual processors + !------------------------------------------------------------------- if (my_task == master_task) then allocate(work_g1(nx_global,ny_global)) @@ -789,10 +780,9 @@ subroutine ice_write_xyt(nu, nrec, work, atype, diag) if (my_task == master_task) then - !------------------------------------------------------------------- - ! Write global array according to format atype - !------------------------------------------------------------------- - + !------------------------------------------------------------------- + ! Write global array according to format atype + !------------------------------------------------------------------- if (atype == 'ida4') then allocate(work_gi4(nx_global,ny_global)) work_gi4 = nint(work_g1) @@ -816,10 +806,9 @@ subroutine ice_write_xyt(nu, nrec, work, atype, diag) write(nu_diag,*) subname,' ERROR: writing unknown atype ',atype endif - !------------------------------------------------------------------- - ! diagnostics - !------------------------------------------------------------------- - + !------------------------------------------------------------------- + ! diagnostics + !------------------------------------------------------------------- if (diag) then amin = minval(work_g1) amax = maxval(work_g1, mask = work_g1 /= spval_dbl) @@ -844,25 +833,26 @@ subroutine ice_write_xyzt(nu, nrec, work, atype, diag) use ice_domain_size, only: nblyr integer (kind=int_kind), intent(in) :: & - nu , & ! unit number - nrec ! record number (0 for sequential access) + nu , & ! unit number + nrec ! record number (0 for sequential access) - real (kind=dbl_kind), dimension(nx_block,ny_block,nblyr+2,max_blocks), intent(in) :: & - work ! 
input array (real, 8-byte) + real (kind=dbl_kind), dimension(nx_block,ny_block,nblyr+2,max_blocks), & + intent(in) :: & + work ! input array (real, 8-byte) character (len=4), intent(in) :: & - atype ! format for output array - ! (real/integer, 4-byte/8-byte) + atype ! format for output array + ! (real/integer, 4-byte/8-byte) logical (kind=log_kind), intent(in) :: & - diag ! if true, write diagnostic output + diag ! if true, write diagnostic output ! local variables integer (kind=int_kind) :: i, j, k real (kind=dbl_kind) :: & - amin, amax, asum ! min, max values and sum of input array + amin, amax, asum ! min, max values and sum of input array real (kind=dbl_kind), dimension(:,:,:), allocatable :: & work_g4 @@ -878,9 +868,9 @@ subroutine ice_write_xyzt(nu, nrec, work, atype, diag) character(len=*), parameter :: subname = '(ice_write_xyzt)' - !------------------------------------------------------------------- - ! Gather data from individual processors - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Gather data from individual processors + !------------------------------------------------------------------- if (my_task == master_task) then allocate(work_g4(nx_global,ny_global,nblyr+2)) @@ -888,16 +878,15 @@ subroutine ice_write_xyzt(nu, nrec, work, atype, diag) allocate(work_g4(1,1,nblyr+2)) ! to save memory endif do k = 1,nblyr+2 - call gather_global(work_g4(:,:,k), work(:,:,k,:), master_task, & - distrb_info, spc_val=c0) + call gather_global(work_g4(:,:,k), work(:,:,k,:), master_task, & + distrb_info, spc_val=c0) enddo !k if (my_task == master_task) then - !------------------------------------------------------------------- - ! Write global array according to format atype - !------------------------------------------------------------------- - + !------------------------------------------------------------------- + ! 
Write global array according to format atype + !------------------------------------------------------------------- if (atype == 'ida4') then allocate(work_gi5(nx_global,ny_global,nblyr+2)) work_gi5 = nint(work_g4) @@ -922,10 +911,9 @@ subroutine ice_write_xyzt(nu, nrec, work, atype, diag) write(nu_diag,*) subname,' ERROR: writing unknown atype ',atype endif - !------------------------------------------------------------------- - ! diagnostics - !------------------------------------------------------------------- - + !------------------------------------------------------------------- + ! diagnostics + !------------------------------------------------------------------- if (diag) then amin = minval(work_g4) amax = maxval(work_g4, mask = work_g4 /= spval_dbl) @@ -951,25 +939,26 @@ subroutine ice_write_ext(nu, nrec, work, atype, diag) use ice_gather_scatter, only: gather_global_ext integer (kind=int_kind), intent(in) :: & - nu , & ! unit number - nrec ! record number (0 for sequential access) + nu , & ! unit number + nrec ! record number (0 for sequential access) - real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks), intent(in) :: & - work ! input array (real, 8-byte) + real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks), & + intent(in) :: & + work ! input array (real, 8-byte) character (len=4), intent(in) :: & - atype ! format for output array - ! (real/integer, 4-byte/8-byte) + atype ! format for output array + ! (real/integer, 4-byte/8-byte) logical (kind=log_kind), intent(in) :: & - diag ! if true, write diagnostic output + diag ! if true, write diagnostic output ! local variables integer (kind=int_kind) :: i, j, nx, ny real (kind=dbl_kind) :: & - amin, amax, asum ! min, max values and sum of input array + amin, amax, asum ! 
min, max values and sum of input array real (kind=dbl_kind), dimension(:,:), allocatable :: & work_g1 @@ -985,9 +974,9 @@ subroutine ice_write_ext(nu, nrec, work, atype, diag) character(len=*), parameter :: subname = '(ice_write_ext)' - !------------------------------------------------------------------- - ! Gather data from individual processors - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Gather data from individual processors + !------------------------------------------------------------------- nx = nx_global + 2*nghost ny = ny_global + 2*nghost @@ -1002,10 +991,9 @@ subroutine ice_write_ext(nu, nrec, work, atype, diag) if (my_task == master_task) then - !------------------------------------------------------------------- - ! Write global array according to format atype - !------------------------------------------------------------------- - + !------------------------------------------------------------------- + ! Write global array according to format atype + !------------------------------------------------------------------- if (atype == 'ida4') then allocate(work_gi4(nx,ny)) work_gi4 = nint(work_g1) @@ -1029,10 +1017,9 @@ subroutine ice_write_ext(nu, nrec, work, atype, diag) write(nu_diag,*) subname,' ERROR: writing unknown atype ',atype endif - !------------------------------------------------------------------- - ! diagnostics - !------------------------------------------------------------------- - + !------------------------------------------------------------------- + ! diagnostics + !------------------------------------------------------------------- if (diag) then amin = minval(work_g1) amax = maxval(work_g1, mask = work_g1 /= spval_dbl) @@ -1054,10 +1041,10 @@ end subroutine ice_write_ext subroutine ice_open_nc(filename, fid) character (char_len_long), intent(in) :: & - filename ! netCDF filename + filename ! 
netCDF filename integer (kind=int_kind), intent(out) :: & - fid ! unit number + fid ! unit number ! local variables @@ -1065,13 +1052,15 @@ subroutine ice_open_nc(filename, fid) #ifdef USE_NETCDF integer (kind=int_kind) :: & - status ! status variable from netCDF routine + status ! status variable from netCDF routine if (my_task == master_task) then status = nf90_open(filename, NF90_NOWRITE, fid) - call ice_check_nc(status, subname//' ERROR: Cannot open '//trim(filename), & - file=__FILE__, line=__LINE__) + if (status /= nf90_noerr) then + call abort_ice(subname//' ERROR: Cannot open '//trim(filename), & + file=__FILE__, line=__LINE__) + endif endif ! my_task = master_task @@ -1098,24 +1087,24 @@ subroutine ice_read_nc_xy(fid, nrec, varname, work, diag, & use ice_gather_scatter, only: scatter_global, scatter_global_ext integer (kind=int_kind), intent(in) :: & - fid , & ! file id - nrec ! record number + fid , & ! file id + nrec ! record number logical (kind=log_kind), intent(in) :: & - diag ! if true, write diagnostic output + diag ! if true, write diagnostic output character (len=*), intent(in) :: & - varname ! field name in netcdf file + varname ! field name in netcdf file real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks), intent(out) :: & - work ! output array (real, 8-byte) + work ! output array (real, 8-byte) logical (kind=log_kind), optional, intent(in) :: & - restart_ext ! if true, read extended grid + restart_ext ! if true, read extended grid integer (kind=int_kind), optional, intent(in) :: & - field_loc, & ! location of field on staggered grid - field_type ! type of field (scalar, vector, angle) + field_loc, & ! location of field on staggered grid + field_type ! type of field (scalar, vector, angle) ! local variables @@ -1124,17 +1113,17 @@ subroutine ice_read_nc_xy(fid, nrec, varname, work, diag, & #ifdef USE_NETCDF ! netCDF file diagnostics: integer (kind=int_kind) :: & - varid , & ! variable id - status , & ! 
status output from netcdf routines - ndims , & ! number of dimensions - dimlen ! dimension size + varid , & ! variable id + status , & ! status output from netcdf routines + ndims , & ! number of dimensions + dimlen ! dimension size integer (kind=int_kind), dimension(10) :: & - dimids ! generic size dimids + dimids ! generic size dimids real (kind=dbl_kind) :: & missingvalue, & - amin, amax, asum ! min, max values and sum of input array + amin, amax, asum ! min, max values and sum of input array real (kind=dbl_kind), dimension(:,:), allocatable :: & work_g1 @@ -1177,54 +1166,67 @@ subroutine ice_read_nc_xy(fid, nrec, varname, work, diag, & if (my_task == master_task) then - !------------------------------------------------------------- - ! Find out ID of required variable - !------------------------------------------------------------- + !------------------------------------------------------------- + ! Find out ID of required variable + !------------------------------------------------------------- status = nf90_inq_varid(fid, trim(varname), varid) - call ice_check_nc(status, subname//' ERROR: Cannot find variable '//trim(varname), & - file=__FILE__, line=__LINE__) + if (status /= nf90_noerr) then + call abort_ice(subname//' ERROR: Cannot find variable '//trim(varname), & + file=__FILE__, line=__LINE__) + endif - !------------------------------------------------------------- - ! Check nrec axis size - !------------------------------------------------------------- + !------------------------------------------------------------- + ! 
Check nrec axis size + !------------------------------------------------------------- status = nf90_inquire_variable(fid, varid, ndims=ndims, dimids=dimids) - call ice_check_nc(status, subname//' ERROR: inquire variable dimids '//trim(varname), & - file=__FILE__, line=__LINE__) + if (status /= nf90_noerr) then + call abort_ice(subname//' ERROR: inquire variable dimids '//trim(varname), & + file=__FILE__, line=__LINE__) + endif if (ndims > 2) then status = nf90_inquire_dimension(fid, dimids(3), len=dimlen) - call ice_check_nc(status, subname//' ERROR: inquire dimension size 3 '//trim(varname), & - file=__FILE__, line=__LINE__) + if (status /= nf90_noerr) then + call abort_ice(subname//' ERROR: inquire dimension size 3 '//trim(varname), & + file=__FILE__, line=__LINE__) + endif if (lnrec > dimlen) then - write(nu_diag,*) subname,' ERROR: not enough records, ',trim(varname),lnrec,dimlen + write(nu_diag,*) subname,' ERROR not enough records, ',trim(varname),lnrec,dimlen call abort_ice(subname//' ERROR: not enough records '//trim(varname), & - file=__FILE__, line=__LINE__) + file=__FILE__, line=__LINE__) endif endif - !-------------------------------------------------------------- - ! Read global array - !-------------------------------------------------------------- + !-------------------------------------------------------------- + ! Read global array + !-------------------------------------------------------------- if (orca_halogrid .and. .not. 
present(restart_ext)) then status = nf90_get_var( fid, varid, work_g2, & - start=(/1,1,lnrec/), count=(/nx_global+2,ny_global+1,1/)) - call ice_check_nc(status, subname//' ERROR: Cannot get variable '//trim(varname), & + start=(/1,1,lnrec/), & + count=(/nx_global+2,ny_global+1,1/)) + if (status /= nf90_noerr) then + call abort_ice(subname//' ERROR: Cannot get variable '//trim(varname), & file=__FILE__, line=__LINE__) + endif work_g1 = work_g2(2:nx_global+1,1:ny_global) else status = nf90_get_var( fid, varid, work_g1, & - start=(/1,1,lnrec/), count=(/nx,ny,1/)) - call ice_check_nc(status, subname//' ERROR: Cannot get variable '//trim(varname), & - file=__FILE__, line=__LINE__) + start=(/1,1,lnrec/), & + count=(/nx,ny,1/)) + if (status /= nf90_noerr) then + call abort_ice(subname//' ERROR: Cannot get variable '//trim(varname), & + file=__FILE__, line=__LINE__) + endif endif + status = nf90_get_att(fid, varid, "_FillValue", missingvalue) endif ! my_task = master_task - !------------------------------------------------------------------- - ! optional diagnostics - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! optional diagnostics + !------------------------------------------------------------------- if (my_task==master_task .and. diag) then write(nu_diag,'(2a,i8,a,i8,2a)') & @@ -1236,22 +1238,16 @@ subroutine ice_read_nc_xy(fid, nrec, varname, work, diag, & ! status = nf90_inquire_dimension(fid,id,name=dimname,len=dimlen) ! write(nu_diag,*) subname,' Dim name = ',trim(dimname),', size = ',dimlen ! enddo - ! optional - missingvalue = spval_dbl - status = nf90_get_att(fid, varid, "_FillValue", missingvalue) -! call ice_check_nc(status, subname//' ERROR: Missing _FillValue', & -! file=__FILE__, line=__LINE__) -! 
write(nu_diag,*) subname,' missingvalue= ',missingvalue amin = minval(work_g1) amax = maxval(work_g1, mask = work_g1 /= missingvalue) asum = sum (work_g1, mask = work_g1 /= missingvalue) write(nu_diag,*) subname,' min, max, sum =', amin, amax, asum, trim(varname) endif - !------------------------------------------------------------------- - ! Scatter data to individual processors. - ! NOTE: Ghost cells are not updated unless field_loc is present. - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Scatter data to individual processors. + ! NOTE: Ghost cells are not updated unless field_loc is present. + !------------------------------------------------------------------- if (present(restart_ext)) then if (restart_ext) then @@ -1275,7 +1271,6 @@ subroutine ice_read_nc_xy(fid, nrec, varname, work, diag, & if (orca_halogrid .and. .not. present(restart_ext)) deallocate(work_g2) #else - work = c0 ! to satisfy intent(out) attribute call abort_ice(subname//' ERROR: USE_NETCDF cpp not defined', & file=__FILE__, line=__LINE__) #endif @@ -1297,24 +1292,24 @@ subroutine ice_read_nc_xyz(fid, nrec, varname, work, diag, & use ice_gather_scatter, only: scatter_global, scatter_global_ext integer (kind=int_kind), intent(in) :: & - fid , & ! file id - nrec ! record number + fid , & ! file id + nrec ! record number character (len=*), intent(in) :: & - varname ! field name in netcdf file + varname ! field name in netcdf file logical (kind=log_kind), intent(in) :: & - diag ! if true, write diagnostic output + diag ! if true, write diagnostic output real (kind=dbl_kind), dimension(nx_block,ny_block,ncat,max_blocks), intent(out) :: & - work ! output array (real, 8-byte) + work ! output array (real, 8-byte) logical (kind=log_kind), optional, intent(in) :: & - restart_ext ! if true, read extended grid + restart_ext ! 
if true, read extended grid integer (kind=int_kind), optional, intent(in) :: & - field_loc, & ! location of field on staggered grid - field_type ! type of field (scalar, vector, angle) + field_loc, & ! location of field on staggered grid + field_type ! type of field (scalar, vector, angle) ! local variables @@ -1323,21 +1318,21 @@ subroutine ice_read_nc_xyz(fid, nrec, varname, work, diag, & #ifdef USE_NETCDF ! netCDF file diagnostics: integer (kind=int_kind) :: & - n, & ! ncat index - varid , & ! variable id - status , & ! status output from netcdf routines - ndims , & ! number of dimensions - dimlen ! dimension size + n, & ! ncat index + varid , & ! variable id + status , & ! status output from netcdf routines + ndims , & ! number of dimensions + dimlen ! dimension size integer (kind=int_kind), dimension(10) :: & - dimids ! generic size dimids + dimids ! generic size dimids real (kind=dbl_kind) :: & - missingvalue, & ! missing value - amin, amax, asum ! min, max values and sum of input array + missingvalue, & ! missing value + amin, amax, asum ! min, max values and sum of input array ! character (char_len) :: & -! dimname ! dimension name +! dimname ! dimension name real (kind=dbl_kind), dimension(:,:,:), allocatable :: & work_g1 @@ -1378,54 +1373,67 @@ subroutine ice_read_nc_xyz(fid, nrec, varname, work, diag, & if (my_task == master_task) then - !------------------------------------------------------------- - ! Find out ID of required variable - !------------------------------------------------------------- + !------------------------------------------------------------- + ! 
Find out ID of required variable + !------------------------------------------------------------- status = nf90_inq_varid(fid, trim(varname), varid) - call ice_check_nc(status, subname//' ERROR: Cannot find variable '//trim(varname), & - file=__FILE__, line=__LINE__) + if (status /= nf90_noerr) then + call abort_ice(subname//' ERROR: Cannot find variable '//trim(varname), & + file=__FILE__, line=__LINE__) + endif - !------------------------------------------------------------- - ! Check nrec axis size - !------------------------------------------------------------- + !------------------------------------------------------------- + ! Check nrec axis size + !------------------------------------------------------------- status = nf90_inquire_variable(fid, varid, ndims=ndims, dimids=dimids) - call ice_check_nc(status, subname//' ERROR: inquire variable dimids '//trim(varname), & - file=__FILE__, line=__LINE__) + if (status /= nf90_noerr) then + call abort_ice(subname//' ERROR: inquire variable dimids '//trim(varname), & + file=__FILE__, line=__LINE__) + endif if (ndims > 3) then status = nf90_inquire_dimension(fid, dimids(4), len=dimlen) - call ice_check_nc(status, subname//' ERROR: inquire dimension size 4 '//trim(varname), & - file=__FILE__, line=__LINE__) + if (status /= nf90_noerr) then + call abort_ice(subname//' ERROR: inquire dimension size 4 '//trim(varname), & + file=__FILE__, line=__LINE__) + endif if (lnrec > dimlen) then - write(nu_diag,*) subname,' ERROR: not enough records, ',trim(varname),lnrec,dimlen + write(nu_diag,*) subname,' ERROR not enough records, ',trim(varname),lnrec,dimlen call abort_ice(subname//' ERROR: not enough records '//trim(varname), & - file=__FILE__, line=__LINE__) + file=__FILE__, line=__LINE__) endif endif - !-------------------------------------------------------------- - ! Read global array - !-------------------------------------------------------------- + !-------------------------------------------------------------- + ! 
Read global array + !-------------------------------------------------------------- if (orca_halogrid .and. .not. present(restart_ext)) then status = nf90_get_var( fid, varid, work_g2, & - start=(/1,1,1,lnrec/), count=(/nx_global+2,ny_global+1,ncat,1/)) - call ice_check_nc(status, subname//' ERROR: Cannot get variable '//trim(varname), & - file=__FILE__, line=__LINE__) + start=(/1,1,1,lnrec/), & + count=(/nx_global+2,ny_global+1,ncat,1/)) + if (status /= nf90_noerr) then + call abort_ice(subname//' ERROR: Cannot get variable '//trim(varname), & + file=__FILE__, line=__LINE__) + endif work_g1 = work_g2(2:nx_global+1,1:ny_global,:) else status = nf90_get_var( fid, varid, work_g1, & - start=(/1,1,1,lnrec/), count=(/nx,ny,ncat,1/)) - call ice_check_nc(status, subname//' ERROR: Cannot get variable '//trim(varname), & - file=__FILE__, line=__LINE__) + start=(/1,1,1,lnrec/), & + count=(/nx,ny,ncat,1/)) + if (status /= nf90_noerr) then + call abort_ice(subname//' ERROR: Cannot get variable '//trim(varname), & + file=__FILE__, line=__LINE__) + endif endif + status = nf90_get_att(fid, varid, "_FillValue", missingvalue) endif ! my_task = master_task - !------------------------------------------------------------------- - ! optional diagnostics - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! optional diagnostics + !------------------------------------------------------------------- if (my_task==master_task .and. diag) then write(nu_diag,'(2a,i8,a,i8,2a)') & @@ -1437,12 +1445,6 @@ subroutine ice_read_nc_xyz(fid, nrec, varname, work, diag, & ! status = nf90_inquire_dimension(fid,id,name=dimname,len=dimlen) ! write(nu_diag,*) subname,' Dim name = ',trim(dimname),', size = ',dimlen ! enddo - ! optional - missingvalue = spval_dbl - status = nf90_get_att(fid, varid, "_FillValue", missingvalue) -! call ice_check_nc(status, subname//' ERROR: Missing _FillValue', & -! 
file=__FILE__, line=__LINE__) -! write(nu_diag,*) subname,' missingvalue= ',missingvalue do n=1,ncat amin = minval(work_g1(:,:,n)) amax = maxval(work_g1(:,:,n), mask = work_g1(:,:,n) /= missingvalue) @@ -1451,10 +1453,10 @@ subroutine ice_read_nc_xyz(fid, nrec, varname, work, diag, & enddo endif - !------------------------------------------------------------------- - ! Scatter data to individual processors. - ! NOTE: Ghost cells are not updated unless field_loc is present. - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Scatter data to individual processors. + ! NOTE: Ghost cells are not updated unless field_loc is present. + !------------------------------------------------------------------- if (present(restart_ext)) then if (restart_ext) then @@ -1507,46 +1509,47 @@ subroutine ice_read_nc_xyf(fid, nrec, varname, work, diag, & use ice_gather_scatter, only: scatter_global, scatter_global_ext integer (kind=int_kind), intent(in) :: & - fid , & ! file id - nrec ! record number + fid , & ! file id + nrec ! record number character (len=*), intent(in) :: & - varname ! field name in netcdf file + varname ! field name in netcdf file logical (kind=log_kind), intent(in) :: & - diag ! if true, write diagnostic output + diag ! if true, write diagnostic output - real (kind=dbl_kind), dimension(nx_block,ny_block,nfreq,1,max_blocks), intent(out) :: & - work ! output array (real, 8-byte) + real (kind=dbl_kind), dimension(nx_block,ny_block,nfreq,1,max_blocks), & + intent(out) :: & + work ! output array (real, 8-byte) logical (kind=log_kind), optional, intent(in) :: & - restart_ext ! if true, read extended grid + restart_ext ! if true, read extended grid integer (kind=int_kind), optional, intent(in) :: & - field_loc, & ! location of field on staggered grid - field_type ! type of field (scalar, vector, angle) + field_loc, & ! location of field on staggered grid + field_type ! 
type of field (scalar, vector, angle) ! local variables ! netCDF file diagnostics: integer (kind=int_kind) :: & - varid, & ! variable id - status, & ! status output from netcdf routines - ndim, nvar, & ! sizes of netcdf file - id, & ! dimension index - n, & ! ncat index - ndims, & ! number of dimensions - dimlen ! dimension size + varid, & ! variable id + status, & ! status output from netcdf routines + ndim, nvar, & ! sizes of netcdf file + id, & ! dimension index + n, & ! ncat index + ndims, & ! number of dimensions + dimlen ! dimension size integer (kind=int_kind), dimension(10) :: & - dimids ! generic size dimids + dimids ! generic size dimids real (kind=dbl_kind) :: & - missingvalue, & ! missing value - amin, amax, asum ! min, max values and sum of input array + missingvalue, & ! missing value + amin, amax, asum ! min, max values and sum of input array character (char_len) :: & - dimname ! dimension name + dimname ! dimension name real (kind=dbl_kind), dimension(:,:,:), allocatable :: & work_g1 @@ -1590,54 +1593,67 @@ subroutine ice_read_nc_xyf(fid, nrec, varname, work, diag, & if (my_task == master_task) then - !------------------------------------------------------------- - ! Find out ID of required variable - !------------------------------------------------------------- + !------------------------------------------------------------- + ! Find out ID of required variable + !------------------------------------------------------------- status = nf90_inq_varid(fid, trim(varname), varid) - call ice_check_nc(status, subname//' ERROR: Cannot find variable '//trim(varname), & - file=__FILE__, line=__LINE__) + if (status /= nf90_noerr) then + call abort_ice(subname//' ERROR: Cannot find variable '//trim(varname), & + file=__FILE__, line=__LINE__) + endif - !------------------------------------------------------------- - ! 
Check nrec axis size - !------------------------------------------------------------- + !------------------------------------------------------------- + ! Check nrec axis size + !------------------------------------------------------------- status = nf90_inquire_variable(fid, varid, ndims=ndims, dimids=dimids) - call ice_check_nc(status, subname//' ERROR: inquire variable dimids '//trim(varname), & - file=__FILE__, line=__LINE__) + if (status /= nf90_noerr) then + call abort_ice(subname//' ERROR: inquire variable dimids '//trim(varname), & + file=__FILE__, line=__LINE__) + endif if (ndims > 3) then status = nf90_inquire_dimension(fid, dimids(4), len=dimlen) - call ice_check_nc(status, subname//' ERROR: inquire dimension size 4 '//trim(varname), & - file=__FILE__, line=__LINE__) + if (status /= nf90_noerr) then + call abort_ice(subname//' ERROR: inquire dimension size 4 '//trim(varname), & + file=__FILE__, line=__LINE__) + endif if (lnrec > dimlen) then - write(nu_diag,*) subname,' ERROR: not enough records, ',trim(varname),lnrec,dimlen + write(nu_diag,*) subname,' ERROR not enough records, ',trim(varname),lnrec,dimlen call abort_ice(subname//' ERROR: not enough records '//trim(varname), & - file=__FILE__, line=__LINE__) + file=__FILE__, line=__LINE__) endif endif - !-------------------------------------------------------------- - ! Read global array - !-------------------------------------------------------------- + !-------------------------------------------------------------- + ! Read global array + !-------------------------------------------------------------- if (orca_halogrid .and. .not. 
present(restart_ext)) then status = nf90_get_var( fid, varid, work_g2, & - start=(/1,1,1,lnrec/), count=(/nx_global+2,ny_global+1,nfreq,1/)) - call ice_check_nc(status, subname//' ERROR: Cannot get variable '//trim(varname), & - file=__FILE__, line=__LINE__) + start=(/1,1,1,lnrec/), & + count=(/nx_global+2,ny_global+1,nfreq,1/)) + if (status /= nf90_noerr) then + call abort_ice(subname//' ERROR: Cannot get variable '//trim(varname), & + file=__FILE__, line=__LINE__) + endif work_g1 = work_g2(2:nx_global+1,1:ny_global,:) else status = nf90_get_var( fid, varid, work_g1, & - start=(/1,1,1,lnrec/), count=(/nx,ny,nfreq,1/)) - call ice_check_nc(status, subname//' ERROR: Cannot get variable '//trim(varname), & - file=__FILE__, line=__LINE__) + start=(/1,1,1,lnrec/), & + count=(/nx,ny,nfreq,1/)) + if (status /= nf90_noerr) then + call abort_ice(subname//' ERROR: Cannot get variable '//trim(varname), & + file=__FILE__, line=__LINE__) + endif endif + status = nf90_get_att(fid, varid, "missing_value", missingvalue) endif ! my_task = master_task - !------------------------------------------------------------------- - ! optional diagnostics - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! optional diagnostics + !------------------------------------------------------------------- if (my_task==master_task .and. diag) then write(nu_diag,'(2a,i8,a,i8,2a)') & @@ -1649,24 +1665,19 @@ subroutine ice_read_nc_xyf(fid, nrec, varname, work, diag, & status = nf90_inquire_dimension(fid,id,name=dimname,len=dimlen) write(nu_diag,*) subname,' Dim name = ',trim(dimname),', size = ',dimlen enddo - ! optional - missingvalue = spval_dbl - status = nf90_get_att(fid, varid, "_FillValue", missingvalue) -! call ice_check_nc(status, subname//' ERROR: Missing _FillValue', & -! file=__FILE__, line=__LINE__) -! 
write(nu_diag,*) subname,' missingvalue= ',missingvalue + write(nu_diag,*) subname,' missingvalue= ',missingvalue do n = 1, nfreq amin = minval(work_g1(:,:,n)) amax = maxval(work_g1(:,:,n), mask = work_g1(:,:,n) /= missingvalue) asum = sum (work_g1(:,:,n), mask = work_g1(:,:,n) /= missingvalue) - write(nu_diag,*) subname,' min, max, sum =', amin, amax, asum, trim(varname) + write(nu_diag,*) subname,' min, max, sum =', amin, amax, asum enddo endif - !------------------------------------------------------------------- - ! Scatter data to individual processors. - ! NOTE: Ghost cells are not updated unless field_loc is present. - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Scatter data to individual processors. + ! NOTE: Ghost cells are not updated unless field_loc is present. + !------------------------------------------------------------------- if (present(restart_ext)) then if (restart_ext) then @@ -1712,21 +1723,21 @@ subroutine ice_read_nc_point(fid, nrec, varname, work, diag, & field_loc, field_type) integer (kind=int_kind), intent(in) :: & - fid , & ! file id - nrec ! record number + fid , & ! file id + nrec ! record number logical (kind=log_kind), intent(in) :: & - diag ! if true, write diagnostic output + diag ! if true, write diagnostic output character (char_len), intent(in) :: & - varname ! field name in netcdf file + varname ! field name in netcdf file integer (kind=int_kind), optional, intent(in) :: & - field_loc, & ! location of field on staggered grid - field_type ! type of field (scalar, vector, angle) + field_loc, & ! location of field on staggered grid + field_type ! type of field (scalar, vector, angle) real (kind=dbl_kind), intent(out) :: & - work ! output variable (real, 8-byte) + work ! output variable (real, 8-byte) ! local variables @@ -1735,67 +1746,76 @@ subroutine ice_read_nc_point(fid, nrec, varname, work, diag, & #ifdef USE_NETCDF ! 
netCDF file diagnostics: integer (kind=int_kind) :: & - varid, & ! netcdf id for field - status, & ! status output from netcdf routines - ndim, nvar, & ! sizes of netcdf file - id, & ! dimension index - ndims, & ! number of dimensions - dimlen ! dimension size + varid, & ! netcdf id for field + status, & ! status output from netcdf routines + ndim, nvar, & ! sizes of netcdf file + id, & ! dimension index + ndims, & ! number of dimensions + dimlen ! dimension size integer (kind=int_kind), dimension(10) :: & - dimids ! generic size dimids + dimids ! generic size dimids real (kind=dbl_kind), dimension(1) :: & - workg ! temporary work variable + workg ! temporary work variable integer (kind=int_kind) :: lnrec ! local value of nrec character (char_len) :: & - dimname ! dimension name + dimname ! dimension name lnrec = nrec if (my_task == master_task) then - !------------------------------------------------------------- - ! Find out ID of required variable - !------------------------------------------------------------- + !------------------------------------------------------------- + ! Find out ID of required variable + !------------------------------------------------------------- status = nf90_inq_varid(fid, trim(varname), varid) - call ice_check_nc(status, subname//' ERROR: Cannot find variable '//trim(varname), & - file=__FILE__, line=__LINE__) + if (status /= nf90_noerr) then + call abort_ice(subname//' ERROR: Cannot find variable '//trim(varname), & + file=__FILE__, line=__LINE__) + endif - !------------------------------------------------------------- - ! Check nrec axis size - !------------------------------------------------------------- + !------------------------------------------------------------- + ! 
Check nrec axis size + !------------------------------------------------------------- status = nf90_inquire_variable(fid, varid, ndims=ndims, dimids=dimids) - call ice_check_nc(status, subname//' ERROR: inquire variable dimids '//trim(varname), & - file=__FILE__, line=__LINE__) + if (status /= nf90_noerr) then + call abort_ice(subname//' ERROR: inquire variable dimids '//trim(varname), & + file=__FILE__, line=__LINE__) + endif if (ndims > 0) then status = nf90_inquire_dimension(fid, dimids(1), len=dimlen) - call ice_check_nc(status, subname//' ERROR: inquire dimension size 1 '//trim(varname), & - file=__FILE__, line=__LINE__) + if (status /= nf90_noerr) then + call abort_ice(subname//' ERROR: inquire dimension size 1 '//trim(varname), & + file=__FILE__, line=__LINE__) + endif if (lnrec > dimlen) then - write(nu_diag,*) subname,' ERROR: not enough records, ',trim(varname),lnrec,dimlen + write(nu_diag,*) subname,' ERROR not enough records, ',trim(varname),lnrec,dimlen call abort_ice(subname//' ERROR: not enough records '//trim(varname), & - file=__FILE__, line=__LINE__) + file=__FILE__, line=__LINE__) endif endif - !-------------------------------------------------------------- - ! Read point variable - !-------------------------------------------------------------- + !-------------------------------------------------------------- + ! Read point variable + !-------------------------------------------------------------- status = nf90_get_var(fid, varid, workg, & - start= (/ lnrec /), count=(/ 1 /)) - call ice_check_nc(status, subname//' ERROR: Cannot get variable '//trim(varname), & - file=__FILE__, line=__LINE__) + start= (/ lnrec /), & + count=(/ 1 /)) + if (status /= nf90_noerr) then + call abort_ice(subname//' ERROR: Cannot get variable '//trim(varname), & + file=__FILE__, line=__LINE__) + endif endif ! my_task = master_task - !------------------------------------------------------------------- - ! 
optional diagnostics - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! optional diagnostics + !------------------------------------------------------------------- if (my_task==master_task .and. diag) then write(nu_diag,'(2a,i8,a,i8,2a)') & @@ -1828,17 +1848,17 @@ subroutine ice_read_nc_1D(fid, varname, work, diag, & use ice_fileunits, only: nu_diag integer (kind=int_kind), intent(in) :: & - fid , & ! file id - xdim ! field dimensions + fid , & ! file id + xdim ! field dimensions logical (kind=log_kind), intent(in) :: & - diag ! if true, write diagnostic output + diag ! if true, write diagnostic output character (char_len), intent(in) :: & - varname ! field name in netcdf file + varname ! field name in netcdf file real (kind=dbl_kind), dimension(:), intent(out) :: & - work ! output array + work ! output array ! local variables @@ -1847,12 +1867,12 @@ subroutine ice_read_nc_1D(fid, varname, work, diag, & #ifdef USE_NETCDF ! netCDF file diagnostics: integer (kind=int_kind) :: & - varid, & ! netcdf id for field - status, & ! status output from netcdf routines - ndim, nvar ! sizes of netcdf file + varid, & ! netcdf id for field + status, & ! status output from netcdf routines + ndim, nvar ! sizes of netcdf file real (kind=dbl_kind), dimension(xdim) :: & - workg ! output array (real, 8-byte) + workg ! output array (real, 8-byte) !-------------------------------------------------------------- @@ -1863,23 +1883,23 @@ subroutine ice_read_nc_1D(fid, varname, work, diag, & call abort_ice (subname//' ERROR: work array wrong size '//trim(varname), & file=__FILE__, line=__LINE__ ) endif - !------------------------------------------------------------- ! 
Find out ID of required variable !------------------------------------------------------------- status = nf90_inq_varid(fid, trim(varname), varid) - call ice_check_nc(status, subname//' ERROR: Cannot find variable '//trim(varname), & - file=__FILE__, line=__LINE__ ) + + if (status /= nf90_noerr) then + call abort_ice (subname//' ERROR: Cannot find variable '//trim(varname), & + file=__FILE__, line=__LINE__ ) + endif !-------------------------------------------------------------- ! Read array !-------------------------------------------------------------- - status = nf90_get_var( fid, varid, workg, & - start=(/1/), count=(/xdim/) ) - call ice_check_nc(status, subname//' ERROR: Cannot get variable '//trim(varname), & - file=__FILE__, line=__LINE__ ) + start=(/1/), & + count=(/xdim/) ) work(1:xdim) = workg(1:xdim) !------------------------------------------------------------------- @@ -1895,7 +1915,7 @@ subroutine ice_read_nc_1D(fid, varname, work, diag, & endif endif #else - call abort_ice(subname//' ERROR: USE_NETCDF cpp not defined', & + call abort_ice(subname//'ERROR: USE_NETCDF cpp not defined', & file=__FILE__, line=__LINE__) work = c0 ! to satisfy intent(out) attribute #endif @@ -1912,17 +1932,17 @@ subroutine ice_read_nc_2D(fid, varname, work, diag, & use ice_fileunits, only: nu_diag integer (kind=int_kind), intent(in) :: & - fid , & ! file id - xdim, ydim ! field dimensions + fid , & ! file id + xdim, ydim ! field dimensions logical (kind=log_kind), intent(in) :: & - diag ! if true, write diagnostic output + diag ! if true, write diagnostic output character (char_len), intent(in) :: & - varname ! field name in netcdf file + varname ! field name in netcdf file real (kind=dbl_kind), dimension(:,:), intent(out) :: & - work ! output array + work ! output array ! local variables @@ -1931,12 +1951,12 @@ subroutine ice_read_nc_2D(fid, varname, work, diag, & #ifdef USE_NETCDF ! netCDF file diagnostics: integer (kind=int_kind) :: & - varid, & ! 
netcdf id for field - status, & ! status output from netcdf routines - ndim, nvar ! sizes of netcdf file + varid, & ! netcdf id for field + status, & ! status output from netcdf routines + ndim, nvar ! sizes of netcdf file real (kind=dbl_kind), dimension(xdim,ydim) :: & - workg ! output array (real, 8-byte) + workg ! output array (real, 8-byte) !-------------------------------------------------------------- @@ -1949,23 +1969,23 @@ subroutine ice_read_nc_2D(fid, varname, work, diag, & call abort_ice (subname//' ERROR: work array wrong size '//trim(varname), & file=__FILE__, line=__LINE__ ) endif - !------------------------------------------------------------- ! Find out ID of required variable !------------------------------------------------------------- status = nf90_inq_varid(fid, trim(varname), varid) - call ice_check_nc(status,subname//' ERROR: Cannot find variable '//trim(varname), & - file=__FILE__, line=__LINE__ ) + + if (status /= nf90_noerr) then + call abort_ice (subname//' ERROR: Cannot find variable '//trim(varname), & + file=__FILE__, line=__LINE__ ) + endif !-------------------------------------------------------------- ! Read array !-------------------------------------------------------------- - status = nf90_get_var( fid, varid, workg, & - start=(/1,1/), count=(/xdim,ydim/) ) - call ice_check_nc(status, subname//' ERROR: Cannot get variable '//trim(varname), & - file=__FILE__, line=__LINE__ ) + start=(/1,1/), & + count=(/xdim,ydim/) ) work(1:xdim,1:ydim) = workg(1:xdim, 1:ydim) !------------------------------------------------------------------- @@ -1981,7 +2001,7 @@ subroutine ice_read_nc_2D(fid, varname, work, diag, & endif endif #else - call abort_ice(subname//' ERROR: USE_NETCDF cpp not defined', & + call abort_ice(subname//'ERROR: USE_NETCDF cpp not defined', & file=__FILE__, line=__LINE__) work = c0 ! 
to satisfy intent(out) attribute #endif @@ -1989,6 +2009,7 @@ subroutine ice_read_nc_2D(fid, varname, work, diag, & end subroutine ice_read_nc_2D !======================================================================= +!======================================================================= ! Written by T. Craig @@ -1998,17 +2019,17 @@ subroutine ice_read_nc_3D(fid, varname, work, diag, & use ice_fileunits, only: nu_diag integer (kind=int_kind), intent(in) :: & - fid , & ! file id - xdim, ydim,zdim ! field dimensions + fid , & ! file id + xdim, ydim,zdim ! field dimensions logical (kind=log_kind), intent(in) :: & - diag ! if true, write diagnostic output + diag ! if true, write diagnostic output character (char_len), intent(in) :: & - varname ! field name in netcdf file + varname ! field name in netcdf file real (kind=dbl_kind), dimension(:,:,:), intent(out) :: & - work ! output array + work ! output array ! local variables @@ -2017,12 +2038,12 @@ subroutine ice_read_nc_3D(fid, varname, work, diag, & #ifdef USE_NETCDF ! netCDF file diagnostics: integer (kind=int_kind) :: & - varid, & ! netcdf id for field - status, & ! status output from netcdf routines - ndim, nvar ! sizes of netcdf file + varid, & ! netcdf id for field + status, & ! status output from netcdf routines + ndim, nvar ! sizes of netcdf file real (kind=dbl_kind), dimension(xdim,ydim,zdim) :: & - workg ! output array (real, 8-byte) + workg ! output array (real, 8-byte) !-------------------------------------------------------------- @@ -2037,23 +2058,23 @@ subroutine ice_read_nc_3D(fid, varname, work, diag, & call abort_ice (subname//' ERROR: work array wrong size '//trim(varname), & file=__FILE__, line=__LINE__ ) endif - !------------------------------------------------------------- ! 
Find out ID of required variable !------------------------------------------------------------- status = nf90_inq_varid(fid, trim(varname), varid) - call ice_check_nc(status, subname//' ERROR: Cannot find variable '//trim(varname), & - file=__FILE__, line=__LINE__ ) + + if (status /= nf90_noerr) then + call abort_ice (subname//' ERROR: Cannot find variable '//trim(varname), & + file=__FILE__, line=__LINE__ ) + endif !-------------------------------------------------------------- ! Read array !-------------------------------------------------------------- - status = nf90_get_var( fid, varid, workg, & - start=(/1,1,1/), count=(/xdim,ydim,zdim/) ) - call ice_check_nc(status, subname//' ERROR: Cannot get variable '//trim(varname), & - file=__FILE__, line=__LINE__ ) + start=(/1,1,1/), & + count=(/xdim,ydim,zdim/) ) work(1:xdim,1:ydim,1:zdim) = workg(1:xdim, 1:ydim, 1:zdim) !------------------------------------------------------------------- @@ -2069,7 +2090,7 @@ subroutine ice_read_nc_3D(fid, varname, work, diag, & endif endif #else - call abort_ice(subname//' ERROR: USE_NETCDF cpp not defined', & + call abort_ice(subname//'ERROR: USE_NETCDF cpp not defined', & file=__FILE__, line=__LINE__) work = c0 ! to satisfy intent(out) attribute #endif @@ -2086,42 +2107,42 @@ subroutine ice_read_nc_z(fid, nrec, varname, work, diag, & use ice_domain_size, only: nilyr integer (kind=int_kind), intent(in) :: & - fid , & ! file id - nrec ! record number + fid , & ! file id + nrec ! record number logical (kind=log_kind), intent(in) :: & - diag ! if true, write diagnostic output + diag ! if true, write diagnostic output character (char_len), intent(in) :: & - varname ! field name in netcdf file + varname ! field name in netcdf file integer (kind=int_kind), optional, intent(in) :: & - field_loc, & ! location of field on staggered grid - field_type ! type of field (scalar, vector, angle) + field_loc, & ! location of field on staggered grid + field_type ! 
type of field (scalar, vector, angle) real (kind=dbl_kind), dimension(nilyr), intent(out) :: & - work ! output array (real, 8-byte) + work ! output array (real, 8-byte) ! local variables #ifdef USE_NETCDF real (kind=dbl_kind), dimension(:), allocatable :: & - work_z + work_z ! netCDF file diagnostics: integer (kind=int_kind) :: & - varid, & ! netcdf id for field - status, & ! status output from netcdf routines - ndim, nvar, & ! sizes of netcdf file - id, & ! dimension index - ndims, & ! number of dimensions - dimlen ! dimension size + varid, & ! netcdf id for field + status, & ! status output from netcdf routines + ndim, nvar, & ! sizes of netcdf file + id, & ! dimension index + ndims, & ! number of dimensions + dimlen ! dimension size integer (kind=int_kind), dimension(10) :: & - dimids ! generic size dimids + dimids ! generic size dimids character (char_len) :: & - dimname ! dimension name + dimname ! dimension name integer (kind=int_kind) :: lnrec ! local value of nrec @@ -2137,45 +2158,54 @@ subroutine ice_read_nc_z(fid, nrec, varname, work, diag, & if (my_task == master_task) then - !------------------------------------------------------------- - ! Find out ID of required variable - !------------------------------------------------------------- + !------------------------------------------------------------- + ! Find out ID of required variable + !------------------------------------------------------------- status = nf90_inq_varid(fid, trim(varname), varid) - call ice_check_nc(status, subname//' ERROR: Cannot find variable '//trim(varname), & - file=__FILE__, line=__LINE__) + if (status /= nf90_noerr) then + call abort_ice(subname//' ERROR: Cannot find variable '//trim(varname), & + file=__FILE__, line=__LINE__) + endif - !------------------------------------------------------------- - ! Check nrec axis size - !------------------------------------------------------------- + !------------------------------------------------------------- + ! 
Check nrec axis size + !------------------------------------------------------------- status = nf90_inquire_variable(fid, varid, ndims=ndims, dimids=dimids) - call ice_check_nc(status, subname//' ERROR: inquire variable dimids '//trim(varname), & - file=__FILE__, line=__LINE__) + if (status /= nf90_noerr) then + call abort_ice(subname//' ERROR: inquire variable dimids '//trim(varname), & + file=__FILE__, line=__LINE__) + endif if (ndims > 1) then status = nf90_inquire_dimension(fid, dimids(2), len=dimlen) - call ice_check_nc(status, subname//' ERROR: inquire dimension size 2 '//trim(varname), & - file=__FILE__, line=__LINE__) + if (status /= nf90_noerr) then + call abort_ice(subname//' ERROR: inquire dimension size 2 '//trim(varname), & + file=__FILE__, line=__LINE__) + endif if (lnrec > dimlen) then - write(nu_diag,*) subname,' ERROR: not enough records, ',trim(varname),lnrec,dimlen + write(nu_diag,*) subname,' ERROR not enough records, ',trim(varname),lnrec,dimlen call abort_ice(subname//' ERROR: not enough records '//trim(varname), & - file=__FILE__, line=__LINE__) + file=__FILE__, line=__LINE__) endif endif - !-------------------------------------------------------------- - ! Read global array - !-------------------------------------------------------------- + !-------------------------------------------------------------- + ! Read global array + !-------------------------------------------------------------- status = nf90_get_var( fid, varid, work_z, & - start=(/1,lnrec/), count=(/nilyr,1/)) - call ice_check_nc(status, subname//' ERROR: Cannot get variable '//trim(varname), & - file=__FILE__, line=__LINE__) + start=(/1,lnrec/), & + count=(/nilyr,1/)) + if (status /= nf90_noerr) then + call abort_ice(subname//' ERROR: Cannot get variable '//trim(varname), & + file=__FILE__, line=__LINE__) + endif endif ! my_task = master_task - !------------------------------------------------------------------- - ! 
optional diagnostics - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! optional diagnostics + !------------------------------------------------------------------- if (my_task==master_task .and. diag) then write(nu_diag,'(2a,i8,a,i8,2a)') & @@ -2211,21 +2241,21 @@ subroutine ice_write_nc_xy(fid, nrec, varid, work, diag, & use ice_gather_scatter, only: gather_global, gather_global_ext integer (kind=int_kind), intent(in) :: & - fid , & ! file id - varid , & ! variable id - nrec ! record number + fid , & ! file id + varid , & ! variable id + nrec ! record number logical (kind=log_kind), intent(in) :: & - diag ! if true, write diagnostic output + diag ! if true, write diagnostic output logical (kind=log_kind), optional, intent(in) :: & - restart_ext ! if true, write extended grid + restart_ext ! if true, write extended grid real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks), intent(in) :: & - work ! output array (real, 8-byte) + work ! output array (real, 8-byte) character (len=*), optional, intent(in) :: & - varname ! variable name + varname ! variable name ! local variables @@ -2234,17 +2264,17 @@ subroutine ice_write_nc_xy(fid, nrec, varid, work, diag, & #ifdef USE_NETCDF ! netCDF file diagnostics: integer (kind=int_kind) :: & - status ! status output from netcdf routines -! ndim, nvar, & ! sizes of netcdf file -! id, & ! dimension index -! dimlen ! size of dimension + status ! status output from netcdf routines +! ndim, nvar, & ! sizes of netcdf file +! id, & ! dimension index +! dimlen ! size of dimension real (kind=dbl_kind) :: & - amin, amax, asum ! min, max values and sum of input array + amin, amax, asum ! min, max values and sum of input array character (char_len) :: & - lvarname ! variable name -! dimname ! dimension name + lvarname ! variable name +! dimname ! 
dimension name real (kind=dbl_kind), dimension(:,:), allocatable :: & work_g1 @@ -2283,19 +2313,19 @@ subroutine ice_write_nc_xy(fid, nrec, varid, work, diag, & if (my_task == master_task) then - !-------------------------------------------------------------- - ! Write global array - !-------------------------------------------------------------- + !-------------------------------------------------------------- + ! Write global array + !-------------------------------------------------------------- status = nf90_put_var( fid, varid, work_g1, & - start=(/1,1,nrec/), count=(/nx,ny,1/)) - call ice_check_nc(status, subname//' ERROR: Cannot put variable ', & - file=__FILE__, line=__LINE__ ) + start=(/1,1,nrec/), & + count=(/nx,ny,1/)) + endif ! my_task = master_task - !------------------------------------------------------------------- - ! optional diagnostics - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! optional diagnostics + !------------------------------------------------------------------- if (my_task==master_task .and. diag) then ! write(nu_diag,*) & @@ -2334,21 +2364,21 @@ subroutine ice_write_nc_xyz(fid, nrec, varid, work, diag, & use ice_gather_scatter, only: gather_global, gather_global_ext integer (kind=int_kind), intent(in) :: & - fid , & ! file id - varid , & ! variable id - nrec ! record number + fid , & ! file id + varid , & ! variable id + nrec ! record number logical (kind=log_kind), intent(in) :: & - diag ! if true, write diagnostic output + diag ! if true, write diagnostic output logical (kind=log_kind), optional, intent(in) :: & - restart_ext ! if true, read extended grid + restart_ext ! if true, read extended grid real (kind=dbl_kind), dimension(nx_block,ny_block,ncat,max_blocks), intent(in) :: & - work ! output array (real, 8-byte) + work ! output array (real, 8-byte) character (len=*), optional, intent(in) :: & - varname ! variable name + varname ! 
variable name ! local variables @@ -2357,18 +2387,18 @@ subroutine ice_write_nc_xyz(fid, nrec, varid, work, diag, & #ifdef USE_NETCDF ! netCDF file diagnostics: integer (kind=int_kind) :: & - n, & ! ncat index - status ! status output from netcdf routines -! ndim, nvar, & ! sizes of netcdf file -! id, & ! dimension index -! dimlen ! size of dimension + n, & ! ncat index + status ! status output from netcdf routines +! ndim, nvar, & ! sizes of netcdf file +! id, & ! dimension index +! dimlen ! size of dimension real (kind=dbl_kind) :: & - amin, amax, asum ! min, max values and sum of input array + amin, amax, asum ! min, max values and sum of input array character (char_len) :: & - lvarname ! variable name -! dimname ! dimension name + lvarname ! variable name +! dimname ! dimension name real (kind=dbl_kind), dimension(:,:,:), allocatable :: & work_g1 @@ -2413,19 +2443,19 @@ subroutine ice_write_nc_xyz(fid, nrec, varid, work, diag, & if (my_task == master_task) then - !-------------------------------------------------------------- - ! Write global array - !-------------------------------------------------------------- + !-------------------------------------------------------------- + ! Read global array + !-------------------------------------------------------------- status = nf90_put_var( fid, varid, work_g1, & - start=(/1,1,1,nrec/), count=(/nx,ny,ncat,1/)) - call ice_check_nc(status, subname//' ERROR: Cannot put variable ', & - file=__FILE__, line=__LINE__ ) + start=(/1,1,1,nrec/), & + count=(/nx,ny,ncat,1/)) + endif ! my_task = master_task - !------------------------------------------------------------------- - ! optional diagnostics - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! optional diagnostics + !------------------------------------------------------------------- if (my_task==master_task .and. diag) then ! 
write(nu_diag,*) & @@ -2468,17 +2498,17 @@ end subroutine ice_write_nc_xyz subroutine ice_read_global_nc (fid, nrec, varname, work_g, diag) integer (kind=int_kind), intent(in) :: & - fid , & ! file id - nrec ! record number + fid , & ! file id + nrec ! record number - character (char_len), intent(in) :: & - varname ! field name in netcdf file + character (char_len), intent(in) :: & + varname ! field name in netcdf file real (kind=dbl_kind), dimension(nx_global,ny_global), intent(out) :: & - work_g ! output array (real, 8-byte) + work_g ! output array (real, 8-byte) logical (kind=log_kind) :: & - diag ! if true, write diagnostic output + diag ! if true, write diagnostic output ! local variables @@ -2487,17 +2517,17 @@ subroutine ice_read_global_nc (fid, nrec, varname, work_g, diag) #ifdef USE_NETCDF ! netCDF file diagnostics: integer (kind=int_kind) :: & - varid, & ! netcdf id for field - status ! status output from netcdf routines -! ndim, nvar, & ! sizes of netcdf file -! id, & ! dimension index -! dimlen ! size of dimension + varid, & ! netcdf id for field + status ! status output from netcdf routines +! ndim, nvar, & ! sizes of netcdf file +! id, & ! dimension index +! dimlen ! size of dimension real (kind=dbl_kind) :: & - amin, amax, asum ! min, max values and sum of input array + amin, amax, asum ! min, max values and sum of input array ! character (char_len) :: & -! dimname ! dimension name +! dimname ! dimension name ! real (kind=dbl_kind), dimension(:,:), allocatable :: & work_g3 @@ -2515,35 +2545,43 @@ subroutine ice_read_global_nc (fid, nrec, varname, work_g, diag) if (my_task == master_task) then - !------------------------------------------------------------- - ! Find out ID of required variable - !------------------------------------------------------------- + !------------------------------------------------------------- + ! 
Find out ID of required variable + !------------------------------------------------------------- status = nf90_inq_varid(fid, trim(varname), varid) - call ice_check_nc(status, subname//' ERROR: Cannot find variable '//trim(varname), & - file=__FILE__, line=__LINE__) + if (status /= nf90_noerr) then + call abort_ice(subname//' ERROR: Cannot find variable '//trim(varname), & + file=__FILE__, line=__LINE__) + endif - !-------------------------------------------------------------- - ! Read global array - !-------------------------------------------------------------- + !-------------------------------------------------------------- + ! Read global array + !-------------------------------------------------------------- if (orca_halogrid) then status = nf90_get_var( fid, varid, work_g3, & - start=(/1,1,nrec/), count=(/nx_global+2,ny_global+1,1/)) - call ice_check_nc(status, subname//' ERROR: Cannot get variable '//trim(varname), & - file=__FILE__, line=__LINE__) + start=(/1,1,nrec/), & + count=(/nx_global+2,ny_global+1,1/)) + if (status /= nf90_noerr) then + call abort_ice(subname//' ERROR: Cannot get variable '//trim(varname), & + file=__FILE__, line=__LINE__) + endif work_g=work_g3(2:nx_global+1,1:ny_global) else status = nf90_get_var( fid, varid, work_g, & - start=(/1,1,nrec/), count=(/nx_global,ny_global,1/)) - call ice_check_nc(status, subname//' ERROR: Cannot get variable '//trim(varname), & - file=__FILE__, line=__LINE__) + start=(/1,1,nrec/), & + count=(/nx_global,ny_global,1/)) + if (status /= nf90_noerr) then + call abort_ice(subname//' ERROR: Cannot get variable '//trim(varname), & + file=__FILE__, line=__LINE__) + endif endif endif ! my_task = master_task - !------------------------------------------------------------------- - ! optional diagnostics - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! 
optional diagnostics + !------------------------------------------------------------------- if (my_task == master_task .and. diag) then ! write(nu_diag,*) & @@ -2573,47 +2611,13 @@ end subroutine ice_read_global_nc !======================================================================= -! Report a netcdf error -! author: T. Craig - - subroutine ice_check_nc(status, abort_msg, file, line) - integer(kind=int_kind), intent (in) :: status - character (len=*) , intent (in) :: abort_msg - character (len=*) , intent (in), optional :: file - integer(kind=int_kind), intent (in), optional :: line - - ! local variables - - character(len=*), parameter :: subname = '(ice_check_nc)' - -#ifdef USE_NETCDF - if (status /= nf90_noerr) then - if (present(file) .and. present(line)) then - call abort_ice(subname//' '//trim(nf90_strerror(status))//', '//trim(abort_msg), & - file=file, line=line) - elseif (present(file)) then - call abort_ice(subname//' '//trim(nf90_strerror(status))//', '//trim(abort_msg), & - file=file) - else - call abort_ice(subname//' '//trim(nf90_strerror(status))//', '//trim(abort_msg)) - endif - endif -#else - call abort_ice(subname//' ERROR: USE_NETCDF cpp not defined', & - file=__FILE__, line=__LINE__) -#endif - - end subroutine ice_check_nc - -!======================================================================= - ! Closes a netCDF file ! author: Alison McLaren, Met Office subroutine ice_close_nc(fid) integer (kind=int_kind), intent(in) :: & - fid ! unit number + fid ! unit number ! 
local variables @@ -2625,8 +2629,6 @@ subroutine ice_close_nc(fid) if (my_task == master_task) then status = nf90_close(fid) - call ice_check_nc(status, subname//' ERROR: Cannot close file ', & - file=__FILE__, line=__LINE__ ) endif #else call abort_ice(subname//' ERROR: USE_NETCDF cpp not defined', & @@ -2651,25 +2653,25 @@ subroutine ice_read_nc_uv(fid, nrec, nzlev, varname, work, diag, & use ice_gather_scatter, only: scatter_global, scatter_global_ext integer (kind=int_kind), intent(in) :: & - fid , & ! file id - nrec , & ! record number - nzlev ! z level + fid , & ! file id + nrec , & ! record number + nzlev ! z level logical (kind=log_kind), intent(in) :: & - diag ! if true, write diagnostic output + diag ! if true, write diagnostic output character (len=*), intent(in) :: & - varname ! field name in netcdf file + varname ! field name in netcdf file real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks), intent(out) :: & - work ! output array (real, 8-byte) + work ! output array (real, 8-byte) logical (kind=log_kind), optional, intent(in) :: & - restart_ext ! if true, read extended grid + restart_ext ! if true, read extended grid integer (kind=int_kind), optional, intent(in) :: & - field_loc, & ! location of field on staggered grid - field_type ! type of field (scalar, vector, angle) + field_loc, & ! location of field on staggered grid + field_type ! type of field (scalar, vector, angle) ! local variables @@ -2678,17 +2680,17 @@ subroutine ice_read_nc_uv(fid, nrec, nzlev, varname, work, diag, & #ifdef USE_NETCDF ! netCDF file diagnostics: integer (kind=int_kind) :: & - varid , & ! variable id - status ! status output from netcdf routines -! ndim, nvar , & ! sizes of netcdf file -! id, & ! dimension index -! dimlen ! size of dimension + varid , & ! variable id + status ! status output from netcdf routines +! ndim, nvar, & ! sizes of netcdf file +! id, & ! dimension index +! dimlen ! size of dimension real (kind=dbl_kind) :: & - amin, amax, asum ! 
min, max values and sum of input array + amin, amax, asum ! min, max values and sum of input array ! character (char_len) :: & -! dimname ! dimension name +! dimname ! dimension name real (kind=dbl_kind), dimension(:,:), allocatable :: & work_g1 @@ -2713,28 +2715,33 @@ subroutine ice_read_nc_uv(fid, nrec, nzlev, varname, work, diag, & if (my_task == master_task) then - !------------------------------------------------------------- - ! Find out ID of required variable - !------------------------------------------------------------- + !------------------------------------------------------------- + ! Find out ID of required variable + !------------------------------------------------------------- status = nf90_inq_varid(fid, trim(varname), varid) - call ice_check_nc(status, subname//' ERROR: Cannot find variable '//trim(varname), & - file=__FILE__, line=__LINE__) + if (status /= nf90_noerr) then + call abort_ice(subname//' ERROR: Cannot find variable '//trim(varname), & + file=__FILE__, line=__LINE__) + endif - !-------------------------------------------------------------- - ! Read global array - !-------------------------------------------------------------- + !-------------------------------------------------------------- + ! Read global array + !-------------------------------------------------------------- status = nf90_get_var( fid, varid, work_g1, & - start=(/1,1,nzlev,nrec/), count=(/nx,ny,1,1/)) - call ice_check_nc(status, subname//' ERROR: Cannot get variable '//trim(varname), & - file=__FILE__, line=__LINE__) + start=(/1,1,nzlev,nrec/), & + count=(/nx,ny,1,1/)) + if (status /= nf90_noerr) then + call abort_ice(subname//' ERROR: Cannot get variable '//trim(varname), & + file=__FILE__, line=__LINE__) + endif endif ! my_task = master_task - !------------------------------------------------------------------- - ! 
optional diagnostics - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! optional diagnostics + !------------------------------------------------------------------- if (my_task==master_task .and. diag) then amin = minval(work_g1) @@ -2743,10 +2750,10 @@ subroutine ice_read_nc_uv(fid, nrec, nzlev, varname, work, diag, & write(nu_diag,*) subname,' min, max, sum =', amin, amax, asum, trim(varname) endif - !------------------------------------------------------------------- - ! Scatter data to individual processors. - ! NOTE: Ghost cells are not updated unless field_loc is present. - !------------------------------------------------------------------- + !------------------------------------------------------------------- + ! Scatter data to individual processors. + ! NOTE: Ghost cells are not updated unless field_loc is present. + !------------------------------------------------------------------- if (present(restart_ext)) then if (restart_ext) then @@ -2783,17 +2790,18 @@ end subroutine ice_read_nc_uv subroutine ice_read_vec_nc (fid, nrec, varname, work_g, diag) integer (kind=int_kind), intent(in) :: & - fid , & ! file id - nrec ! record number + fid , & ! file id + nrec ! record number character (char_len), intent(in) :: & - varname ! field name in netcdf file + varname ! field name in netcdf file - real (kind=dbl_kind), dimension(nrec), intent(out) :: & - work_g ! output array (real, 8-byte) + real (kind=dbl_kind), dimension(nrec), & + intent(out) :: & + work_g ! output array (real, 8-byte) logical (kind=log_kind) :: & - diag ! if true, write diagnostic output + diag ! if true, write diagnostic output ! local variables @@ -2802,32 +2810,37 @@ subroutine ice_read_vec_nc (fid, nrec, varname, work_g, diag) #ifdef USE_NETCDF ! netCDF file diagnostics: integer (kind=int_kind) :: & - varid, & ! netcdf id for field - status ! status output from netcdf routines + varid, & ! 
netcdf id for field + status ! status output from netcdf routines real (kind=dbl_kind) :: & - amin, amax ! min, max values of input vector + amin, amax ! min, max values of input vector work_g(:) = c0 if (my_task == master_task) then - !------------------------------------------------------------- - ! Find out ID of required variable - !------------------------------------------------------------- + !------------------------------------------------------------- + ! Find out ID of required variable + !------------------------------------------------------------- status = nf90_inq_varid(fid, trim(varname), varid) - call ice_check_nc(status, subname//' ERROR: Cannot find variable '//trim(varname), & - file=__FILE__, line=__LINE__) + if (status /= nf90_noerr) then + call abort_ice(subname//' ERROR: Cannot find variable '//trim(varname), & + file=__FILE__, line=__LINE__) + endif - !-------------------------------------------------------------- - ! Read global array - !-------------------------------------------------------------- + !-------------------------------------------------------------- + ! Read global array + !-------------------------------------------------------------- status = nf90_get_var( fid, varid, work_g, & - start=(/1/), count=(/nrec/)) - call ice_check_nc(status, subname//' ERROR: Cannot get variable '//trim(varname), & - file=__FILE__, line=__LINE__) + start=(/1/), & + count=(/nrec/)) + if (status /= nf90_noerr) then + call abort_ice(subname//' ERROR: Cannot get variable '//trim(varname), & + file=__FILE__, line=__LINE__) + endif endif ! 
my_task = master_task @@ -2873,22 +2886,26 @@ subroutine ice_get_ncvarsize(fid,varname,recsize) #ifdef USE_NETCDF if (my_task == master_task) then status=nf90_inquire(fid, nDimensions = nDims) - call ice_check_nc(status, subname//' ERROR: inquire nDimensions', & - file=__FILE__, line=__LINE__ ) + if (status /= nf90_noerr) then + call abort_ice(subname//' ERROR: inquire nDimensions', & + file=__FILE__, line=__LINE__ ) + endif do i=1,nDims status = nf90_inquire_dimension(fid,i,name=cvar,len=recsize) - call ice_check_nc(status, subname//' ERROR: inquire len for variable '//trim(cvar), & - file=__FILE__, line=__LINE__) + if (status /= nf90_noerr) then + call abort_ice(subname//' ERROR: inquire len for variable '//trim(cvar), & + file=__FILE__, line=__LINE__) + endif if (trim(cvar) == trim(varname)) exit enddo if (trim(cvar) .ne. trim(varname)) then call abort_ice(subname//' ERROR: Did not find variable '//trim(varname), & - file=__FILE__, line=__LINE__) + file=__FILE__, line=__LINE__) endif endif #else call abort_ice(subname//' ERROR: USE_NETCDF cpp not defined', & - file=__FILE__, line=__LINE__) + file=__FILE__, line=__LINE__) recsize = 0 ! 
to satisfy intent(out) attribute #endif diff --git a/cicecore/cicedyn/infrastructure/ice_restart_driver.F90 b/cicecore/cicedyn/infrastructure/ice_restart_driver.F90 index bd5a49eaf..ffe9ec587 100644 --- a/cicecore/cicedyn/infrastructure/ice_restart_driver.F90 +++ b/cicecore/cicedyn/infrastructure/ice_restart_driver.F90 @@ -55,7 +55,7 @@ subroutine dumpfile(filename_spec) use ice_blocks, only: nx_block, ny_block use ice_domain, only: nblocks use ice_domain_size, only: nilyr, nslyr, ncat, max_blocks - use ice_dyn_shared, only: iceUmask, iceEmask, iceNmask + use ice_dyn_shared, only: iceUmask, iceEmask, iceNmask, kdyn use ice_flux, only: scale_factor, swvdr, swvdf, swidr, swidf, & strocnxT_iavg, strocnyT_iavg, sst, frzmlt, & stressp_1, stressp_2, stressp_3, stressp_4, & @@ -215,45 +215,52 @@ subroutine dumpfile(filename_spec) !----------------------------------------------------------------- ! ice mask for dynamics !----------------------------------------------------------------- - - !$OMP PARALLEL DO PRIVATE(iblk,i,j) - do iblk = 1, nblocks - do j = 1, ny_block - do i = 1, nx_block - work1(i,j,iblk) = c0 - if (iceUmask(i,j,iblk)) work1(i,j,iblk) = c1 - enddo - enddo - enddo - !$OMP END PARALLEL DO - call write_restart_field(nu_dump,0,work1,'ruf8','iceumask',1,diag) - - if (grid_ice == 'CD' .or. grid_ice == 'C') then - + if (kdyn > 0) then !$OMP PARALLEL DO PRIVATE(iblk,i,j) do iblk = 1, nblocks do j = 1, ny_block do i = 1, nx_block work1(i,j,iblk) = c0 - if (iceNmask(i,j,iblk)) work1(i,j,iblk) = c1 + if (iceUmask(i,j,iblk)) work1(i,j,iblk) = c1 enddo enddo enddo !$OMP END PARALLEL DO - call write_restart_field(nu_dump,0,work1,'ruf8','icenmask',1,diag) + call write_restart_field(nu_dump,0,work1,'ruf8','iceumask',1,diag) - !$OMP PARALLEL DO PRIVATE(iblk,i,j) - do iblk = 1, nblocks - do j = 1, ny_block - do i = 1, nx_block - work1(i,j,iblk) = c0 - if (iceEmask(i,j,iblk)) work1(i,j,iblk) = c1 - enddo + if (grid_ice == 'CD' .or. 
grid_ice == 'C') then + + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + work1(i,j,iblk) = c0 + if (iceNmask(i,j,iblk)) work1(i,j,iblk) = c1 + enddo + enddo enddo - enddo - !$OMP END PARALLEL DO - call write_restart_field(nu_dump,0,work1,'ruf8','iceemask',1,diag) + !$OMP END PARALLEL DO + call write_restart_field(nu_dump,0,work1,'ruf8','icenmask',1,diag) + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + work1(i,j,iblk) = c0 + if (iceEmask(i,j,iblk)) work1(i,j,iblk) = c1 + enddo + enddo + enddo + !$OMP END PARALLEL DO + call write_restart_field(nu_dump,0,work1,'ruf8','iceemask',1,diag) + endif + else + work1(:,:,:) = c0 + call write_restart_field(nu_dump,0,work1,'ruf8','iceumask',1,diag) + if (grid_ice == 'CD' .or. grid_ice == 'C') then + call write_restart_field(nu_dump,0,work1,'ruf8','icenmask',1,diag) + call write_restart_field(nu_dump,0,work1,'ruf8','iceemask',1,diag) + endif endif ! for mixed layer model @@ -277,7 +284,7 @@ subroutine restartfile (ice_ic) use ice_domain, only: nblocks, halo_info use ice_domain_size, only: nilyr, nslyr, ncat, & max_blocks - use ice_dyn_shared, only: iceUmask, iceEmask, iceNmask + use ice_dyn_shared, only: iceUmask, iceEmask, iceNmask,kdyn use ice_flux, only: scale_factor, swvdr, swvdf, swidr, swidf, & strocnxT_iavg, strocnyT_iavg, sst, frzmlt, & stressp_1, stressp_2, stressp_3, stressp_4, & @@ -524,57 +531,76 @@ subroutine restartfile (ice_ic) !----------------------------------------------------------------- ! ice mask for dynamics !----------------------------------------------------------------- - if (my_task == master_task) & - write(nu_diag,*) 'ice mask for dynamics' - - call read_restart_field(nu_restart,0,work1,'ruf8', & - 'iceumask',1,diag,field_loc_center, field_type_scalar) - - iceUmask(:,:,:) = .false. 
- !$OMP PARALLEL DO PRIVATE(iblk,i,j) - do iblk = 1, nblocks - do j = 1, ny_block - do i = 1, nx_block - if (work1(i,j,iblk) > p5) iceUmask(i,j,iblk) = .true. - enddo - enddo - enddo - !$OMP END PARALLEL DO + if (kdyn > 0) then - if (grid_ice == 'CD' .or. grid_ice == 'C') then - - if (query_field(nu_restart,'icenmask')) then + if (my_task == master_task) & + write(nu_diag,*) 'ice mask for dynamics' + if (query_field(nu_restart,'iceumask')) then call read_restart_field(nu_restart,0,work1,'ruf8', & - 'icenmask',1,diag,field_loc_center, field_type_scalar) + 'iceumask',1,diag,field_loc_center, field_type_scalar) - iceNmask(:,:,:) = .false. + iceUmask(:,:,:) = .false. !$OMP PARALLEL DO PRIVATE(iblk,i,j) do iblk = 1, nblocks do j = 1, ny_block do i = 1, nx_block - if (work1(i,j,iblk) > p5) iceNmask(i,j,iblk) = .true. + if (work1(i,j,iblk) > p5) iceUmask(i,j,iblk) = .true. enddo enddo enddo !$OMP END PARALLEL DO endif - - if (query_field(nu_restart,'iceemask')) then - call read_restart_field(nu_restart,0,work1,'ruf8', & - 'iceemask',1,diag,field_loc_center, field_type_scalar) - - iceEmask(:,:,:) = .false. - !$OMP PARALLEL DO PRIVATE(iblk,i,j) - do iblk = 1, nblocks - do j = 1, ny_block - do i = 1, nx_block - if (work1(i,j,iblk) > p5) iceEmask(i,j,iblk) = .true. + if (grid_ice == 'CD' .or. grid_ice == 'C') then + + if (query_field(nu_restart,'icenmask')) then + call read_restart_field(nu_restart,0,work1,'ruf8', & + 'icenmask',1,diag,field_loc_center, field_type_scalar) + + iceNmask(:,:,:) = .false. + !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + if (work1(i,j,iblk) > p5) iceNmask(i,j,iblk) = .true. + enddo + enddo enddo + !$OMP END PARALLEL DO + endif + + if (query_field(nu_restart,'iceemask')) then + call read_restart_field(nu_restart,0,work1,'ruf8', & + 'iceemask',1,diag,field_loc_center, field_type_scalar) + + iceEmask(:,:,:) = .false. 
+ !$OMP PARALLEL DO PRIVATE(iblk,i,j) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + if (work1(i,j,iblk) > p5) iceEmask(i,j,iblk) = .true. + enddo + enddo enddo - enddo - !$OMP END PARALLEL DO + !$OMP END PARALLEL DO + endif + endif + else + if (my_task == master_task) & + write(nu_diag,*) 'ice mask for dynamics - not used, however mandatory to read in binary files' + if (query_field(nu_restart,'iceumask')) then + call read_restart_field(nu_restart,0,work1,'ruf8', & + 'iceumask',1,diag,field_loc_center, field_type_scalar) + endif + if (grid_ice == 'CD' .or. grid_ice == 'C') then + if (query_field(nu_restart,'icenmask')) then + call read_restart_field(nu_restart,0,work1,'ruf8', & + 'icenmask',1,diag,field_loc_center, field_type_scalar) + endif + if (query_field(nu_restart,'iceemask')) then + call read_restart_field(nu_restart,0,work1,'ruf8', & + 'iceemask',1,diag,field_loc_center, field_type_scalar) + endif endif - endif ! set Tsfcn to c0 on land diff --git a/cicecore/cicedyn/infrastructure/io/io_netcdf/ice_history_write.F90 b/cicecore/cicedyn/infrastructure/io/io_netcdf/ice_history_write.F90 index 7d29fc4cc..019ab8ce9 100644 --- a/cicecore/cicedyn/infrastructure/io/io_netcdf/ice_history_write.F90 +++ b/cicecore/cicedyn/infrastructure/io/io_netcdf/ice_history_write.F90 @@ -21,35 +21,16 @@ module ice_history_write - use ice_constants, only: c0, c360, p5, spval, spval_dbl + use ice_constants, only: c0, c360, spval, spval_dbl use ice_fileunits, only: nu_diag use ice_exit, only: abort_ice - use ice_read_write, only: ice_check_nc use icepack_intfc, only: icepack_warnings_flush, icepack_warnings_aborted use icepack_intfc, only: icepack_query_parameters - use ice_kinds_mod, only: int_kind -#ifdef USE_NETCDF - use netcdf -#endif implicit none private - - TYPE coord_attributes ! 
netcdf coordinate attributes - character (len=11) :: short_name - character (len=45) :: long_name - character (len=30) :: units - END TYPE coord_attributes - - TYPE req_attributes ! req'd netcdf attributes - type (coord_attributes) :: req - character (len=20) :: coordinates - END TYPE req_attributes - public :: ice_write_hist - integer (kind=int_kind) :: imtid,jmtid - !======================================================================= contains @@ -67,7 +48,7 @@ subroutine ice_write_hist (ns) use ice_blocks, only: nx_block, ny_block use ice_broadcast, only: broadcast_scalar use ice_calendar, only: msec, timesecs, idate, idate0, write_ic, & - histfreq, histfreq_n, days_per_year, use_leap_years, dayyr, & + histfreq, days_per_year, use_leap_years, dayyr, & hh_init, mm_init, ss_init use ice_communicate, only: my_task, master_task use ice_domain, only: distrb_info @@ -79,9 +60,13 @@ subroutine ice_write_hist (ns) lont_bounds, latt_bounds, lonu_bounds, latu_bounds, & lonn_bounds, latn_bounds, lone_bounds, late_bounds use ice_history_shared + use ice_restart_shared, only: lcdf64 #ifdef CESMCOUPLED use ice_restart_shared, only: runid #endif +#ifdef USE_NETCDF + use netcdf +#endif integer (kind=int_kind), intent(in) :: ns @@ -92,7 +77,7 @@ subroutine ice_write_hist (ns) real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks) :: work1 integer (kind=int_kind) :: i,k,ic,n,nn, & - ncid,status,kmtidi,kmtids,kmtidb, cmtid,timid,varid, & + ncid,status,imtid,jmtid,kmtidi,kmtids,kmtidb, cmtid,timid,varid, & nvertexid,ivertex,kmtida,iflag, fmtid integer (kind=int_kind), dimension(3) :: dimid integer (kind=int_kind), dimension(4) :: dimidz @@ -100,18 +85,19 @@ subroutine ice_write_hist (ns) integer (kind=int_kind), dimension(3) :: dimid_nverts integer (kind=int_kind), dimension(6) :: dimidex real (kind=dbl_kind) :: ltime2 - character (char_len) :: title, cal_units, cal_att - character (char_len) :: time_period_freq = 'none' - character (char_len_long) :: ncfile + character 
(char_len) :: title + character (char_len_long) :: ncfile(max_nstrm) real (kind=dbl_kind) :: secday, rad_to_deg - integer (kind=int_kind) :: ind,boundid, lprecision + integer (kind=int_kind) :: ind,boundid + + integer (kind=int_kind) :: lprecision character (char_len) :: start_time,current_date,current_time character (len=8) :: cdate - ! time coord - TYPE(coord_attributes) :: time_coord + ! 8 coordinate variables: TLON, TLAT, ULON, ULAT, NLON, NLAT, ELON, ELAT + INTEGER (kind=int_kind), PARAMETER :: ncoord = 8 ! 4 vertices in each grid cell INTEGER (kind=int_kind), PARAMETER :: nverts = 4 @@ -120,6 +106,17 @@ subroutine ice_write_hist (ns) ! lont_bounds, latt_bounds, lonu_bounds, latu_bounds INTEGER (kind=int_kind), PARAMETER :: nvar_verts = 8 + TYPE coord_attributes ! netcdf coordinate attributes + character (len=11) :: short_name + character (len=45) :: long_name + character (len=20) :: units + END TYPE coord_attributes + + TYPE req_attributes ! req'd netcdf attributes + type (coord_attributes) :: req + character (len=20) :: coordinates + END TYPE req_attributes + TYPE(req_attributes), dimension(nvar_grd) :: var_grd TYPE(coord_attributes), dimension(ncoord) :: var_coord TYPE(coord_attributes), dimension(nvar_verts) :: var_nverts @@ -139,560 +136,606 @@ subroutine ice_write_hist (ns) if (my_task == master_task) then - call construct_filename(ncfile,'nc',ns) + ltime2 = timesecs/secday - ! add local directory path name to ncfile - if (write_ic) then - ncfile = trim(incond_dir)//ncfile - else - ncfile = trim(history_dir)//ncfile - endif + call construct_filename(ncfile(ns),'nc',ns) - ! 
create file - if (history_format == 'cdf1') then - iflag = nf90_clobber - elseif (history_format == 'cdf2') then - iflag = ior(nf90_clobber,nf90_64bit_offset) - elseif (history_format == 'cdf5') then - iflag = ior(nf90_clobber,nf90_64bit_data) - elseif (history_format == 'hdf5') then - iflag = ior(nf90_clobber,nf90_netcdf4) - else - call abort_ice(subname//' ERROR: history_format not allowed for '//trim(history_format), & - file=__FILE__, line=__LINE__) - endif - status = nf90_create(ncfile, iflag, ncid) - call ice_check_nc(status, subname// ' ERROR: creating history ncfile '//ncfile, & - file=__FILE__, line=__LINE__) - - !----------------------------------------------------------------- - ! define dimensions - !----------------------------------------------------------------- - - if (hist_avg(ns) .and. .not. write_ic) then - status = nf90_def_dim(ncid,'nbnd',2,boundid) - call ice_check_nc(status, subname// ' ERROR: defining dim nbnd', & - file=__FILE__, line=__LINE__) - endif + ! add local directory path name to ncfile + if (write_ic) then + ncfile(ns) = trim(incond_dir)//ncfile(ns) + else + ncfile(ns) = trim(history_dir)//ncfile(ns) + endif - status = nf90_def_dim(ncid,'ni',nx_global,imtid) - call ice_check_nc(status, subname// ' ERROR: defining dim ni', & - file=__FILE__, line=__LINE__) - - status = nf90_def_dim(ncid,'nj',ny_global,jmtid) - call ice_check_nc(status, subname// ' ERROR: defining dim nj', & - file=__FILE__, line=__LINE__) - - status = nf90_def_dim(ncid,'nc',ncat_hist,cmtid) - call ice_check_nc(status, subname// ' ERROR: defining dim nc', & - file=__FILE__, line=__LINE__) - - status = nf90_def_dim(ncid,'nkice',nzilyr,kmtidi) - call ice_check_nc(status, subname// ' ERROR: defining dim nkice', & - file=__FILE__, line=__LINE__) - - status = nf90_def_dim(ncid,'nksnow',nzslyr,kmtids) - call ice_check_nc(status, subname// ' ERROR: defining dim nksnow', & - file=__FILE__, line=__LINE__) - - status = nf90_def_dim(ncid,'nkbio',nzblyr,kmtidb) - call 
ice_check_nc(status, subname// ' ERROR: defining dim nkbio', & - file=__FILE__, line=__LINE__) - - status = nf90_def_dim(ncid,'nkaer',nzalyr,kmtida) - call ice_check_nc(status, subname// ' ERROR: defining dim nkaer', & - file=__FILE__, line=__LINE__) - - status = nf90_def_dim(ncid,'time',NF90_UNLIMITED,timid) - call ice_check_nc(status, subname// ' ERROR: defining dim time', & - file=__FILE__, line=__LINE__) - - status = nf90_def_dim(ncid,'nvertices',nverts,nvertexid) - call ice_check_nc(status, subname// ' ERROR: defining dim nvertices', & - file=__FILE__, line=__LINE__) - - status = nf90_def_dim(ncid,'nf',nfsd_hist,fmtid) - call ice_check_nc(status, subname// ' ERROR: defining dim nf', & - file=__FILE__, line=__LINE__) - - !----------------------------------------------------------------- - ! define coordinate variables: time, time_bounds - !----------------------------------------------------------------- - - write(cdate,'(i8.8)') idate0 - write(cal_units,'(a,a4,a1,a2,a1,a2,a1,i2.2,a1,i2.2,a1,i2.2)') 'days since ', & - cdate(1:4),'-',cdate(5:6),'-',cdate(7:8),' ', & - hh_init,':',mm_init,':',ss_init - - if (days_per_year == 360) then - cal_att='360_day' - elseif (days_per_year == 365 .and. .not.use_leap_years ) then - cal_att='noleap' - elseif (use_leap_years) then - cal_att='Gregorian' - else - call abort_ice(subname//' ERROR: invalid calendar settings', file=__FILE__, line=__LINE__) - endif + ! create file + iflag = nf90_clobber + if (lcdf64) iflag = ior(iflag,nf90_64bit_offset) + status = nf90_create(ncfile(ns), iflag, ncid) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: creating history ncfile '//ncfile(ns)) - time_coord = coord_attributes('time', 'time', trim(cal_units)) - call ice_hist_coord_def(ncid, time_coord, nf90_double, (/timid/), varid) + !----------------------------------------------------------------- + ! 
define dimensions + !----------------------------------------------------------------- - status = nf90_put_att(ncid,varid,'calendar',cal_att) !extra attribute - call ice_check_nc(status, subname//' ERROR: defining att calendar: '//cal_att,file=__FILE__,line=__LINE__) - if (hist_avg(ns) .and. .not. write_ic) then - status = nf90_put_att(ncid,varid,'bounds','time_bounds') - call ice_check_nc(status, subname//' ERROR: defining att bounds time_bounds',file=__FILE__,line=__LINE__) - endif + if (hist_avg) then + status = nf90_def_dim(ncid,'d2',2,boundid) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining dim d2') + endif - ! Define coord time_bounds if hist_avg is true - if (hist_avg(ns) .and. .not. write_ic) then - time_coord = coord_attributes('time_bounds', 'time interval endpoints', trim(cal_units)) + status = nf90_def_dim(ncid,'ni',nx_global,imtid) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining dim ni') - dimid(1) = boundid - dimid(2) = timid + status = nf90_def_dim(ncid,'nj',ny_global,jmtid) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining dim nj') - call ice_hist_coord_def(ncid, time_coord, nf90_double, dimid(1:2), varid) - status = nf90_put_att(ncid,varid,'calendar',cal_att) - call ice_check_nc(status, subname//' ERROR: defining att calendar: '//cal_att,file=__FILE__,line=__LINE__) - endif + status = nf90_def_dim(ncid,'nc',ncat_hist,cmtid) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining dim nc') - !----------------------------------------------------------------- - ! 
define information for required time-invariant variables - !----------------------------------------------------------------- - - do ind = 1, ncoord - select case (ind) - case(n_tlon) - var_coord(ind) = coord_attributes('TLON', & - 'T grid center longitude', 'degrees_east') - coord_bounds(ind) = 'lont_bounds' - case(n_tlat) - var_coord(ind) = coord_attributes('TLAT', & - 'T grid center latitude', 'degrees_north') - coord_bounds(ind) = 'latt_bounds' - case(n_ulon) - var_coord(ind) = coord_attributes('ULON', & - 'U grid center longitude', 'degrees_east') - coord_bounds(ind) = 'lonu_bounds' - case(n_ulat) - var_coord(ind) = coord_attributes('ULAT', & - 'U grid center latitude', 'degrees_north') - coord_bounds(ind) = 'latu_bounds' - case(n_nlon) - var_coord(ind) = coord_attributes('NLON', & - 'N grid center longitude', 'degrees_east') - coord_bounds(ind) = 'lonn_bounds' - case(n_nlat) - var_coord(ind) = coord_attributes('NLAT', & - 'N grid center latitude', 'degrees_north') - coord_bounds(ind) = 'latn_bounds' - case(n_elon) - var_coord(ind) = coord_attributes('ELON', & - 'E grid center longitude', 'degrees_east') - coord_bounds(ind) = 'lone_bounds' - case(n_elat) - var_coord(ind) = coord_attributes('ELAT', & - 'E grid center latitude', 'degrees_north') - coord_bounds(ind) = 'late_bounds' - end select - end do - - var_grdz(1) = coord_attributes('NCAT', 'category maximum thickness', 'm') - var_grdz(2) = coord_attributes('VGRDi', 'vertical ice levels', '1') - var_grdz(3) = coord_attributes('VGRDs', 'vertical snow levels', '1') - var_grdz(4) = coord_attributes('VGRDb', 'vertical ice-bio levels', '1') - var_grdz(5) = coord_attributes('VGRDa', 'vertical snow-ice-bio levels', '1') - var_grdz(6) = coord_attributes('NFSD', 'category floe size (center)', 'm') - - !----------------------------------------------------------------- - ! 
define information for optional time-invariant variables - !----------------------------------------------------------------- - - var_grd(n_tmask)%req = coord_attributes('tmask', & - 'mask of T grid cells, 0 = land, 1 = ocean', 'unitless') - var_grd(n_tmask)%coordinates = 'TLON TLAT' - var_grd(n_umask)%req = coord_attributes('umask', & - 'mask of U grid cells, 0 = land, 1 = ocean', 'unitless') - var_grd(n_umask)%coordinates = 'ULON ULAT' - var_grd(n_nmask)%req = coord_attributes('nmask', & - 'mask of N grid cells, 0 = land, 1 = ocean', 'unitless') - var_grd(n_nmask)%coordinates = 'NLON NLAT' - var_grd(n_emask)%req = coord_attributes('emask', & - 'mask of E grid cells, 0 = land, 1 = ocean', 'unitless') - var_grd(n_emask)%coordinates = 'ELON ELAT' - - var_grd(n_tarea)%req = coord_attributes('tarea', & - 'area of T grid cells', 'm^2') - var_grd(n_tarea)%coordinates = 'TLON TLAT' - var_grd(n_uarea)%req = coord_attributes('uarea', & - 'area of U grid cells', 'm^2') - var_grd(n_uarea)%coordinates = 'ULON ULAT' - var_grd(n_narea)%req = coord_attributes('narea', & - 'area of N grid cells', 'm^2') - var_grd(n_narea)%coordinates = 'NLON NLAT' - var_grd(n_earea)%req = coord_attributes('earea', & - 'area of E grid cells', 'm^2') - var_grd(n_earea)%coordinates = 'ELON ELAT' - - var_grd(n_blkmask)%req = coord_attributes('blkmask', & - 'block id of T grid cells, mytask + iblk/100', 'unitless') - var_grd(n_blkmask)%coordinates = 'TLON TLAT' - - var_grd(n_dxt)%req = coord_attributes('dxt', & - 'T cell width through middle', 'm') - var_grd(n_dxt)%coordinates = 'TLON TLAT' - var_grd(n_dyt)%req = coord_attributes('dyt', & - 'T cell height through middle', 'm') - var_grd(n_dyt)%coordinates = 'TLON TLAT' - var_grd(n_dxu)%req = coord_attributes('dxu', & - 'U cell width through middle', 'm') - var_grd(n_dxu)%coordinates = 'ULON ULAT' - var_grd(n_dyu)%req = coord_attributes('dyu', & - 'U cell height through middle', 'm') - var_grd(n_dyu)%coordinates = 'ULON ULAT' - var_grd(n_dxn)%req = 
coord_attributes('dxn', & - 'N cell width through middle', 'm') - var_grd(n_dxn)%coordinates = 'NLON NLAT' - var_grd(n_dyn)%req = coord_attributes('dyn', & - 'N cell height through middle', 'm') - var_grd(n_dyn)%coordinates = 'NLON NLAT' - var_grd(n_dxe)%req = coord_attributes('dxe', & - 'E cell width through middle', 'm') - var_grd(n_dxe)%coordinates = 'ELON ELAT' - var_grd(n_dye)%req = coord_attributes('dye', & - 'E cell height through middle', 'm') - var_grd(n_dye)%coordinates = 'ELON ELAT' - - var_grd(n_HTN)%req = coord_attributes('HTN', & - 'T cell width on North side','m') - var_grd(n_HTN)%coordinates = 'TLON TLAT' - var_grd(n_HTE)%req = coord_attributes('HTE', & - 'T cell width on East side', 'm') - var_grd(n_HTE)%coordinates = 'TLON TLAT' - var_grd(n_ANGLE)%req = coord_attributes('ANGLE', & - 'angle grid makes with latitude line on U grid', & - 'radians') - var_grd(n_ANGLE)%coordinates = 'ULON ULAT' - var_grd(n_ANGLET)%req = coord_attributes('ANGLET', & - 'angle grid makes with latitude line on T grid', & - 'radians') - var_grd(n_ANGLET)%coordinates = 'TLON TLAT' - - ! These fields are required for CF compliance - ! 
dimensions (nx,ny,nverts) - var_nverts(n_lont_bnds) = coord_attributes('lont_bounds', & - 'longitude boundaries of T cells', 'degrees_east') - var_nverts(n_latt_bnds) = coord_attributes('latt_bounds', & - 'latitude boundaries of T cells', 'degrees_north') - var_nverts(n_lonu_bnds) = coord_attributes('lonu_bounds', & - 'longitude boundaries of U cells', 'degrees_east') - var_nverts(n_latu_bnds) = coord_attributes('latu_bounds', & - 'latitude boundaries of U cells', 'degrees_north') - var_nverts(n_lonn_bnds) = coord_attributes('lonn_bounds', & - 'longitude boundaries of N cells', 'degrees_east') - var_nverts(n_latn_bnds) = coord_attributes('latn_bounds', & - 'latitude boundaries of N cells', 'degrees_north') - var_nverts(n_lone_bnds) = coord_attributes('lone_bounds', & - 'longitude boundaries of E cells', 'degrees_east') - var_nverts(n_late_bnds) = coord_attributes('late_bounds', & - 'latitude boundaries of E cells', 'degrees_north') - - !----------------------------------------------------------------- - ! define attributes for time-invariant variables - !----------------------------------------------------------------- - - dimid(1) = imtid - dimid(2) = jmtid - dimid(3) = timid - - do i = 1, ncoord - if(icoord(i)) then - call ice_hist_coord_def(ncid, var_coord(i), lprecision, dimid(1:2), varid) - call ice_write_hist_fill(ncid,varid,var_coord(i)%short_name,history_precision) - if (var_coord(i)%short_name == 'ULAT') then - status = nf90_put_att(ncid,varid,'comment', & - 'Latitude of NE corner of T grid cell') - call ice_check_nc(status, subname// ' ERROR: defining comment for '//var_coord(i)%short_name, & - file=__FILE__, line=__LINE__) - endif - if (f_bounds) then - status = nf90_put_att(ncid, varid, 'bounds', coord_bounds(i)) - call ice_check_nc(status, subname// ' ERROR: defining bounds for '//var_coord(i)%short_name, & - file=__FILE__, line=__LINE__) - endif - endif - enddo - - ! 
Extra dimensions (NCAT, NZILYR, NZSLYR, NZBLYR, NZALYR, NFSD) - dimidex(1)=cmtid - dimidex(2)=kmtidi - dimidex(3)=kmtids - dimidex(4)=kmtidb - dimidex(5)=kmtida - dimidex(6)=fmtid - - do i = 1, nvar_grdz - if (igrdz(i)) then - call ice_hist_coord_def(ncid, var_grdz(i), lprecision, dimidex(i:i), varid) - endif - enddo - - do i = 1, nvar_grd - if (igrd(i)) then - call ice_hist_coord_def(ncid, var_grd(i)%req, lprecision, dimid(1:2), varid) - status = nf90_put_att(ncid, varid, 'coordinates', var_grd(i)%coordinates) - call ice_check_nc(status, subname// ' ERROR: defining coordinates for '//var_grd(i)%req%short_name, & - file=__FILE__, line=__LINE__) - call ice_write_hist_fill(ncid,varid,var_grd(i)%req%short_name,history_precision) - endif - enddo - - ! Fields with dimensions (nverts,nx,ny) - dimid_nverts(1) = nvertexid - dimid_nverts(2) = imtid - dimid_nverts(3) = jmtid - do i = 1, nvar_verts - if (f_bounds) then - call ice_hist_coord_def(ncid, var_nverts(i), lprecision, dimid_nverts, varid) - call ice_write_hist_fill(ncid,varid,var_nverts(i)%short_name,history_precision) - endif - enddo - - !----------------------------------------------------------------- - ! define attributes for time-variant variables - !----------------------------------------------------------------- - - do n=1,num_avail_hist_fields_2D - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - call ice_hist_field_def(ncid, avail_hist_fields(n),lprecision, dimid,ns) - endif - enddo ! num_avail_hist_fields_2D - - dimidz(1) = imtid - dimidz(2) = jmtid - dimidz(3) = cmtid - dimidz(4) = timid - - do n = n2D + 1, n3Dccum - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - call ice_hist_field_def(ncid, avail_hist_fields(n),lprecision, dimidz,ns) - endif - enddo ! num_avail_hist_fields_3Dc - - dimidz(1) = imtid - dimidz(2) = jmtid - dimidz(3) = kmtidi - dimidz(4) = timid - - do n = n3Dccum + 1, n3Dzcum - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. 
write_ic) then - call ice_hist_field_def(ncid, avail_hist_fields(n),lprecision, dimidz,ns) - endif - enddo ! num_avail_hist_fields_3Dz - - dimidz(1) = imtid - dimidz(2) = jmtid - dimidz(3) = kmtidb - dimidz(4) = timid - - do n = n3Dzcum + 1, n3Dbcum - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - call ice_hist_field_def(ncid, avail_hist_fields(n),lprecision, dimidz,ns) - endif - enddo ! num_avail_hist_fields_3Db - - dimidz(1) = imtid - dimidz(2) = jmtid - dimidz(3) = kmtida - dimidz(4) = timid - - do n = n3Dbcum + 1, n3Dacum - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - call ice_hist_field_def(ncid, avail_hist_fields(n),lprecision, dimidz,ns) - endif - enddo ! num_avail_hist_fields_3Da - - dimidz(1) = imtid - dimidz(2) = jmtid - dimidz(3) = fmtid - dimidz(4) = timid - - do n = n3Dacum + 1, n3Dfcum - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - call ice_hist_field_def(ncid, avail_hist_fields(n),lprecision, dimidz,ns) - endif - enddo ! num_avail_hist_fields_3Df - - dimidcz(1) = imtid - dimidcz(2) = jmtid - dimidcz(3) = kmtidi - dimidcz(4) = cmtid - dimidcz(5) = timid - - do n = n3Dfcum + 1, n4Dicum - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - call ice_hist_field_def(ncid, avail_hist_fields(n),lprecision, dimidcz,ns) - endif - enddo ! num_avail_hist_fields_4Di - - dimidcz(1) = imtid - dimidcz(2) = jmtid - dimidcz(3) = kmtids - dimidcz(4) = cmtid - dimidcz(5) = timid - - do n = n4Dicum + 1, n4Dscum - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - call ice_hist_field_def(ncid, avail_hist_fields(n),lprecision, dimidcz,ns) - endif - enddo ! num_avail_hist_fields_4Ds - - dimidcz(1) = imtid - dimidcz(2) = jmtid - dimidcz(3) = fmtid - dimidcz(4) = cmtid - dimidcz(5) = timid - - do n = n4Dscum + 1, n4Dfcum - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. 
write_ic) then - call ice_hist_field_def(ncid, avail_hist_fields(n),lprecision, & - ! dimidcz, ns) - dimidcz(1:4),ns) ! ferret - endif - enddo ! num_avail_hist_fields_4Df + status = nf90_def_dim(ncid,'nkice',nzilyr,kmtidi) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining dim nki') + + status = nf90_def_dim(ncid,'nksnow',nzslyr,kmtids) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining dim nks') + + status = nf90_def_dim(ncid,'nkbio',nzblyr,kmtidb) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining dim nkb') + + status = nf90_def_dim(ncid,'nkaer',nzalyr,kmtida) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining dim nka') + + status = nf90_def_dim(ncid,'time',NF90_UNLIMITED,timid) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining dim time') + + status = nf90_def_dim(ncid,'nvertices',nverts,nvertexid) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining dim nverts') + + status = nf90_def_dim(ncid,'nf',nfsd_hist,fmtid) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining dim nf') + + !----------------------------------------------------------------- + ! 
define coordinate variables + !----------------------------------------------------------------- + + status = nf90_def_var(ncid,'time',nf90_double,timid,varid) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining var time') + + status = nf90_put_att(ncid,varid,'long_name','model time') + if (status /= nf90_noerr) call abort_ice(subname// & + 'ice Error: time long_name') + + write(cdate,'(i8.8)') idate0 + write(title,'(a,a4,a1,a2,a1,a2,a1,i2.2,a1,i2.2,a1,i2.2)') 'days since ', & + cdate(1:4),'-',cdate(5:6),'-',cdate(7:8),' ', & + hh_init,':',mm_init,':',ss_init + status = nf90_put_att(ncid,varid,'units',title) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: time units') + + if (days_per_year == 360) then + status = nf90_put_att(ncid,varid,'calendar','360_day') + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: time calendar') + elseif (days_per_year == 365 .and. .not.use_leap_years ) then + status = nf90_put_att(ncid,varid,'calendar','NoLeap') + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: time calendar') + elseif (use_leap_years) then + status = nf90_put_att(ncid,varid,'calendar','Gregorian') + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: time calendar') + else + call abort_ice(subname//'ERROR: invalid calendar settings') + endif + + if (hist_avg) then + status = nf90_put_att(ncid,varid,'bounds','time_bounds') + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: time bounds') + endif + + !----------------------------------------------------------------- + ! 
Define attributes for time bounds if hist_avg is true + !----------------------------------------------------------------- + + if (hist_avg) then + dimid(1) = boundid + dimid(2) = timid + status = nf90_def_var(ncid,'time_bounds',lprecision,dimid(1:2),varid) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining var time_bounds') + status = nf90_put_att(ncid,varid,'long_name', & + 'boundaries for time-averaging interval') + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: time_bounds long_name') + write(cdate,'(i8.8)') idate0 + write(title,'(a,a4,a1,a2,a1,a2,a1,i2.2,a1,i2.2,a1,i2.2)') 'days since ', & + cdate(1:4),'-',cdate(5:6),'-',cdate(7:8),' ', & + hh_init,':',mm_init,':',ss_init + status = nf90_put_att(ncid,varid,'units',title) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: time_bounds units') + endif + + !----------------------------------------------------------------- + ! define information for required time-invariant variables + !----------------------------------------------------------------- - !----------------------------------------------------------------- - ! global attributes - !----------------------------------------------------------------- - ! ... the user should change these to something useful ... 
- !----------------------------------------------------------------- + ind = 0 + ind = ind + 1 + var_coord(ind) = coord_attributes('TLON', & + 'T grid center longitude', 'degrees_east') + coord_bounds(ind) = 'lont_bounds' + ind = ind + 1 + var_coord(ind) = coord_attributes('TLAT', & + 'T grid center latitude', 'degrees_north') + coord_bounds(ind) = 'latt_bounds' + ind = ind + 1 + var_coord(ind) = coord_attributes('ULON', & + 'U grid center longitude', 'degrees_east') + coord_bounds(ind) = 'lonu_bounds' + ind = ind + 1 + var_coord(ind) = coord_attributes('ULAT', & + 'U grid center latitude', 'degrees_north') + coord_bounds(ind) = 'latu_bounds' + ind = ind + 1 + var_coord(ind) = coord_attributes('NLON', & + 'N grid center longitude', 'degrees_east') + coord_bounds(ind) = 'lonn_bounds' + ind = ind + 1 + var_coord(ind) = coord_attributes('NLAT', & + 'N grid center latitude', 'degrees_north') + coord_bounds(ind) = 'latn_bounds' + ind = ind + 1 + var_coord(ind) = coord_attributes('ELON', & + 'E grid center longitude', 'degrees_east') + coord_bounds(ind) = 'lone_bounds' + ind = ind + 1 + var_coord(ind) = coord_attributes('ELAT', & + 'E grid center latitude', 'degrees_north') + coord_bounds(ind) = 'late_bounds' + + var_grdz(1) = coord_attributes('NCAT', 'category maximum thickness', 'm') + var_grdz(2) = coord_attributes('VGRDi', 'vertical ice levels', '1') + var_grdz(3) = coord_attributes('VGRDs', 'vertical snow levels', '1') + var_grdz(4) = coord_attributes('VGRDb', 'vertical ice-bio levels', '1') + var_grdz(5) = coord_attributes('VGRDa', 'vertical snow-ice-bio levels', '1') + var_grdz(6) = coord_attributes('NFSD', 'category floe size (center)', 'm') + + !----------------------------------------------------------------- + ! 
define information for optional time-invariant variables + !----------------------------------------------------------------- + + var_grd(n_tmask)%req = coord_attributes('tmask', & + 'mask of T grid cells, 0 = land, 1 = ocean', 'unitless') + var_grd(n_tmask)%coordinates = 'TLON TLAT' + var_grd(n_umask)%req = coord_attributes('umask', & + 'mask of U grid cells, 0 = land, 1 = ocean', 'unitless') + var_grd(n_umask)%coordinates = 'ULON ULAT' + var_grd(n_nmask)%req = coord_attributes('nmask', & + 'mask of N grid cells, 0 = land, 1 = ocean', 'unitless') + var_grd(n_nmask)%coordinates = 'NLON NLAT' + var_grd(n_emask)%req = coord_attributes('emask', & + 'mask of E grid cells, 0 = land, 1 = ocean', 'unitless') + var_grd(n_emask)%coordinates = 'ELON ELAT' + + var_grd(n_tarea)%req = coord_attributes('tarea', & + 'area of T grid cells', 'm^2') + var_grd(n_tarea)%coordinates = 'TLON TLAT' + var_grd(n_uarea)%req = coord_attributes('uarea', & + 'area of U grid cells', 'm^2') + var_grd(n_uarea)%coordinates = 'ULON ULAT' + var_grd(n_narea)%req = coord_attributes('narea', & + 'area of N grid cells', 'm^2') + var_grd(n_narea)%coordinates = 'NLON NLAT' + var_grd(n_earea)%req = coord_attributes('earea', & + 'area of E grid cells', 'm^2') + var_grd(n_earea)%coordinates = 'ELON ELAT' + + var_grd(n_blkmask)%req = coord_attributes('blkmask', & + 'block id of T grid cells, mytask + iblk/100', 'unitless') + var_grd(n_blkmask)%coordinates = 'TLON TLAT' + + var_grd(n_dxt)%req = coord_attributes('dxt', & + 'T cell width through middle', 'm') + var_grd(n_dxt)%coordinates = 'TLON TLAT' + var_grd(n_dyt)%req = coord_attributes('dyt', & + 'T cell height through middle', 'm') + var_grd(n_dyt)%coordinates = 'TLON TLAT' + var_grd(n_dxu)%req = coord_attributes('dxu', & + 'U cell width through middle', 'm') + var_grd(n_dxu)%coordinates = 'ULON ULAT' + var_grd(n_dyu)%req = coord_attributes('dyu', & + 'U cell height through middle', 'm') + var_grd(n_dyu)%coordinates = 'ULON ULAT' + var_grd(n_dxn)%req = 
coord_attributes('dxn', & + 'N cell width through middle', 'm') + var_grd(n_dxn)%coordinates = 'NLON NLAT' + var_grd(n_dyn)%req = coord_attributes('dyn', & + 'N cell height through middle', 'm') + var_grd(n_dyn)%coordinates = 'NLON NLAT' + var_grd(n_dxe)%req = coord_attributes('dxe', & + 'E cell width through middle', 'm') + var_grd(n_dxe)%coordinates = 'ELON ELAT' + var_grd(n_dye)%req = coord_attributes('dye', & + 'E cell height through middle', 'm') + var_grd(n_dye)%coordinates = 'ELON ELAT' + + var_grd(n_HTN)%req = coord_attributes('HTN', & + 'T cell width on North side','m') + var_grd(n_HTN)%coordinates = 'TLON TLAT' + var_grd(n_HTE)%req = coord_attributes('HTE', & + 'T cell width on East side', 'm') + var_grd(n_HTE)%coordinates = 'TLON TLAT' + var_grd(n_ANGLE)%req = coord_attributes('ANGLE', & + 'angle grid makes with latitude line on U grid', & + 'radians') + var_grd(n_ANGLE)%coordinates = 'ULON ULAT' + var_grd(n_ANGLET)%req = coord_attributes('ANGLET', & + 'angle grid makes with latitude line on T grid', & + 'radians') + var_grd(n_ANGLET)%coordinates = 'TLON TLAT' + + ! These fields are required for CF compliance + ! 
dimensions (nx,ny,nverts) + var_nverts(n_lont_bnds) = coord_attributes('lont_bounds', & + 'longitude boundaries of T cells', 'degrees_east') + var_nverts(n_latt_bnds) = coord_attributes('latt_bounds', & + 'latitude boundaries of T cells', 'degrees_north') + var_nverts(n_lonu_bnds) = coord_attributes('lonu_bounds', & + 'longitude boundaries of U cells', 'degrees_east') + var_nverts(n_latu_bnds) = coord_attributes('latu_bounds', & + 'latitude boundaries of U cells', 'degrees_north') + var_nverts(n_lonn_bnds) = coord_attributes('lonn_bounds', & + 'longitude boundaries of N cells', 'degrees_east') + var_nverts(n_latn_bnds) = coord_attributes('latn_bounds', & + 'latitude boundaries of N cells', 'degrees_north') + var_nverts(n_lone_bnds) = coord_attributes('lone_bounds', & + 'longitude boundaries of E cells', 'degrees_east') + var_nverts(n_late_bnds) = coord_attributes('late_bounds', & + 'latitude boundaries of E cells', 'degrees_north') + + !----------------------------------------------------------------- + ! 
define attributes for time-invariant variables + !----------------------------------------------------------------- + + dimid(1) = imtid + dimid(2) = jmtid + dimid(3) = timid + + do i = 1, ncoord + status = nf90_def_var(ncid, var_coord(i)%short_name, lprecision, & + dimid(1:2), varid) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining short_name for '//var_coord(i)%short_name) + status = nf90_put_att(ncid,varid,'long_name',var_coord(i)%long_name) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining long_name for '//var_coord(i)%short_name) + status = nf90_put_att(ncid, varid, 'units', var_coord(i)%units) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining units for '//var_coord(i)%short_name) + call ice_write_hist_fill(ncid,varid,var_coord(i)%short_name,history_precision) + if (var_coord(i)%short_name == 'ULAT') then + status = nf90_put_att(ncid,varid,'comment', & + 'Latitude of NE corner of T grid cell') + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining comment for '//var_coord(i)%short_name) + endif + if (f_bounds) then + status = nf90_put_att(ncid, varid, 'bounds', coord_bounds(i)) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining bounds for '//var_coord(i)%short_name) + endif + enddo + + ! 
Extra dimensions (NCAT, NZILYR, NZSLYR, NZBLYR, NZALYR, NFSD) + dimidex(1)=cmtid + dimidex(2)=kmtidi + dimidex(3)=kmtids + dimidex(4)=kmtidb + dimidex(5)=kmtida + dimidex(6)=fmtid + + do i = 1, nvar_grdz + if (igrdz(i)) then + status = nf90_def_var(ncid, var_grdz(i)%short_name, & + lprecision, dimidex(i), varid) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining short_name for '//var_grdz(i)%short_name) + status = nf90_put_att(ncid,varid,'long_name',var_grdz(i)%long_name) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining long_name for '//var_grdz(i)%short_name) + status = nf90_put_att(ncid, varid, 'units', var_grdz(i)%units) + if (Status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining units for '//var_grdz(i)%short_name) + endif + enddo + + do i = 1, nvar_grd + if (igrd(i)) then + status = nf90_def_var(ncid, var_grd(i)%req%short_name, & + lprecision, dimid(1:2), varid) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining variable '//var_grd(i)%req%short_name) + status = nf90_put_att(ncid,varid, 'long_name', var_grd(i)%req%long_name) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining long_name for '//var_grd(i)%req%short_name) + status = nf90_put_att(ncid, varid, 'units', var_grd(i)%req%units) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining units for '//var_grd(i)%req%short_name) + status = nf90_put_att(ncid, varid, 'coordinates', var_grd(i)%coordinates) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining coordinates for '//var_grd(i)%req%short_name) + call ice_write_hist_fill(ncid,varid,var_grd(i)%req%short_name,history_precision) + endif + enddo + + ! 
Fields with dimensions (nverts,nx,ny) + dimid_nverts(1) = nvertexid + dimid_nverts(2) = imtid + dimid_nverts(3) = jmtid + do i = 1, nvar_verts + if (f_bounds) then + status = nf90_def_var(ncid, var_nverts(i)%short_name, & + lprecision,dimid_nverts, varid) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining variable '//var_nverts(i)%short_name) + status = nf90_put_att(ncid,varid, 'long_name', var_nverts(i)%long_name) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining long_name for '//var_nverts(i)%short_name) + status = nf90_put_att(ncid, varid, 'units', var_nverts(i)%units) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining units for '//var_nverts(i)%short_name) + call ice_write_hist_fill(ncid,varid,var_nverts(i)%short_name,history_precision) + endif + enddo + + do n=1,num_avail_hist_fields_2D + if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then + status = nf90_def_var(ncid, avail_hist_fields(n)%vname, & + lprecision, dimid, varid) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining variable '//avail_hist_fields(n)%vname) + call ice_write_hist_attrs(ncid,varid,avail_hist_fields(n),ns) + endif + enddo ! num_avail_hist_fields_2D + + dimidz(1) = imtid + dimidz(2) = jmtid + dimidz(3) = cmtid + dimidz(4) = timid + + do n = n2D + 1, n3Dccum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then + status = nf90_def_var(ncid, avail_hist_fields(n)%vname, & + lprecision, dimidz, varid) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining variable '//avail_hist_fields(n)%vname) + call ice_write_hist_attrs(ncid,varid,avail_hist_fields(n),ns) + endif + enddo ! num_avail_hist_fields_3Dc + + dimidz(1) = imtid + dimidz(2) = jmtid + dimidz(3) = kmtidi + dimidz(4) = timid + + do n = n3Dccum + 1, n3Dzcum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. 
write_ic) then + status = nf90_def_var(ncid, avail_hist_fields(n)%vname, & + lprecision, dimidz, varid) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining variable '//avail_hist_fields(n)%vname) + call ice_write_hist_attrs(ncid,varid,avail_hist_fields(n),ns) + endif + enddo ! num_avail_hist_fields_3Dz + + dimidz(1) = imtid + dimidz(2) = jmtid + dimidz(3) = kmtidb + dimidz(4) = timid + + do n = n3Dzcum + 1, n3Dbcum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then + status = nf90_def_var(ncid, avail_hist_fields(n)%vname, & + lprecision, dimidz, varid) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining variable '//avail_hist_fields(n)%vname) + call ice_write_hist_attrs(ncid,varid,avail_hist_fields(n),ns) + endif + enddo ! num_avail_hist_fields_3Db + + dimidz(1) = imtid + dimidz(2) = jmtid + dimidz(3) = kmtida + dimidz(4) = timid + + do n = n3Dbcum + 1, n3Dacum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then + status = nf90_def_var(ncid, avail_hist_fields(n)%vname, & + lprecision, dimidz, varid) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining variable '//avail_hist_fields(n)%vname) + call ice_write_hist_attrs(ncid,varid,avail_hist_fields(n),ns) + endif + enddo ! num_avail_hist_fields_3Da + + dimidz(1) = imtid + dimidz(2) = jmtid + dimidz(3) = fmtid + dimidz(4) = timid + + do n = n3Dacum + 1, n3Dfcum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then + status = nf90_def_var(ncid, avail_hist_fields(n)%vname, & + lprecision, dimidz, varid) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining variable '//avail_hist_fields(n)%vname) + call ice_write_hist_attrs(ncid,varid,avail_hist_fields(n),ns) + endif + enddo ! 
num_avail_hist_fields_3Df + + dimidcz(1) = imtid + dimidcz(2) = jmtid + dimidcz(3) = kmtidi + dimidcz(4) = cmtid + dimidcz(5) = timid + + do n = n3Dfcum + 1, n4Dicum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then + status = nf90_def_var(ncid, avail_hist_fields(n)%vname, & +! lprecision, dimidcz, varid) + lprecision, dimidcz(1:4), varid) ! ferret + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining variable '//avail_hist_fields(n)%vname) + call ice_write_hist_attrs(ncid,varid,avail_hist_fields(n),ns) + endif + enddo ! num_avail_hist_fields_4Di + + dimidcz(1) = imtid + dimidcz(2) = jmtid + dimidcz(3) = kmtids + dimidcz(4) = cmtid + dimidcz(5) = timid + + do n = n4Dicum + 1, n4Dscum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then + status = nf90_def_var(ncid, avail_hist_fields(n)%vname, & +! lprecision, dimidcz, varid) + lprecision, dimidcz(1:4), varid) ! ferret + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining variable '//avail_hist_fields(n)%vname) + call ice_write_hist_attrs(ncid,varid,avail_hist_fields(n),ns) + endif + enddo ! num_avail_hist_fields_4Ds + + dimidcz(1) = imtid + dimidcz(2) = jmtid + dimidcz(3) = fmtid + dimidcz(4) = cmtid + dimidcz(5) = timid + + do n = n4Dscum + 1, n4Dfcum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then + status = nf90_def_var(ncid, avail_hist_fields(n)%vname, & +! lprecision, dimidcz, varid) + lprecision, dimidcz(1:4), varid) ! ferret + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining variable '//avail_hist_fields(n)%vname) + call ice_write_hist_attrs(ncid,varid,avail_hist_fields(n),ns) + endif + enddo ! num_avail_hist_fields_4Df + + !----------------------------------------------------------------- + ! global attributes + !----------------------------------------------------------------- + ! ... the user should change these to something useful ... 
+ !----------------------------------------------------------------- #ifdef CESMCOUPLED - status = nf90_put_att(ncid,nf90_global,'title',runid) - call ice_check_nc(status, subname// ' ERROR: in global attribute title', & - file=__FILE__, line=__LINE__) + status = nf90_put_att(ncid,nf90_global,'title',runid) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: in global attribute title') #else - title = 'sea ice model output for CICE' - status = nf90_put_att(ncid,nf90_global,'title',title) - call ice_check_nc(status, subname// ' ERROR: in global attribute title', & - file=__FILE__, line=__LINE__) + title = 'sea ice model output for CICE' + status = nf90_put_att(ncid,nf90_global,'title',title) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: in global attribute title') #endif - title = 'Diagnostic and Prognostic Variables' - status = nf90_put_att(ncid,nf90_global,'contents',title) - call ice_check_nc(status, subname// ' ERROR: global attribute contents', & - file=__FILE__, line=__LINE__) - - write(title,'(2a)') 'Los Alamos Sea Ice Model, ', trim(version_name) - status = nf90_put_att(ncid,nf90_global,'source',title) - call ice_check_nc(status, subname// ' ERROR: global attribute source', & - file=__FILE__, line=__LINE__) - - if (use_leap_years) then - write(title,'(a,i3,a)') 'This year has ',dayyr,' days' - else - write(title,'(a,i3,a)') 'All years have exactly ',dayyr,' days' - endif - status = nf90_put_att(ncid,nf90_global,'comment',title) - call ice_check_nc(status, subname// ' ERROR: global attribute comment', & - file=__FILE__, line=__LINE__) - - write(title,'(a,i8.8)') 'File written on model date ',idate - status = nf90_put_att(ncid,nf90_global,'comment2',title) - call ice_check_nc(status, subname// ' ERROR: global attribute date1', & - file=__FILE__, line=__LINE__) - - write(title,'(a,i6)') 'seconds elapsed into model date: ',msec - status = nf90_put_att(ncid,nf90_global,'comment3',title) - call ice_check_nc(status, subname// ' 
ERROR: global attribute date2', & - file=__FILE__, line=__LINE__) - - select case (histfreq(ns)) - case ("y", "Y") - write(time_period_freq,'(a,i0)') 'year_',histfreq_n(ns) - case ("m", "M") - write(time_period_freq,'(a,i0)') 'month_',histfreq_n(ns) - case ("d", "D") - write(time_period_freq,'(a,i0)') 'day_',histfreq_n(ns) - case ("h", "H") - write(time_period_freq,'(a,i0)') 'hour_',histfreq_n(ns) - case ("1") - write(time_period_freq,'(a,i0)') 'step_',histfreq_n(ns) - end select - - if (.not.write_ic .and. trim(time_period_freq) /= 'none') then - status = nf90_put_att(ncid,nf90_global,'time_period_freq',trim(time_period_freq)) - call ice_check_nc(status, subname// ' ERROR: global attribute time_period_freq', & - file=__FILE__, line=__LINE__) - endif + title = 'Diagnostic and Prognostic Variables' + status = nf90_put_att(ncid,nf90_global,'contents',title) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: global attribute contents') + + write(title,'(2a)') 'Los Alamos Sea Ice Model, ', trim(version_name) + status = nf90_put_att(ncid,nf90_global,'source',title) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: global attribute source') + + if (use_leap_years) then + write(title,'(a,i3,a)') 'This year has ',dayyr,' days' + else + write(title,'(a,i3,a)') 'All years have exactly ',dayyr,' days' + endif + status = nf90_put_att(ncid,nf90_global,'comment',title) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: global attribute comment') + + write(title,'(a,i8.8)') 'File written on model date ',idate + status = nf90_put_att(ncid,nf90_global,'comment2',title) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: global attribute date1') + + write(title,'(a,i6)') 'seconds elapsed into model date: ',msec + status = nf90_put_att(ncid,nf90_global,'comment3',title) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: global attribute date2') + + title = 'CF-1.0' + status = & + 
nf90_put_att(ncid,nf90_global,'conventions',title) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: in global attribute conventions') + + call date_and_time(date=current_date, time=current_time) + write(start_time,1000) current_date(1:4), current_date(5:6), & + current_date(7:8), current_time(1:2), & + current_time(3:4), current_time(5:8) +1000 format('This dataset was created on ', & + a,'-',a,'-',a,' at ',a,':',a,':',a) + + status = nf90_put_att(ncid,nf90_global,'history',start_time) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: global attribute history') + + status = nf90_put_att(ncid,nf90_global,'io_flavor','io_netcdf') + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: global attribute io_flavor') - if (hist_avg(ns)) then - status = nf90_put_att(ncid,nf90_global,'time_axis_position',trim(hist_time_axis)) - call ice_check_nc(status, subname// ' ERROR: global attribute time axis position', & - file=__FILE__, line=__LINE__) - endif + !----------------------------------------------------------------- + ! 
end define mode + !----------------------------------------------------------------- - title = 'CF-1.0' - status = nf90_put_att(ncid,nf90_global,'conventions',title) - call ice_check_nc(status, subname// ' ERROR: in global attribute conventions', & - file=__FILE__, line=__LINE__) - - call date_and_time(date=current_date, time=current_time) - write(start_time,1000) current_date(1:4), current_date(5:6), & - current_date(7:8), current_time(1:2), & - current_time(3:4), current_time(5:8) -1000 format('This dataset was created on ', & - a,'-',a,'-',a,' at ',a,':',a,':',a) - - status = nf90_put_att(ncid,nf90_global,'history',start_time) - call ice_check_nc(status, subname// ' ERROR: global attribute history', & - file=__FILE__, line=__LINE__) - - status = nf90_put_att(ncid,nf90_global,'io_flavor','io_netcdf') - call ice_check_nc(status, subname// ' ERROR: global attribute io_flavor', & - file=__FILE__, line=__LINE__) - - !----------------------------------------------------------------- - ! end define mode - !----------------------------------------------------------------- - - status = nf90_enddef(ncid) - call ice_check_nc(status, subname// ' ERROR: in nf90_enddef', & - file=__FILE__, line=__LINE__) - - !----------------------------------------------------------------- - ! write time variable - !----------------------------------------------------------------- - - ltime2 = timesecs/secday ! hist_time_axis = 'end' (default) - - ! Some coupled models require the time axis "stamp" to be in the middle - ! or even beginning of averaging interval. 
- if (hist_avg(ns)) then - if (trim(hist_time_axis) == "begin" ) ltime2 = time_beg(ns) - if (trim(hist_time_axis) == "middle") ltime2 = p5*(time_beg(ns)+time_end(ns)) - endif + status = nf90_enddef(ncid) + if (status /= nf90_noerr) call abort_ice(subname//'ERROR in nf90_enddef') - status = nf90_inq_varid(ncid,'time',varid) - call ice_check_nc(status, subname// ' ERROR: getting time varid', & - file=__FILE__, line=__LINE__) - status = nf90_put_var(ncid,varid,ltime2) - call ice_check_nc(status, subname// ' ERROR: writing time variable', & - file=__FILE__, line=__LINE__) - - !----------------------------------------------------------------- - ! write time_bounds info - !----------------------------------------------------------------- - - if (hist_avg(ns) .and. .not. write_ic) then - status = nf90_inq_varid(ncid,'time_bounds',varid) - call ice_check_nc(status, subname// ' ERROR: getting time_bounds id', & - file=__FILE__, line=__LINE__) - status = nf90_put_var(ncid,varid,time_beg(ns),start=(/1/)) - call ice_check_nc(status, subname// ' ERROR: writing time_beg', & - file=__FILE__, line=__LINE__) - status = nf90_put_var(ncid,varid,time_end(ns),start=(/2/)) - call ice_check_nc(status, subname// ' ERROR: writing time_end', & - file=__FILE__, line=__LINE__) - endif + !----------------------------------------------------------------- + ! write time variable + !----------------------------------------------------------------- + + status = nf90_inq_varid(ncid,'time',varid) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: getting time varid') + status = nf90_put_var(ncid,varid,ltime2) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: writing time variable') + + !----------------------------------------------------------------- + ! 
write time_bounds info + !----------------------------------------------------------------- + + if (hist_avg) then + status = nf90_inq_varid(ncid,'time_bounds',varid) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: getting time_bounds id') + status = nf90_put_var(ncid,varid,time_beg(ns),start=(/1/)) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: writing time_beg') + status = nf90_put_var(ncid,varid,time_end(ns),start=(/2/)) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: writing time_end') + endif endif ! master_task @@ -708,140 +751,138 @@ subroutine ice_write_hist (ns) ! write coordinate variables !----------------------------------------------------------------- - do i = 1,ncoord - if(icoord(i)) then - call broadcast_scalar(var_coord(i)%short_name,master_task) - SELECT CASE (var_coord(i)%short_name) - CASE ('TLON') - ! Convert T grid longitude from -180 -> 180 to 0 to 360 - work1 = TLON*rad_to_deg + c360 - where (work1 > c360) work1 = work1 - c360 - where (work1 < c0 ) work1 = work1 + c360 - call gather_global(work_g1,work1,master_task,distrb_info) - CASE ('TLAT') - work1 = TLAT*rad_to_deg - call gather_global(work_g1,work1,master_task,distrb_info) - CASE ('ULON') - work1 = ULON*rad_to_deg - call gather_global(work_g1,work1,master_task,distrb_info) - CASE ('ULAT') - work1 = ULAT*rad_to_deg - call gather_global(work_g1,work1,master_task,distrb_info) - CASE ('NLON') - work1 = NLON*rad_to_deg - call gather_global(work_g1,work1,master_task,distrb_info) - CASE ('NLAT') - work1 = NLAT*rad_to_deg - call gather_global(work_g1,work1,master_task,distrb_info) - CASE ('ELON') - work1 = ELON*rad_to_deg - call gather_global(work_g1,work1,master_task,distrb_info) - CASE ('ELAT') - work1 = ELAT*rad_to_deg - call gather_global(work_g1,work1,master_task,distrb_info) - END SELECT - - if (my_task == master_task) then - status = nf90_inq_varid(ncid, var_coord(i)%short_name, varid) - call ice_check_nc(status, subname// ' ERROR: 
getting varid for '//var_coord(i)%short_name, & - file=__FILE__, line=__LINE__) - status = nf90_put_var(ncid,varid,work_g1) - call ice_check_nc(status, subname// ' ERROR: writing'//var_coord(i)%short_name, & - file=__FILE__, line=__LINE__) - endif - endif - enddo - - ! Extra dimensions (NCAT, NFSD, VGRD*) - - do i = 1, nvar_grdz - if (igrdz(i)) then - call broadcast_scalar(var_grdz(i)%short_name,master_task) - if (my_task == master_task) then - status = nf90_inq_varid(ncid, var_grdz(i)%short_name, varid) - call ice_check_nc(status, subname// ' ERROR: getting varid for '//var_grdz(i)%short_name, & - file=__FILE__, line=__LINE__) - SELECT CASE (var_grdz(i)%short_name) - CASE ('NCAT') - status = nf90_put_var(ncid,varid,hin_max(1:ncat_hist)) - CASE ('NFSD') - status = nf90_put_var(ncid,varid,floe_rad_c(1:nfsd_hist)) - CASE ('VGRDi') ! index - needed for Met Office analysis code - status = nf90_put_var(ncid,varid,(/(k, k=1,nzilyr)/)) - CASE ('VGRDs') ! index - needed for Met Office analysis code - status = nf90_put_var(ncid,varid,(/(k, k=1,nzslyr)/)) - CASE ('VGRDb') - status = nf90_put_var(ncid,varid,(/(k, k=1,nzblyr)/)) - CASE ('VGRDa') - status = nf90_put_var(ncid,varid,(/(k, k=1,nzalyr)/)) - END SELECT - call ice_check_nc(status, subname// ' ERROR: put var '//var_grdz(i)%short_name, & - file=__FILE__, line=__LINE__) - endif - endif - enddo + do i = 1,ncoord + call broadcast_scalar(var_coord(i)%short_name,master_task) + SELECT CASE (var_coord(i)%short_name) + CASE ('TLON') + ! 
Convert T grid longitude from -180 -> 180 to 0 to 360 + work1 = TLON*rad_to_deg + c360 + where (work1 > c360) work1 = work1 - c360 + where (work1 < c0 ) work1 = work1 + c360 + call gather_global(work_g1,work1,master_task,distrb_info) + CASE ('TLAT') + work1 = TLAT*rad_to_deg + call gather_global(work_g1,work1,master_task,distrb_info) + CASE ('ULON') + work1 = ULON*rad_to_deg + call gather_global(work_g1,work1,master_task,distrb_info) + CASE ('ULAT') + work1 = ULAT*rad_to_deg + call gather_global(work_g1,work1,master_task,distrb_info) + CASE ('NLON') + work1 = NLON*rad_to_deg + call gather_global(work_g1,work1,master_task,distrb_info) + CASE ('NLAT') + work1 = NLAT*rad_to_deg + call gather_global(work_g1,work1,master_task,distrb_info) + CASE ('ELON') + work1 = ELON*rad_to_deg + call gather_global(work_g1,work1,master_task,distrb_info) + CASE ('ELAT') + work1 = ELAT*rad_to_deg + call gather_global(work_g1,work1,master_task,distrb_info) + END SELECT + + if (my_task == master_task) then + status = nf90_inq_varid(ncid, var_coord(i)%short_name, varid) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: getting varid for '//var_coord(i)%short_name) + status = nf90_put_var(ncid,varid,work_g1) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: writing'//var_coord(i)%short_name) + endif + enddo + + ! Extra dimensions (NCAT, NFSD, VGRD*) + + do i = 1, nvar_grdz + if (igrdz(i)) then + call broadcast_scalar(var_grdz(i)%short_name,master_task) + if (my_task == master_task) then + status = nf90_inq_varid(ncid, var_grdz(i)%short_name, varid) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: getting varid for '//var_grdz(i)%short_name) + SELECT CASE (var_grdz(i)%short_name) + CASE ('NCAT') + status = nf90_put_var(ncid,varid,hin_max(1:ncat_hist)) + CASE ('NFSD') + status = nf90_put_var(ncid,varid,floe_rad_c(1:nfsd_hist)) + CASE ('VGRDi') ! 
index - needed for Met Office analysis code + status = nf90_put_var(ncid,varid,(/(k, k=1,nzilyr)/)) + CASE ('VGRDs') ! index - needed for Met Office analysis code + status = nf90_put_var(ncid,varid,(/(k, k=1,nzslyr)/)) + CASE ('VGRDb') + status = nf90_put_var(ncid,varid,(/(k, k=1,nzblyr)/)) + CASE ('VGRDa') + status = nf90_put_var(ncid,varid,(/(k, k=1,nzalyr)/)) + END SELECT + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: writing'//var_grdz(i)%short_name) + endif + endif + enddo !----------------------------------------------------------------- ! write grid masks, area and rotation angle !----------------------------------------------------------------- do i = 1, nvar_grd - if (igrd(i)) then - call broadcast_scalar(var_grd(i)%req%short_name,master_task) - SELECT CASE (var_grd(i)%req%short_name) - CASE ('tmask') - call gather_global(work_g1, hm, master_task, distrb_info) - CASE ('umask') - call gather_global(work_g1, uvm, master_task, distrb_info) - CASE ('nmask') - call gather_global(work_g1, npm, master_task, distrb_info) - CASE ('emask') - call gather_global(work_g1, epm, master_task, distrb_info) - CASE ('tarea') - call gather_global(work_g1, tarea, master_task, distrb_info) - CASE ('uarea') - call gather_global(work_g1, uarea, master_task, distrb_info) - CASE ('narea') - call gather_global(work_g1, narea, master_task, distrb_info) - CASE ('earea') - call gather_global(work_g1, earea, master_task, distrb_info) - CASE ('blkmask') - call gather_global(work_g1, bm, master_task, distrb_info) - CASE ('dxu') - call gather_global(work_g1, dxU, master_task, distrb_info) - CASE ('dyu') - call gather_global(work_g1, dyU, master_task, distrb_info) - CASE ('dxt') - call gather_global(work_g1, dxT, master_task, distrb_info) - CASE ('dyt') - call gather_global(work_g1, dyT, master_task, distrb_info) - CASE ('dxn') - call gather_global(work_g1, dxN, master_task, distrb_info) - CASE ('dyn') - call gather_global(work_g1, dyN, master_task, distrb_info) - CASE 
('dxe') - call gather_global(work_g1, dxE, master_task, distrb_info) - CASE ('dye') - call gather_global(work_g1, dyE, master_task, distrb_info) - CASE ('HTN') - call gather_global(work_g1, HTN, master_task, distrb_info) - CASE ('HTE') - call gather_global(work_g1, HTE, master_task, distrb_info) - CASE ('ANGLE') - call gather_global(work_g1, ANGLE, master_task, distrb_info) - CASE ('ANGLET') - call gather_global(work_g1, ANGLET,master_task, distrb_info) - END SELECT - - if (my_task == master_task) then - status = nf90_inq_varid(ncid, var_grd(i)%req%short_name, varid) - call ice_check_nc(status, subname// ' ERROR: getting varid for '//var_grd(i)%req%short_name, & - file=__FILE__, line=__LINE__) - status = nf90_put_var(ncid,varid,work_g1) - call ice_check_nc(status, subname// ' ERROR: writing variable '//var_grd(i)%req%short_name, & - file=__FILE__, line=__LINE__) - endif - endif + if (igrd(i)) then + call broadcast_scalar(var_grd(i)%req%short_name,master_task) + SELECT CASE (var_grd(i)%req%short_name) + CASE ('tmask') + call gather_global(work_g1, hm, master_task, distrb_info) + CASE ('umask') + call gather_global(work_g1, uvm, master_task, distrb_info) + CASE ('nmask') + call gather_global(work_g1, npm, master_task, distrb_info) + CASE ('emask') + call gather_global(work_g1, epm, master_task, distrb_info) + CASE ('tarea') + call gather_global(work_g1, tarea, master_task, distrb_info) + CASE ('uarea') + call gather_global(work_g1, uarea, master_task, distrb_info) + CASE ('narea') + call gather_global(work_g1, narea, master_task, distrb_info) + CASE ('earea') + call gather_global(work_g1, earea, master_task, distrb_info) + CASE ('blkmask') + call gather_global(work_g1, bm, master_task, distrb_info) + CASE ('dxu') + call gather_global(work_g1, dxU, master_task, distrb_info) + CASE ('dyu') + call gather_global(work_g1, dyU, master_task, distrb_info) + CASE ('dxt') + call gather_global(work_g1, dxT, master_task, distrb_info) + CASE ('dyt') + call gather_global(work_g1, 
dyT, master_task, distrb_info) + CASE ('dxn') + call gather_global(work_g1, dxN, master_task, distrb_info) + CASE ('dyn') + call gather_global(work_g1, dyN, master_task, distrb_info) + CASE ('dxe') + call gather_global(work_g1, dxE, master_task, distrb_info) + CASE ('dye') + call gather_global(work_g1, dyE, master_task, distrb_info) + CASE ('HTN') + call gather_global(work_g1, HTN, master_task, distrb_info) + CASE ('HTE') + call gather_global(work_g1, HTE, master_task, distrb_info) + CASE ('ANGLE') + call gather_global(work_g1, ANGLE, master_task, distrb_info) + CASE ('ANGLET') + call gather_global(work_g1, ANGLET,master_task, distrb_info) + END SELECT + + if (my_task == master_task) then + status = nf90_inq_varid(ncid, var_grd(i)%req%short_name, varid) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: getting varid for '//var_grd(i)%req%short_name) + status = nf90_put_var(ncid,varid,work_g1) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: writing variable '//var_grd(i)%req%short_name) + endif + endif enddo !---------------------------------------------------------------- @@ -849,78 +890,78 @@ subroutine ice_write_hist (ns) !---------------------------------------------------------------- if (f_bounds) then - if (my_task==master_task) then - allocate(work1_3(nverts,nx_global,ny_global)) - else - allocate(work1_3(1,1,1)) ! to save memory - endif + if (my_task==master_task) then + allocate(work1_3(nverts,nx_global,ny_global)) + else + allocate(work1_3(1,1,1)) ! 
to save memory + endif - work1_3(:,:,:) = c0 - work1 (:,:,:) = c0 - - do i = 1, nvar_verts - call broadcast_scalar(var_nverts(i)%short_name,master_task) - SELECT CASE (var_nverts(i)%short_name) - CASE ('lont_bounds') - do ivertex = 1, nverts - work1(:,:,:) = lont_bounds(ivertex,:,:,:) - call gather_global(work_g1, work1, master_task, distrb_info) - if (my_task == master_task) work1_3(ivertex,:,:) = work_g1(:,:) - enddo - CASE ('latt_bounds') - do ivertex = 1, nverts - work1(:,:,:) = latt_bounds(ivertex,:,:,:) - call gather_global(work_g1, work1, master_task, distrb_info) - if (my_task == master_task) work1_3(ivertex,:,:) = work_g1(:,:) - enddo - CASE ('lonu_bounds') - do ivertex = 1, nverts - work1(:,:,:) = lonu_bounds(ivertex,:,:,:) - call gather_global(work_g1, work1, master_task, distrb_info) - if (my_task == master_task) work1_3(ivertex,:,:) = work_g1(:,:) - enddo - CASE ('latu_bounds') - do ivertex = 1, nverts - work1(:,:,:) = latu_bounds(ivertex,:,:,:) - call gather_global(work_g1, work1, master_task, distrb_info) - if (my_task == master_task) work1_3(ivertex,:,:) = work_g1(:,:) - enddo - CASE ('lonn_bounds') - do ivertex = 1, nverts - work1(:,:,:) = lonn_bounds(ivertex,:,:,:) - call gather_global(work_g1, work1, master_task, distrb_info) - if (my_task == master_task) work1_3(ivertex,:,:) = work_g1(:,:) - enddo - CASE ('latn_bounds') - do ivertex = 1, nverts - work1(:,:,:) = latn_bounds(ivertex,:,:,:) - call gather_global(work_g1, work1, master_task, distrb_info) - if (my_task == master_task) work1_3(ivertex,:,:) = work_g1(:,:) - enddo - CASE ('lone_bounds') - do ivertex = 1, nverts - work1(:,:,:) = lone_bounds(ivertex,:,:,:) - call gather_global(work_g1, work1, master_task, distrb_info) - if (my_task == master_task) work1_3(ivertex,:,:) = work_g1(:,:) - enddo - CASE ('late_bounds') - do ivertex = 1, nverts - work1(:,:,:) = late_bounds(ivertex,:,:,:) - call gather_global(work_g1, work1, master_task, distrb_info) - if (my_task == master_task) 
work1_3(ivertex,:,:) = work_g1(:,:) - enddo - END SELECT - - if (my_task == master_task) then - status = nf90_inq_varid(ncid, var_nverts(i)%short_name, varid) - call ice_check_nc(status, subname// ' ERROR: getting varid for '//var_nverts(i)%short_name, & - file=__FILE__, line=__LINE__) - status = nf90_put_var(ncid,varid,work1_3) - call ice_check_nc(status, subname// ' ERROR: writing variable '//var_nverts(i)%short_name, & - file=__FILE__, line=__LINE__) - endif - enddo - deallocate(work1_3) + work1_3(:,:,:) = c0 + work1 (:,:,:) = c0 + + do i = 1, nvar_verts + call broadcast_scalar(var_nverts(i)%short_name,master_task) + SELECT CASE (var_nverts(i)%short_name) + CASE ('lont_bounds') + do ivertex = 1, nverts + work1(:,:,:) = lont_bounds(ivertex,:,:,:) + call gather_global(work_g1, work1, master_task, distrb_info) + if (my_task == master_task) work1_3(ivertex,:,:) = work_g1(:,:) + enddo + CASE ('latt_bounds') + do ivertex = 1, nverts + work1(:,:,:) = latt_bounds(ivertex,:,:,:) + call gather_global(work_g1, work1, master_task, distrb_info) + if (my_task == master_task) work1_3(ivertex,:,:) = work_g1(:,:) + enddo + CASE ('lonu_bounds') + do ivertex = 1, nverts + work1(:,:,:) = lonu_bounds(ivertex,:,:,:) + call gather_global(work_g1, work1, master_task, distrb_info) + if (my_task == master_task) work1_3(ivertex,:,:) = work_g1(:,:) + enddo + CASE ('latu_bounds') + do ivertex = 1, nverts + work1(:,:,:) = latu_bounds(ivertex,:,:,:) + call gather_global(work_g1, work1, master_task, distrb_info) + if (my_task == master_task) work1_3(ivertex,:,:) = work_g1(:,:) + enddo + CASE ('lonn_bounds') + do ivertex = 1, nverts + work1(:,:,:) = lonn_bounds(ivertex,:,:,:) + call gather_global(work_g1, work1, master_task, distrb_info) + if (my_task == master_task) work1_3(ivertex,:,:) = work_g1(:,:) + enddo + CASE ('latn_bounds') + do ivertex = 1, nverts + work1(:,:,:) = latn_bounds(ivertex,:,:,:) + call gather_global(work_g1, work1, master_task, distrb_info) + if (my_task == master_task) 
work1_3(ivertex,:,:) = work_g1(:,:) + enddo + CASE ('lone_bounds') + do ivertex = 1, nverts + work1(:,:,:) = lone_bounds(ivertex,:,:,:) + call gather_global(work_g1, work1, master_task, distrb_info) + if (my_task == master_task) work1_3(ivertex,:,:) = work_g1(:,:) + enddo + CASE ('late_bounds') + do ivertex = 1, nverts + work1(:,:,:) = late_bounds(ivertex,:,:,:) + call gather_global(work_g1, work1, master_task, distrb_info) + if (my_task == master_task) work1_3(ivertex,:,:) = work_g1(:,:) + enddo + END SELECT + + if (my_task == master_task) then + status = nf90_inq_varid(ncid, var_nverts(i)%short_name, varid) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: getting varid for '//var_nverts(i)%short_name) + status = nf90_put_var(ncid,varid,work1_3) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: writing variable '//var_nverts(i)%short_name) + endif + enddo + deallocate(work1_3) endif !----------------------------------------------------------------- @@ -930,223 +971,223 @@ subroutine ice_write_hist (ns) work_g1(:,:) = c0 do n=1,num_avail_hist_fields_2D - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - call gather_global(work_g1, a2D(:,:,n,:), & - master_task, distrb_info) - if (my_task == master_task) then - status = nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid) - call ice_check_nc(status, subname// ' ERROR: getting varid for '//avail_hist_fields(n)%vname, & - file=__FILE__, line=__LINE__) - status = nf90_put_var(ncid,varid,work_g1, & - count=(/nx_global,ny_global/)) - call ice_check_nc(status, subname// ' ERROR: writing variable '//avail_hist_fields(n)%vname, & - file=__FILE__, line=__LINE__) - endif - - endif + if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. 
write_ic) then + call gather_global(work_g1, a2D(:,:,n,:), & + master_task, distrb_info) + if (my_task == master_task) then + status = nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: getting varid for '//avail_hist_fields(n)%vname) + status = nf90_put_var(ncid,varid,work_g1, & + count=(/nx_global,ny_global/)) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: writing variable '//avail_hist_fields(n)%vname) + endif + + endif enddo ! num_avail_hist_fields_2D work_g1(:,:) = c0 do n = n2D + 1, n3Dccum - nn = n - n2D - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - if (my_task == master_task) then - status = nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid) - call ice_check_nc(status, subname// ' ERROR: getting varid for '//avail_hist_fields(n)%vname, & - file=__FILE__, line=__LINE__) - endif - do k = 1, ncat_hist - call gather_global(work_g1, a3Dc(:,:,k,nn,:), & - master_task, distrb_info) - - if (my_task == master_task) then - status = nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid) - call ice_check_nc(status, subname// ' ERROR: getting varid for '//avail_hist_fields(n)%vname, & - file=__FILE__, line=__LINE__) - status = nf90_put_var(ncid,varid,work_g1, & - start=(/ 1, 1,k/), & - count=(/nx_global,ny_global,1/)) - call ice_check_nc(status, subname// ' ERROR: writing variable '//avail_hist_fields(n)%vname, & - file=__FILE__, line=__LINE__) - endif - enddo ! k - endif + nn = n - n2D + if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. 
write_ic) then + if (my_task == master_task) then + status = nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: getting varid for '//avail_hist_fields(n)%vname) + endif + do k = 1, ncat_hist + call gather_global(work_g1, a3Dc(:,:,k,nn,:), & + master_task, distrb_info) + + if (my_task == master_task) then + status = nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: getting varid for '//avail_hist_fields(n)%vname) + status = nf90_put_var(ncid,varid,work_g1, & + start=(/ 1, 1,k/), & + count=(/nx_global,ny_global,1/)) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: writing variable '//avail_hist_fields(n)%vname) + endif + enddo ! k + endif enddo ! num_avail_hist_fields_3Dc work_g1(:,:) = c0 do n = n3Dccum+1, n3Dzcum - nn = n - n3Dccum - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - if (my_task == master_task) then - status = nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid) - call ice_check_nc(status, subname// ' ERROR: getting varid for '//avail_hist_fields(n)%vname, & - file=__FILE__, line=__LINE__) - endif - do k = 1, nzilyr - call gather_global(work_g1, a3Dz(:,:,k,nn,:), & - master_task, distrb_info) - - if (my_task == master_task) then - status = nf90_put_var(ncid,varid,work_g1, & - start=(/ 1, 1,k/), & - count=(/nx_global,ny_global,1/)) - call ice_check_nc(status, subname// ' ERROR: writing variable '//avail_hist_fields(n)%vname, & - file=__FILE__, line=__LINE__) - endif - enddo ! k - endif + nn = n - n3Dccum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. 
write_ic) then + if (my_task == master_task) then + status = nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: getting varid for '//avail_hist_fields(n)%vname) + endif + do k = 1, nzilyr + call gather_global(work_g1, a3Dz(:,:,k,nn,:), & + master_task, distrb_info) + + if (my_task == master_task) then + status = nf90_put_var(ncid,varid,work_g1, & + start=(/ 1, 1,k/), & + count=(/nx_global,ny_global,1/)) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: writing variable '//avail_hist_fields(n)%vname) + endif + enddo ! k + endif enddo ! num_avail_hist_fields_3Dz work_g1(:,:) = c0 - do n = n3Dzcum+1, n3Dbcum - nn = n - n3Dzcum - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - if (my_task == master_task) then - status = nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid) - call ice_check_nc(status, subname// ' ERROR: getting varid for '//avail_hist_fields(n)%vname, & - file=__FILE__, line=__LINE__) - endif - do k = 1, nzblyr - call gather_global(work_g1, a3Db(:,:,k,nn,:), & - master_task, distrb_info) - - if (my_task == master_task) then - status = nf90_put_var(ncid,varid,work_g1, & - start=(/ 1, 1,k/), & - count=(/nx_global,ny_global,1/)) - call ice_check_nc(status, subname// ' ERROR: writing variable '//avail_hist_fields(n)%vname, & - file=__FILE__, line=__LINE__) - endif - enddo ! k - endif + do n = n3Dzcum+1, n3Dbcum + nn = n - n3Dzcum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. 
write_ic) then + if (my_task == master_task) then + status = nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: getting varid for '//avail_hist_fields(n)%vname) + endif + do k = 1, nzblyr + call gather_global(work_g1, a3Db(:,:,k,nn,:), & + master_task, distrb_info) + + if (my_task == master_task) then + status = nf90_put_var(ncid,varid,work_g1, & + start=(/ 1, 1,k/), & + count=(/nx_global,ny_global,1/)) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: writing variable '//avail_hist_fields(n)%vname) + endif + enddo ! k + endif enddo ! num_avail_hist_fields_3Db work_g1(:,:) = c0 do n = n3Dbcum+1, n3Dacum - nn = n - n3Dbcum - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - if (my_task == master_task) then - status = nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid) - call ice_check_nc(status, subname// ' ERROR: getting varid for '//avail_hist_fields(n)%vname, & - file=__FILE__, line=__LINE__) - endif - do k = 1, nzalyr - call gather_global(work_g1, a3Da(:,:,k,nn,:), & - master_task, distrb_info) - - if (my_task == master_task) then - status = nf90_put_var(ncid,varid,work_g1, & - start=(/ 1, 1,k/), & - count=(/nx_global,ny_global,1/)) - call ice_check_nc(status, subname// ' ERROR: writing variable '//avail_hist_fields(n)%vname, & - file=__FILE__, line=__LINE__) - endif - enddo ! k - endif + nn = n - n3Dbcum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. 
write_ic) then + if (my_task == master_task) then + status = nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: getting varid for '//avail_hist_fields(n)%vname) + endif + do k = 1, nzalyr + call gather_global(work_g1, a3Da(:,:,k,nn,:), & + master_task, distrb_info) + + if (my_task == master_task) then + status = nf90_put_var(ncid,varid,work_g1, & + start=(/ 1, 1,k/), & + count=(/nx_global,ny_global,1/)) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: writing variable '//avail_hist_fields(n)%vname) + endif + enddo ! k + endif enddo ! num_avail_hist_fields_3Da work_g1(:,:) = c0 do n = n3Dacum+1, n3Dfcum - nn = n - n3Dacum - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - if (my_task == master_task) then - status = nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid) - call ice_check_nc(status, subname// ' ERROR: getting varid for '//avail_hist_fields(n)%vname, & - file=__FILE__, line=__LINE__) - endif - do k = 1, nfsd_hist - call gather_global(work_g1, a3Df(:,:,k,nn,:), & - master_task, distrb_info) - if (my_task == master_task) then - status = nf90_put_var(ncid,varid,work_g1, & - start=(/ 1, 1,k/), & - count=(/nx_global,ny_global,1/)) - call ice_check_nc(status, subname// ' ERROR: writing variable '//avail_hist_fields(n)%vname, & - file=__FILE__, line=__LINE__) - endif - enddo ! k - endif + nn = n - n3Dacum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. 
write_ic) then + if (my_task == master_task) then + status = nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: getting varid for '//avail_hist_fields(n)%vname) + endif + do k = 1, nfsd_hist + call gather_global(work_g1, a3Df(:,:,k,nn,:), & + master_task, distrb_info) + if (my_task == master_task) then + status = nf90_put_var(ncid,varid,work_g1, & + start=(/ 1, 1,k/), & + count=(/nx_global,ny_global,1/)) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: writing variable '//avail_hist_fields(n)%vname) + endif + enddo ! k + endif enddo ! num_avail_hist_fields_3Df work_g1(:,:) = c0 do n = n3Dfcum+1, n4Dicum - nn = n - n3Dfcum - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - if (my_task == master_task) then - status = nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid) - call ice_check_nc(status, subname// ' ERROR: getting varid for '//avail_hist_fields(n)%vname, & - file=__FILE__, line=__LINE__) - endif - do ic = 1, ncat_hist - do k = 1, nzilyr - call gather_global(work_g1, a4Di(:,:,k,ic,nn,:), & - master_task, distrb_info) - if (my_task == master_task) then + nn = n - n3Dfcum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then + if (my_task == master_task) then + status = nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: getting varid for '//avail_hist_fields(n)%vname) + endif + do ic = 1, ncat_hist + do k = 1, nzilyr + call gather_global(work_g1, a4Di(:,:,k,ic,nn,:), & + master_task, distrb_info) + if (my_task == master_task) then status = nf90_put_var(ncid,varid,work_g1, & start=(/ 1, 1,k,ic/), & count=(/nx_global,ny_global,1, 1/)) - call ice_check_nc(status, subname// ' ERROR: writing variable '//avail_hist_fields(n)%vname, & - file=__FILE__, line=__LINE__) - endif - enddo ! k - enddo ! 
ic - endif + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: writing variable '//avail_hist_fields(n)%vname) + endif + enddo ! k + enddo ! ic + endif enddo ! num_avail_hist_fields_4Di work_g1(:,:) = c0 do n = n4Dicum+1, n4Dscum - nn = n - n4Dicum - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - if (my_task == master_task) then - status = nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid) - call ice_check_nc(status, subname// ' ERROR: getting varid for '//avail_hist_fields(n)%vname, & - file=__FILE__, line=__LINE__) - endif - do ic = 1, ncat_hist - do k = 1, nzslyr - call gather_global(work_g1, a4Ds(:,:,k,ic,nn,:), & - master_task, distrb_info) - if (my_task == master_task) then + nn = n - n4Dicum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then + if (my_task == master_task) then + status = nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: getting varid for '//avail_hist_fields(n)%vname) + endif + do ic = 1, ncat_hist + do k = 1, nzslyr + call gather_global(work_g1, a4Ds(:,:,k,ic,nn,:), & + master_task, distrb_info) + if (my_task == master_task) then status = nf90_put_var(ncid,varid,work_g1, & start=(/ 1, 1,k,ic/), & count=(/nx_global,ny_global,1, 1/)) - call ice_check_nc(status, subname// ' ERROR: writing variable '//avail_hist_fields(n)%vname, & - file=__FILE__, line=__LINE__) - endif - enddo ! k - enddo ! ic - endif + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: writing variable '//avail_hist_fields(n)%vname) + endif + enddo ! k + enddo ! ic + endif enddo ! num_avail_hist_fields_4Ds do n = n4Dscum+1, n4Dfcum - nn = n - n4Dscum - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. 
write_ic) then - if (my_task == master_task) then - status = nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid) - call ice_check_nc(status, subname// ' ERROR: getting varid for '//avail_hist_fields(n)%vname, & - file=__FILE__, line=__LINE__) - endif - do ic = 1, ncat_hist - do k = 1, nfsd_hist - call gather_global(work_g1, a4Df(:,:,k,ic,nn,:), & - master_task, distrb_info) - if (my_task == master_task) then + nn = n - n4Dscum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then + if (my_task == master_task) then + status = nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: getting varid for '//avail_hist_fields(n)%vname) + endif + do ic = 1, ncat_hist + do k = 1, nfsd_hist + call gather_global(work_g1, a4Df(:,:,k,ic,nn,:), & + master_task, distrb_info) + if (my_task == master_task) then status = nf90_put_var(ncid,varid,work_g1, & start=(/ 1, 1,k,ic/), & count=(/nx_global,ny_global,1, 1/)) - call ice_check_nc(status, subname// ' ERROR: writing variable '//avail_hist_fields(n)%vname, & - file=__FILE__, line=__LINE__) - endif - enddo ! k - enddo ! ic - endif + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: writing variable '//avail_hist_fields(n)%vname) + endif + enddo ! k + enddo ! ic + endif enddo ! 
num_avail_hist_fields_4Df deallocate(work_g1) @@ -1157,102 +1198,83 @@ subroutine ice_write_hist (ns) if (my_task == master_task) then status = nf90_close(ncid) - call ice_check_nc(status, subname// ' ERROR: closing netCDF history file', & - file=__FILE__, line=__LINE__) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: closing netCDF history file') write(nu_diag,*) ' ' - write(nu_diag,*) 'Finished writing ',trim(ncfile) + write(nu_diag,*) 'Finished writing ',trim(ncfile(ns)) endif #else - call abort_ice(subname//' ERROR: USE_NETCDF cpp not defined', file=__FILE__, line=__LINE__) + call abort_ice(subname//'ERROR: USE_NETCDF cpp not defined', & + file=__FILE__, line=__LINE__) #endif end subroutine ice_write_hist !======================================================================= -! Defines a (time-dependent) history var in the history file -! variables have short_name, long_name and units, coordiantes and cell_measures attributes, -! and are compressed and chunked for 'hdf5' - subroutine ice_hist_field_def(ncid, hfield, lprecision, dimids, ns) + subroutine ice_write_hist_attrs(ncid, varid, hfield, ns) - use ice_history_shared, only: history_deflate, history_chunksize, history_format, ice_hist_field, & - history_precision, hist_avg - use ice_calendar, only: histfreq, histfreq_n, write_ic + use ice_kinds_mod + use ice_calendar, only: histfreq, histfreq_n + use ice_history_shared, only: ice_hist_field, history_precision, & + hist_avg +#ifdef USE_NETCDF + use netcdf +#endif - integer(kind=int_kind), intent(in) :: ncid, dimids(:), lprecision, ns - type(ice_hist_field), intent(in) :: hfield + integer (kind=int_kind), intent(in) :: ncid ! netcdf file id + integer (kind=int_kind), intent(in) :: varid ! netcdf variable id + type (ice_hist_field) , intent(in) :: hfield ! history file info + integer (kind=int_kind), intent(in) :: ns ! history stream - !local vars - integer(kind=int_kind) :: chunks(size(dimids)), i, status, varid + ! 
local variables - character(len=*), parameter :: subname = '(ice_hist_field_def)' + integer (kind=int_kind) :: status + character(len=*), parameter :: subname = '(ice_write_hist_attrs)' #ifdef USE_NETCDF - status = nf90_def_var(ncid, hfield%vname, lprecision, dimids, varid) - call ice_check_nc(status, subname//' ERROR: defining var '//trim(hfield%vname),file=__FILE__,line=__LINE__) - - if (history_format=='hdf5' .and. size(dimids)>1) then - if (dimids(1)==imtid .and. dimids(2)==jmtid) then - chunks(1)=history_chunksize(1) - chunks(2)=history_chunksize(2) - do i = 3, size(dimids) - chunks(i) = 0 - enddo - status = nf90_def_var_chunking(ncid, varid, NF90_CHUNKED, chunksizes=chunks) - call ice_check_nc(status, subname//' ERROR chunking var '//trim(hfield%vname), file=__FILE__, line=__LINE__) - endif - endif - - if (history_format=='hdf5' .and. history_deflate/=0) then - status = nf90_def_var_deflate(ncid, varid, shuffle=0, deflate=1, deflate_level=history_deflate) - call ice_check_nc(status, subname//' ERROR deflating var '//trim(hfield%vname), file=__FILE__, line=__LINE__) - endif - - ! 
add attributes status = nf90_put_att(ncid,varid,'units', hfield%vunit) - call ice_check_nc(status, subname// ' ERROR: defining units for '//hfield%vname, & - file=__FILE__, line=__LINE__) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining units for '//hfield%vname) status = nf90_put_att(ncid,varid, 'long_name', hfield%vdesc) - call ice_check_nc(status, subname// ' ERROR: defining long_name for '//hfield%vname, & - file=__FILE__, line=__LINE__) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining long_name for '//hfield%vname) status = nf90_put_att(ncid,varid,'coordinates', hfield%vcoord) - call ice_check_nc(status, subname// ' ERROR: defining coordinates for '//hfield%vname, & - file=__FILE__, line=__LINE__) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining coordinates for '//hfield%vname) status = nf90_put_att(ncid,varid,'cell_measures', hfield%vcellmeas) - call ice_check_nc(status, subname// ' ERROR: defining cell measures for '//hfield%vname, & - file=__FILE__, line=__LINE__) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining cell measures for '//hfield%vname) - if (hfield%vcomment /= "none") then - status = nf90_put_att(ncid,varid,'comment', hfield%vcomment) - call ice_check_nc(status, subname// ' ERROR: defining comment for '//hfield%vname, & - file=__FILE__, line=__LINE__) + if (hfield%vcomment /= "none") then + status = nf90_put_att(ncid,varid,'comment', hfield%vcomment) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining comment for '//hfield%vname) endif call ice_write_hist_fill(ncid,varid,hfield%vname,history_precision) ! Add cell_methods attribute to variables if averaged - if (hist_avg(ns) .and. .not. 
write_ic) then + if (hist_avg) then if (TRIM(hfield%vname(1:4))/='sig1' & .and.TRIM(hfield%vname(1:4))/='sig2' & .and.TRIM(hfield%vname(1:9))/='sistreave' & .and.TRIM(hfield%vname(1:9))/='sistremax' & .and.TRIM(hfield%vname(1:4))/='sigP') then - status = nf90_put_att(ncid,varid,'cell_methods','time: mean') - call ice_check_nc(status, subname// ' ERROR: defining cell methods for '//hfield%vname, & - file=__FILE__, line=__LINE__) + status = nf90_put_att(ncid,varid,'cell_methods','time: mean') + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining cell methods for '//hfield%vname) endif endif if ((histfreq(ns) == '1' .and. histfreq_n(ns) == 1) & - .or..not. hist_avg(ns) & - .or. write_ic & + .or..not. hist_avg & .or.TRIM(hfield%vname(1:4))=='divu' & .or.TRIM(hfield%vname(1:5))=='shear' & - .or.TRIM(hfield%vname(1:4))=='vort' & .or.TRIM(hfield%vname(1:4))=='sig1' & .or.TRIM(hfield%vname(1:4))=='sig2' & .or.TRIM(hfield%vname(1:4))=='sigP' & @@ -1267,20 +1289,25 @@ subroutine ice_hist_field_def(ncid, hfield, lprecision, dimids, ns) else status = nf90_put_att(ncid,varid,'time_rep','averaged') endif - call ice_check_nc(status, subname// ' ERROR: defining time rep for '//hfield%vname, & - file=__FILE__, line=__LINE__) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining time rep for '//hfield%vname) #else - call abort_ice(subname//' ERROR: USE_NETCDF cpp not defined', file=__FILE__, line=__LINE__) + call abort_ice(subname//'ERROR: USE_NETCDF cpp not defined', & + file=__FILE__, line=__LINE__) #endif - end subroutine ice_hist_field_def + end subroutine ice_write_hist_attrs !======================================================================= -! Defines missing_value and _FillValue attributes subroutine ice_write_hist_fill(ncid,varid,vname,precision) + use ice_kinds_mod +#ifdef USE_NETCDF + use netcdf +#endif + integer (kind=int_kind), intent(in) :: ncid ! netcdf file id integer (kind=int_kind), intent(in) :: varid ! 
netcdf var id character(len=*), intent(in) :: vname ! var name @@ -1297,75 +1324,23 @@ subroutine ice_write_hist_fill(ncid,varid,vname,precision) else status = nf90_put_att(ncid,varid,'missing_value',spval) endif - call ice_check_nc(status, subname// ' ERROR: defining missing_value for '//trim(vname), & - file=__FILE__, line=__LINE__) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining missing_value for '//trim(vname)) if (precision == 8) then status = nf90_put_att(ncid,varid,'_FillValue',spval_dbl) else status = nf90_put_att(ncid,varid,'_FillValue',spval) endif - call ice_check_nc(status, subname// ' ERROR: defining _FillValue for '//trim(vname), & - file=__FILE__, line=__LINE__) + if (status /= nf90_noerr) call abort_ice(subname// & + 'ERROR: defining _FillValue for '//trim(vname)) #else - call abort_ice(subname//' ERROR : USE_NETCDF cpp not defined', file=__FILE__, line=__LINE__) + call abort_ice(subname//'ERROR: USE_NETCDF cpp not defined', & + file=__FILE__, line=__LINE__) #endif end subroutine ice_write_hist_fill -!======================================================================= -! Defines a coordinate var in the history file -! coordinates have short_name, long_name and units attributes, -! 
and are compressed for 'hdf5' when more than one dimensional - - subroutine ice_hist_coord_def(ncid, coord, lprecision, dimids, varid) - - use ice_history_shared, only: history_deflate, history_format, history_chunksize - - integer(kind=int_kind), intent(in) :: ncid, dimids(:), lprecision - type(coord_attributes), intent(in) :: coord - integer(kind=int_kind), intent(inout) :: varid - - !local vars - integer(kind=int_kind) ::chunks(size(dimids)), i, status - - character(len=*), parameter :: subname = '(ice_hist_coord_def)' - -#ifdef USE_NETCDF - status = nf90_def_var(ncid, coord%short_name, lprecision, dimids, varid) - call ice_check_nc(status, subname//' ERROR: defining coord '//coord%short_name,file=__FILE__,line=__LINE__) - - if (history_format=='hdf5' .and. size(dimids)>1) then - if (dimids(1)==imtid .and. dimids(2)==jmtid) then - chunks(1)=history_chunksize(1) - chunks(2)=history_chunksize(2) - do i = 3, size(dimids) - chunks(i) = 0 - enddo - status = nf90_def_var_chunking(ncid, varid, NF90_CHUNKED, chunksizes=chunks) - call ice_check_nc(status, subname//' ERROR chunking var '//trim(coord%short_name), file=__FILE__, line=__LINE__) - endif - endif - - if (history_format=='hdf5' .and. 
history_deflate/=0) then - status=nf90_def_var_deflate(ncid, varid, shuffle=0, deflate=1, deflate_level=history_deflate) - call ice_check_nc(status, subname//' ERROR deflating var '//trim(coord%short_name), file=__FILE__, line=__LINE__) - endif - - status = nf90_put_att(ncid,varid,'long_name',trim(coord%long_name)) - call ice_check_nc(status, subname// ' ERROR: defining long_name for '//coord%short_name, & - file=__FILE__, line=__LINE__) - status = nf90_put_att(ncid, varid, 'units', trim(coord%units)) - call ice_check_nc(status, subname// ' ERROR: defining units for '//coord%short_name, & - file=__FILE__, line=__LINE__) - -#else - call abort_ice(subname//' ERROR: USE_NETCDF cpp not defined', & - file=__FILE__, line=__LINE__) -#endif - - end subroutine ice_hist_coord_def - !======================================================================= end module ice_history_write diff --git a/cicecore/cicedyn/infrastructure/io/io_pio2/ice_history_write.F90 b/cicecore/cicedyn/infrastructure/io/io_pio2/ice_history_write.F90 index b8971a872..6407d8c76 100644 --- a/cicecore/cicedyn/infrastructure/io/io_pio2/ice_history_write.F90 +++ b/cicecore/cicedyn/infrastructure/io/io_pio2/ice_history_write.F90 @@ -1,6 +1,6 @@ !======================================================================= ! -! Writes history in netCDF format using NCAR ParallelIO library +! Writes history in netCDF format ! ! authors Tony Craig and Bruce Briegleb, NCAR ! Elizabeth C. Hunke and William H. Lipscomb, LANL @@ -18,32 +18,16 @@ module ice_history_write use ice_kinds_mod - use ice_constants, only: c0, c360, p5, spval, spval_dbl + use ice_constants, only: c0, c360, spval, spval_dbl use ice_fileunits, only: nu_diag use ice_exit, only: abort_ice use icepack_intfc, only: icepack_warnings_flush, icepack_warnings_aborted use icepack_intfc, only: icepack_query_parameters - use ice_calendar, only: write_ic, histfreq - use ice_pio implicit none private - - TYPE coord_attributes ! 
netcdf coordinate attributes - character (len=11) :: short_name - character (len=45) :: long_name - character (len=30) :: units - END TYPE coord_attributes - - TYPE req_attributes ! req'd netcdf attributes - type (coord_attributes) :: req - character (len=20) :: coordinates - END TYPE req_attributes - public :: ice_write_hist - integer (kind=int_kind) :: imtid,jmtid - !======================================================================= contains @@ -58,8 +42,8 @@ subroutine ice_write_hist (ns) use ice_blocks, only: nx_block, ny_block use ice_broadcast, only: broadcast_scalar - use ice_calendar, only: msec, timesecs, idate, idate0, & - histfreq_n, days_per_year, use_leap_years, dayyr, & + use ice_calendar, only: msec, timesecs, idate, idate0, write_ic, & + histfreq, histfreq_n, days_per_year, use_leap_years, dayyr, & hh_init, mm_init, ss_init use ice_communicate, only: my_task, master_task use ice_domain, only: distrb_info, nblocks @@ -73,7 +57,8 @@ subroutine ice_write_hist (ns) lonn_bounds, latn_bounds, lone_bounds, late_bounds use ice_history_shared use ice_arrays_column, only: hin_max, floe_rad_c - use ice_restart_shared, only: runid + use ice_restart_shared, only: runid, lcdf64 + use ice_pio use pio integer (kind=int_kind), intent(in) :: ns @@ -81,7 +66,7 @@ subroutine ice_write_hist (ns) ! 
local variables integer (kind=int_kind) :: i,j,k,ic,n,nn, & - ncid,status,kmtidi,kmtids,kmtidb, cmtid,timid, & + ncid,status,imtid,jmtid,kmtidi,kmtids,kmtidb, cmtid,timid, & length,nvertexid,ivertex,kmtida,fmtid integer (kind=int_kind), dimension(2) :: dimid2 integer (kind=int_kind), dimension(3) :: dimid3 @@ -90,15 +75,15 @@ subroutine ice_write_hist (ns) integer (kind=int_kind), dimension(3) :: dimid_nverts integer (kind=int_kind), dimension(6) :: dimidex real (kind= dbl_kind) :: ltime2 - character (len=8) :: cdate - character (len=char_len_long) :: title, cal_units, cal_att - character (len=char_len) :: time_period_freq = 'none' - character (len=char_len_long) :: ncfile + character (char_len) :: title + character (char_len_long) :: ncfile(max_nstrm) + integer (kind=int_kind) :: iotype - integer (kind=int_kind) :: icategory,ind,i_aice,boundid, lprecision + integer (kind=int_kind) :: icategory,ind,i_aice,boundid - character (len=char_len) :: start_time,current_date,current_time + character (char_len) :: start_time,current_date,current_time character (len=16) :: c_aice + character (len=8) :: cdate type(file_desc_t) :: File type(io_desc_t) :: iodesc2d, & @@ -107,8 +92,8 @@ subroutine ice_write_hist (ns) iodesc4di, iodesc4ds, iodesc4df type(var_desc_t) :: varid - ! time coord - TYPE(coord_attributes) :: time_coord + ! 8 coordinate variables: TLON, TLAT, ULON, ULAT, NLON, NLAT, ELON, ELAT + INTEGER (kind=int_kind), PARAMETER :: ncoord = 8 ! 4 vertices in each grid cell INTEGER (kind=int_kind), PARAMETER :: nverts = 4 @@ -118,6 +103,17 @@ subroutine ice_write_hist (ns) ! lonn_bounds, latn_bounds, lone_bounds, late_bounds INTEGER (kind=int_kind), PARAMETER :: nvar_verts = 8 + TYPE coord_attributes ! netcdf coordinate attributes + character (len=11) :: short_name + character (len=45) :: long_name + character (len=20) :: units + END TYPE coord_attributes + + TYPE req_attributes ! 
req'd netcdf attributes + type (coord_attributes) :: req + character (len=20) :: coordinates + END TYPE req_attributes + TYPE(req_attributes), dimension(nvar_grd) :: var_grd TYPE(coord_attributes), dimension(ncoord) :: var_coord TYPE(coord_attributes), dimension(nvar_verts) :: var_nverts @@ -134,7 +130,8 @@ subroutine ice_write_hist (ns) real (kind=real_kind), allocatable :: workr4(:,:,:,:,:) real (kind=real_kind), allocatable :: workr3v(:,:,:,:) - character(len=char_len_long) :: filename + character(len=char_len_long) :: & + filename integer (kind=int_kind), dimension(1) :: & tim_start,tim_length ! dimension quantities for netCDF @@ -145,7 +142,7 @@ subroutine ice_write_hist (ns) real (kind=dbl_kind) :: secday real (kind=dbl_kind) :: rad_to_deg - logical (kind=log_kind), save :: first_call = .true. + integer (kind=int_kind) :: lprecision character(len=*), parameter :: subname = '(ice_write_hist)' @@ -156,23 +153,25 @@ subroutine ice_write_hist (ns) file=__FILE__, line=__LINE__) if (my_task == master_task) then - call construct_filename(ncfile,'nc',ns) + call construct_filename(ncfile(ns),'nc',ns) ! add local directory path name to ncfile if (write_ic) then - ncfile = trim(incond_dir)//ncfile + ncfile(ns) = trim(incond_dir)//ncfile(ns) else - ncfile = trim(history_dir)//ncfile + ncfile(ns) = trim(history_dir)//ncfile(ns) endif - filename = ncfile + filename = ncfile(ns) end if call broadcast_scalar(filename, master_task) ! 
create file + + iotype = PIO_IOTYPE_NETCDF + if (history_format == 'pio_pnetcdf') iotype = PIO_IOTYPE_PNETCDF File%fh=-1 call ice_pio_init(mode='write', filename=trim(filename), File=File, & - clobber=.true., fformat=trim(history_format), rearr=trim(history_rearranger), & - iotasks=history_iotasks, root=history_root, stride=history_stride, debug=first_call) + clobber=.true., cdf64=lcdf64, iotype=iotype) call ice_pio_initdecomp(iodesc=iodesc2d, precision=history_precision) call ice_pio_initdecomp(ndim3=ncat_hist, iodesc=iodesc3dc, precision=history_precision) @@ -185,6 +184,8 @@ subroutine ice_write_hist (ns) call ice_pio_initdecomp(ndim3=nzslyr, ndim4=ncat_hist, iodesc=iodesc4ds, precision=history_precision) call ice_pio_initdecomp(ndim3=nfsd_hist, ndim4=ncat_hist, iodesc=iodesc4df, precision=history_precision) + ltime2 = timesecs/secday + ! option of turning on double precision history files lprecision = pio_real if (history_precision == 8) lprecision = pio_double @@ -192,123 +193,100 @@ subroutine ice_write_hist (ns) !----------------------------------------------------------------- ! define dimensions !----------------------------------------------------------------- - call pio_seterrorhandling(File, PIO_RETURN_ERROR) - - if (hist_avg(ns) .and. .not. 
write_ic) then - call ice_pio_check(pio_def_dim(File,'nbnd',2,boundid), & - subname//' ERROR: defining dim nbnd with len 2',file=__FILE__,line=__LINE__) - endif - - call ice_pio_check(pio_def_dim(File,'ni',nx_global,imtid), & - subname//' ERROR: defining dim ni',file=__FILE__,line=__LINE__) - - call ice_pio_check(pio_def_dim(File,'nj',ny_global,jmtid), & - subname//' ERROR: defining dim nj',file=__FILE__,line=__LINE__) - call ice_pio_check(pio_def_dim(File,'nc',ncat_hist,cmtid), & - subname//' ERROR: defining dim nc',file=__FILE__,line=__LINE__) - - call ice_pio_check(pio_def_dim(File,'nkice',nzilyr,kmtidi), & - subname//' ERROR: defining dim nkice',file=__FILE__,line=__LINE__) - - call ice_pio_check(pio_def_dim(File,'nksnow',nzslyr,kmtids), & - subname//' ERROR: defining dim nksnow',file=__FILE__,line=__LINE__) - - call ice_pio_check(pio_def_dim(File,'nkbio',nzblyr,kmtidb), & - subname//' ERROR: defining dim nkbio',file=__FILE__,line=__LINE__) - - call ice_pio_check(pio_def_dim(File,'nkaer',nzalyr,kmtida), & - subname//' ERROR: defining dim nkaer',file=__FILE__,line=__LINE__) - - call ice_pio_check(pio_def_dim(File,'time',PIO_UNLIMITED,timid), & - subname//' ERROR: defining dim time',file=__FILE__,line=__LINE__) - - call ice_pio_check(pio_def_dim(File,'nvertices',nverts,nvertexid), & - subname//' ERROR: defining dim nvertices',file=__FILE__,line=__LINE__) + if (hist_avg) then + status = pio_def_dim(File,'d2',2,boundid) + endif - call ice_pio_check(pio_def_dim(File,'nf',nfsd_hist,fmtid), & - subname//' ERROR: defining dim nf',file=__FILE__,line=__LINE__) + status = pio_def_dim(File,'ni',nx_global,imtid) + status = pio_def_dim(File,'nj',ny_global,jmtid) + status = pio_def_dim(File,'nc',ncat_hist,cmtid) + status = pio_def_dim(File,'nkice',nzilyr,kmtidi) + status = pio_def_dim(File,'nksnow',nzslyr,kmtids) + status = pio_def_dim(File,'nkbio',nzblyr,kmtidb) + status = pio_def_dim(File,'nkaer',nzalyr,kmtida) + status = pio_def_dim(File,'time',PIO_UNLIMITED,timid) + 
status = pio_def_dim(File,'nvertices',nverts,nvertexid) + status = pio_def_dim(File,'nf',nfsd_hist,fmtid) !----------------------------------------------------------------- ! define coordinate variables: time, time_bounds !----------------------------------------------------------------- - write(cdate,'(i8.8)') idate0 - write(cal_units,'(a,a4,a1,a2,a1,a2,a1,i2.2,a1,i2.2,a1,i2.2)') 'days since ', & - cdate(1:4),'-',cdate(5:6),'-',cdate(7:8),' ', & - hh_init,':',mm_init,':',ss_init - - if (days_per_year == 360) then - cal_att='360_day' - elseif (days_per_year == 365 .and. .not.use_leap_years ) then - cal_att='noleap' - elseif (use_leap_years) then - cal_att='Gregorian' - else - call abort_ice(subname//' ERROR: invalid calendar settings') - endif - - time_coord = coord_attributes('time', 'time', trim(cal_units)) - call ice_hist_coord_def(File, time_coord, pio_double, (/timid/), varid) - call ice_pio_check(pio_put_att(File,varid,'calendar',cal_att), & - subname//' ERROR: defining att calendar: '//cal_att,file=__FILE__,line=__LINE__) - if (hist_avg(ns) .and. .not. write_ic) then - call ice_pio_check(pio_put_att(File,varid,'bounds','time_bounds'), & - subname//' ERROR: defining att bounds time_bounds',file=__FILE__,line=__LINE__) - endif - - ! Define coord time_bounds if hist_avg is true - if (hist_avg(ns) .and. .not. write_ic) then - time_coord = coord_attributes('time_bounds', 'time interval endpoints', trim(cal_units)) + status = pio_def_var(File,'time',pio_double,(/timid/),varid) + status = pio_put_att(File,varid,'long_name','model time') + + write(cdate,'(i8.8)') idate0 + write(title,'(a,a4,a1,a2,a1,a2,a1,i2.2,a1,i2.2,a1,i2.2)') 'days since ', & + cdate(1:4),'-',cdate(5:6),'-',cdate(7:8),' ', & + hh_init,':',mm_init,':',ss_init + status = pio_put_att(File,varid,'units',trim(title)) + + if (days_per_year == 360) then + status = pio_put_att(File,varid,'calendar','360_day') + elseif (days_per_year == 365 .and. 
.not.use_leap_years ) then + status = pio_put_att(File,varid,'calendar','NoLeap') + elseif (use_leap_years) then + status = pio_put_att(File,varid,'calendar','Gregorian') + else + call abort_ice(subname//'ERROR: invalid calendar settings') + endif - dimid2(1) = boundid - dimid2(2) = timid + if (hist_avg) then + status = pio_put_att(File,varid,'bounds','time_bounds') + endif - call ice_hist_coord_def(File, time_coord, pio_double, dimid2, varid) - call ice_pio_check(pio_put_att(File,varid,'calendar',cal_att), & - subname//' ERROR: defining att calendar: '//cal_att,file=__FILE__,line=__LINE__) - endif + ! Define attributes for time_bounds if hist_avg is true + if (hist_avg) then + dimid2(1) = boundid + dimid2(2) = timid + status = pio_def_var(File,'time_bounds',pio_double,dimid2,varid) + status = pio_put_att(File,varid,'long_name', & + 'boundaries for time-averaging interval') + write(cdate,'(i8.8)') idate0 + write(title,'(a,a4,a1,a2,a1,a2,a1,i2.2,a1,i2.2,a1,i2.2)') 'days since ', & + cdate(1:4),'-',cdate(5:6),'-',cdate(7:8),' ', & + hh_init,':',mm_init,':',ss_init + status = pio_put_att(File,varid,'units',trim(title)) + endif !----------------------------------------------------------------- ! 
define information for required time-invariant variables !----------------------------------------------------------------- - do ind = 1, ncoord - select case (ind) - case(n_tlon) - var_coord(ind) = coord_attributes('TLON', & - 'T grid center longitude', 'degrees_east') - coord_bounds(ind) = 'lont_bounds' - case(n_tlat) - var_coord(ind) = coord_attributes('TLAT', & - 'T grid center latitude', 'degrees_north') - coord_bounds(ind) = 'latt_bounds' - case(n_ulon) - var_coord(ind) = coord_attributes('ULON', & - 'U grid center longitude', 'degrees_east') - coord_bounds(ind) = 'lonu_bounds' - case(n_ulat) - var_coord(ind) = coord_attributes('ULAT', & - 'U grid center latitude', 'degrees_north') - coord_bounds(ind) = 'latu_bounds' - case(n_nlon) - var_coord(ind) = coord_attributes('NLON', & - 'N grid center longitude', 'degrees_east') - coord_bounds(ind) = 'lonn_bounds' - case(n_nlat) - var_coord(ind) = coord_attributes('NLAT', & - 'N grid center latitude', 'degrees_north') - coord_bounds(ind) = 'latn_bounds' - case(n_elon) - var_coord(ind) = coord_attributes('ELON', & - 'E grid center longitude', 'degrees_east') - coord_bounds(ind) = 'lone_bounds' - case(n_elat) - var_coord(ind) = coord_attributes('ELAT', & - 'E grid center latitude', 'degrees_north') - coord_bounds(ind) = 'late_bounds' - end select - end do + ind = 0 + ind = ind + 1 + var_coord(ind) = coord_attributes('TLON', & + 'T grid center longitude', 'degrees_east') + coord_bounds(ind) = 'lont_bounds' + ind = ind + 1 + var_coord(ind) = coord_attributes('TLAT', & + 'T grid center latitude', 'degrees_north') + coord_bounds(ind) = 'latt_bounds' + ind = ind + 1 + var_coord(ind) = coord_attributes('ULON', & + 'U grid center longitude', 'degrees_east') + coord_bounds(ind) = 'lonu_bounds' + ind = ind + 1 + var_coord(ind) = coord_attributes('ULAT', & + 'U grid center latitude', 'degrees_north') + coord_bounds(ind) = 'latu_bounds' + ind = ind + 1 + var_coord(ind) = coord_attributes('NLON', & + 'N grid center longitude', 
'degrees_east') + coord_bounds(ind) = 'lonn_bounds' + ind = ind + 1 + var_coord(ind) = coord_attributes('NLAT', & + 'N grid center latitude', 'degrees_north') + coord_bounds(ind) = 'latn_bounds' + ind = ind + 1 + var_coord(ind) = coord_attributes('ELON', & + 'E grid center longitude', 'degrees_east') + coord_bounds(ind) = 'lone_bounds' + ind = ind + 1 + var_coord(ind) = coord_attributes('ELAT', & + 'E grid center latitude', 'degrees_north') + coord_bounds(ind) = 'late_bounds' var_grdz(1) = coord_attributes('NCAT', 'category maximum thickness', 'm') var_grdz(2) = coord_attributes('VGRDi', 'vertical ice levels', '1') @@ -414,172 +392,232 @@ subroutine ice_write_hist (ns) ! define attributes for time-invariant variables !----------------------------------------------------------------- - dimid2(1) = imtid - dimid2(2) = jmtid - - do i = 1, ncoord - if (icoord(i)) then - call ice_hist_coord_def(File, var_coord(i), lprecision, dimid2, varid) - call ice_write_hist_fill(File,varid,var_coord(i)%short_name,history_precision) - if (var_coord(i)%short_name == 'ULAT') then - call ice_pio_check(pio_put_att(File,varid,'comment', & - trim('Latitude of NE corner of T grid cell')), & - subname//' ERROR: defining att comment',file=__FILE__,line=__LINE__) - endif - if (f_bounds) then - call ice_pio_check(pio_put_att(File, varid, 'bounds', trim(coord_bounds(i))), & - subname//' ERROR: defining att bounds '//trim(coord_bounds(i)),file=__FILE__,line=__LINE__) - endif - endif - enddo - - ! 
Extra dimensions (NCAT, NZILYR, NZSLYR, NZBLYR, NZALYR, NFSD) - dimidex(1)=cmtid - dimidex(2)=kmtidi - dimidex(3)=kmtids - dimidex(4)=kmtidb - dimidex(5)=kmtida - dimidex(6)=fmtid - - do i = 1, nvar_grdz - if (igrdz(i)) then - call ice_hist_coord_def(File, var_grdz(i), lprecision, dimidex(i:i), varid) - endif - enddo - - do i = 1, nvar_grd - if (igrd(i)) then - call ice_hist_coord_def(File, var_grd(i)%req, lprecision, dimid2, varid) - call ice_pio_check(pio_put_att(File, varid, 'coordinates', trim(var_grd(i)%coordinates)), & - subname//' ERROR: defining att coordinates '//trim(var_grd(i)%coordinates),file=__FILE__,line=__LINE__) - call ice_write_hist_fill(File,varid,var_grd(i)%req%short_name,history_precision) - endif - enddo - - ! Fields with dimensions (nverts,nx,ny) - dimid_nverts(1) = nvertexid - dimid_nverts(2) = imtid - dimid_nverts(3) = jmtid - do i = 1, nvar_verts - if (f_bounds) then - call ice_hist_coord_def(File, var_nverts(i), lprecision, dimid_nverts, varid) - call ice_write_hist_fill(File,varid,var_nverts(i)%short_name,history_precision) - endif - enddo + dimid2(1) = imtid + dimid2(2) = jmtid + + do i = 1, ncoord + status = pio_def_var(File, trim(var_coord(i)%short_name), lprecision, & + dimid2, varid) + status = pio_put_att(File,varid,'long_name',trim(var_coord(i)%long_name)) + status = pio_put_att(File, varid, 'units', trim(var_coord(i)%units)) + call ice_write_hist_fill(File,varid,var_coord(i)%short_name,history_precision) + if (var_coord(i)%short_name == 'ULAT') then + status = pio_put_att(File,varid,'comment', & + trim('Latitude of NE corner of T grid cell')) + endif + if (f_bounds) then + status = pio_put_att(File, varid, 'bounds', trim(coord_bounds(i))) + endif + enddo + + ! 
Extra dimensions (NCAT, NZILYR, NZSLYR, NZBLYR, NZALYR, NFSD) + dimidex(1)=cmtid + dimidex(2)=kmtidi + dimidex(3)=kmtids + dimidex(4)=kmtidb + dimidex(5)=kmtida + dimidex(6)=fmtid + + do i = 1, nvar_grdz + if (igrdz(i)) then + status = pio_def_var(File, trim(var_grdz(i)%short_name), lprecision, & + (/dimidex(i)/), varid) + status = pio_put_att(File, varid, 'long_name', var_grdz(i)%long_name) + status = pio_put_att(File, varid, 'units' , var_grdz(i)%units) + endif + enddo + + do i = 1, nvar_grd + if (igrd(i)) then + status = pio_def_var(File, trim(var_grd(i)%req%short_name), & + lprecision, dimid2, varid) + status = pio_put_att(File,varid, 'long_name', trim(var_grd(i)%req%long_name)) + status = pio_put_att(File, varid, 'units', trim(var_grd(i)%req%units)) + status = pio_put_att(File, varid, 'coordinates', trim(var_grd(i)%coordinates)) + call ice_write_hist_fill(File,varid,var_grd(i)%req%short_name,history_precision) + endif + enddo + + ! Fields with dimensions (nverts,nx,ny) + dimid_nverts(1) = nvertexid + dimid_nverts(2) = imtid + dimid_nverts(3) = jmtid + do i = 1, nvar_verts + if (f_bounds) then + status = pio_def_var(File, trim(var_nverts(i)%short_name), & + lprecision,dimid_nverts, varid) + status = & + pio_put_att(File,varid, 'long_name', trim(var_nverts(i)%long_name)) + status = & + pio_put_att(File, varid, 'units', trim(var_nverts(i)%units)) + call ice_write_hist_fill(File,varid,var_nverts(i)%short_name,history_precision) + endif + enddo !----------------------------------------------------------------- ! define attributes for time-variant variables !----------------------------------------------------------------- + !----------------------------------------------------------------- ! 2D - dimid3(1) = imtid - dimid3(2) = jmtid - dimid3(3) = timid + !----------------------------------------------------------------- - do n=1,num_avail_hist_fields_2D - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. 
write_ic) then - call ice_hist_field_def(File, avail_hist_fields(n),lprecision, dimid3, ns) - endif - enddo + dimid3(1) = imtid + dimid3(2) = jmtid + dimid3(3) = timid + do n=1,num_avail_hist_fields_2D + if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then + status = pio_def_var(File, trim(avail_hist_fields(n)%vname), & + lprecision, dimid3, varid) + call ice_write_hist_attrs(File,varid,avail_hist_fields(n),ns) + endif + enddo ! num_avail_hist_fields_2D + + !----------------------------------------------------------------- ! 3D (category) - dimidz(1) = imtid - dimidz(2) = jmtid - dimidz(3) = cmtid - dimidz(4) = timid + !----------------------------------------------------------------- - do n = n2D + 1, n3Dccum - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - call ice_hist_field_def(File, avail_hist_fields(n),lprecision, dimidz,ns) - endif - enddo ! num_avail_hist_fields_3Dc + dimidz(1) = imtid + dimidz(2) = jmtid + dimidz(3) = cmtid + dimidz(4) = timid + do n = n2D + 1, n3Dccum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then + status = pio_def_var(File, trim(avail_hist_fields(n)%vname), & + lprecision, dimidz, varid) + call ice_write_hist_attrs(File,varid,avail_hist_fields(n),ns) + endif + enddo ! num_avail_hist_fields_3Dc + + !----------------------------------------------------------------- ! 3D (ice layers) - dimidz(1) = imtid - dimidz(2) = jmtid - dimidz(3) = kmtidi - dimidz(4) = timid + !----------------------------------------------------------------- - do n = n3Dccum + 1, n3Dzcum - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - call ice_hist_field_def(File, avail_hist_fields(n),lprecision, dimidz,ns) - endif - enddo ! num_avail_hist_fields_3Dz + dimidz(1) = imtid + dimidz(2) = jmtid + dimidz(3) = kmtidi + dimidz(4) = timid + + do n = n3Dccum + 1, n3Dzcum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. 
write_ic) then + status = pio_def_var(File, trim(avail_hist_fields(n)%vname), & + lprecision, dimidz, varid) + call ice_write_hist_attrs(File,varid,avail_hist_fields(n),ns) + endif + enddo ! num_avail_hist_fields_3Dz + !----------------------------------------------------------------- ! 3D (biology ice layers) - dimidz(1) = imtid - dimidz(2) = jmtid - dimidz(3) = kmtidb - dimidz(4) = timid + !----------------------------------------------------------------- - do n = n3Dzcum + 1, n3Dbcum - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - call ice_hist_field_def(File, avail_hist_fields(n),lprecision, dimidz,ns) - endif - enddo ! num_avail_hist_fields_3Db + dimidz(1) = imtid + dimidz(2) = jmtid + dimidz(3) = kmtidb + dimidz(4) = timid + + do n = n3Dzcum + 1, n3Dbcum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then + status = pio_def_var(File, trim(avail_hist_fields(n)%vname), & + lprecision, dimidz, varid) + call ice_write_hist_attrs(File,varid,avail_hist_fields(n),ns) + endif + enddo ! num_avail_hist_fields_3Db + !----------------------------------------------------------------- ! 3D (biology snow layers) - dimidz(1) = imtid - dimidz(2) = jmtid - dimidz(3) = kmtida - dimidz(4) = timid + !----------------------------------------------------------------- - do n = n3Dbcum + 1, n3Dacum - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - call ice_hist_field_def(File, avail_hist_fields(n),lprecision, dimidz,ns) - endif - enddo ! num_avail_hist_fields_3Da + dimidz(1) = imtid + dimidz(2) = jmtid + dimidz(3) = kmtida + dimidz(4) = timid + do n = n3Dbcum + 1, n3Dacum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then + status = pio_def_var(File, trim(avail_hist_fields(n)%vname), & + lprecision, dimidz, varid) + call ice_write_hist_attrs(File,varid,avail_hist_fields(n),ns) + endif + enddo ! 
num_avail_hist_fields_3Da + + !----------------------------------------------------------------- ! 3D (fsd) - dimidz(1) = imtid - dimidz(2) = jmtid - dimidz(3) = fmtid - dimidz(4) = timid + !----------------------------------------------------------------- - do n = n3Dacum + 1, n3Dfcum - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - call ice_hist_field_def(File, avail_hist_fields(n),lprecision, dimidz,ns) - endif - enddo ! num_avail_hist_fields_3Df + dimidz(1) = imtid + dimidz(2) = jmtid + dimidz(3) = fmtid + dimidz(4) = timid + + do n = n3Dacum + 1, n3Dfcum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then + status = pio_def_var(File, trim(avail_hist_fields(n)%vname), & + lprecision, dimidz, varid) + call ice_write_hist_attrs(File,varid,avail_hist_fields(n),ns) + endif + enddo ! num_avail_hist_fields_3Df + !----------------------------------------------------------------- + ! define attributes for 4D variables + ! time coordinate is dropped + !----------------------------------------------------------------- + + !----------------------------------------------------------------- ! 4D (ice categories) - dimidcz(1) = imtid - dimidcz(2) = jmtid - dimidcz(3) = kmtidi - dimidcz(4) = cmtid - dimidcz(5) = timid + !----------------------------------------------------------------- - do n = n3Dfcum + 1, n4Dicum - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - call ice_hist_field_def(File, avail_hist_fields(n),lprecision, dimidcz,ns) - endif - enddo ! num_avail_hist_fields_4Di + dimidcz(1) = imtid + dimidcz(2) = jmtid + dimidcz(3) = kmtidi + dimidcz(4) = cmtid + dimidcz(5) = timid + + do n = n3Dfcum + 1, n4Dicum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then + status = pio_def_var(File, trim(avail_hist_fields(n)%vname), & + lprecision, dimidcz, varid) + call ice_write_hist_attrs(File,varid,avail_hist_fields(n),ns) + endif + enddo ! 
num_avail_hist_fields_4Di + !----------------------------------------------------------------- ! 4D (snow layers) - dimidcz(1) = imtid - dimidcz(2) = jmtid - dimidcz(3) = kmtids - dimidcz(4) = cmtid - dimidcz(5) = timid + !----------------------------------------------------------------- - do n = n4Dicum + 1, n4Dscum - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - call ice_hist_field_def(File, avail_hist_fields(n),lprecision, dimidcz,ns) - endif - enddo ! num_avail_hist_fields_4Ds + dimidcz(1) = imtid + dimidcz(2) = jmtid + dimidcz(3) = kmtids + dimidcz(4) = cmtid + dimidcz(5) = timid + do n = n4Dicum + 1, n4Dscum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then + status = pio_def_var(File, trim(avail_hist_fields(n)%vname), & + lprecision, dimidcz, varid) + call ice_write_hist_attrs(File,varid,avail_hist_fields(n),ns) + endif + enddo ! num_avail_hist_fields_4Ds + + + !----------------------------------------------------------------- ! 4D (fsd layers) - dimidcz(1) = imtid - dimidcz(2) = jmtid - dimidcz(3) = fmtid - dimidcz(4) = cmtid - dimidcz(5) = timid + !----------------------------------------------------------------- - do n = n4Dscum + 1, n4Dfcum - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - call ice_hist_field_def(File, avail_hist_fields(n),lprecision, dimidcz,ns) - endif - enddo ! num_avail_hist_fields_4Df + dimidcz(1) = imtid + dimidcz(2) = jmtid + dimidcz(3) = fmtid + dimidcz(4) = cmtid + dimidcz(5) = timid + + do n = n4Dscum + 1, n4Dfcum + if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then + status = pio_def_var(File, trim(avail_hist_fields(n)%vname), & + lprecision, dimidcz, varid) + call ice_write_hist_attrs(File,varid,avail_hist_fields(n),ns) + endif + enddo ! num_avail_hist_fields_4Df !----------------------------------------------------------------- ! global attributes @@ -587,191 +625,133 @@ subroutine ice_write_hist (ns) ! ... 
the user should change these to something useful ... !----------------------------------------------------------------- #ifdef CESMCOUPLED - call ice_pio_check(pio_put_att(File,pio_global,'title',runid), & - subname//' ERROR: defining att title '//runid,file=__FILE__,line=__LINE__) + status = pio_put_att(File,pio_global,'title',runid) #else - title = 'sea ice model output for CICE' - call ice_pio_check(pio_put_att(File,pio_global,'title',trim(title)), & - subname//' ERROR: defining att title '//trim(title),file=__FILE__,line=__LINE__) + title = 'sea ice model output for CICE' + status = pio_put_att(File,pio_global,'title',trim(title)) #endif - title = 'Diagnostic and Prognostic Variables' - call ice_pio_check(pio_put_att(File,pio_global,'contents',trim(title)), & - subname//' ERROR: defining att contents '//trim(title),file=__FILE__,line=__LINE__) + title = 'Diagnostic and Prognostic Variables' + status = pio_put_att(File,pio_global,'contents',trim(title)) - write(title,'(2a)') 'CICE Sea Ice Model, ', trim(version_name) - call ice_pio_check(pio_put_att(File,pio_global,'source',trim(title)), & - subname//' ERROR: defining att source '//trim(title),file=__FILE__,line=__LINE__) + write(title,'(2a)') 'Los Alamos Sea Ice Model, ', trim(version_name) + status = pio_put_att(File,pio_global,'source',trim(title)) - if (use_leap_years) then - write(title,'(a,i3,a)') 'This year has ',dayyr,' days' - else - write(title,'(a,i3,a)') 'All years have exactly ',dayyr,' days' - endif - call ice_pio_check(pio_put_att(File,pio_global,'comment',trim(title)), & - subname//' ERROR: defining att comment '//trim(title),file=__FILE__,line=__LINE__) - - write(title,'(a,i8.8)') 'File written on model date ',idate - call ice_pio_check(pio_put_att(File,pio_global,'comment2',trim(title)), & - subname//' ERROR: defining att comment2 '//trim(title),file=__FILE__,line=__LINE__) - - write(title,'(a,i6)') 'seconds elapsed into model date: ',msec - call 
ice_pio_check(pio_put_att(File,pio_global,'comment3',trim(title)), & - subname//' ERROR: defining att comment3 '//trim(title),file=__FILE__,line=__LINE__) - - select case (histfreq(ns)) - case ("y", "Y") - write(time_period_freq,'(a,i0)') 'year_',histfreq_n(ns) - case ("m", "M") - write(time_period_freq,'(a,i0)') 'month_',histfreq_n(ns) - case ("d", "D") - write(time_period_freq,'(a,i0)') 'day_',histfreq_n(ns) - case ("h", "H") - write(time_period_freq,'(a,i0)') 'hour_',histfreq_n(ns) - case ("1") - write(time_period_freq,'(a,i0)') 'step_',histfreq_n(ns) - end select - - if (.not.write_ic .and. trim(time_period_freq) /= 'none') then - call ice_pio_check(pio_put_att(File,pio_global,'time_period_freq',trim(time_period_freq)), & - subname//' ERROR: defining att time_period_freq '//trim(time_period_freq),file=__FILE__,line=__LINE__) - endif + if (use_leap_years) then + write(title,'(a,i3,a)') 'This year has ',dayyr,' days' + else + write(title,'(a,i3,a)') 'All years have exactly ',dayyr,' days' + endif + status = pio_put_att(File,pio_global,'comment',trim(title)) - if (hist_avg(ns)) & - call ice_pio_check(pio_put_att(File,pio_global,'time_axis_position',trim(hist_time_axis)), & - subname//' ERROR: defining att time_axis_position '//trim(hist_time_axis),file=__FILE__,line=__LINE__) - - title = 'CF-1.0' - call ice_pio_check(pio_put_att(File,pio_global,'conventions',trim(title)), & - subname//' ERROR: defining att conventions '//trim(title),file=__FILE__,line=__LINE__) - - call date_and_time(date=current_date, time=current_time) - write(start_time,1000) current_date(1:4), current_date(5:6), & - current_date(7:8), current_time(1:2), & - current_time(3:4) -1000 format('This dataset was created on ', & - a,'-',a,'-',a,' at ',a,':',a) - call ice_pio_check(pio_put_att(File,pio_global,'history',trim(start_time)), & - subname//' ERROR: defining att history '//trim(start_time),file=__FILE__,line=__LINE__) - -#ifdef USE_PIO1 - call 
ice_pio_check(pio_put_att(File,pio_global,'io_flavor','io_pio1 '//trim(history_format)), & - subname//' ERROR: defining att io_flavor',file=__FILE__,line=__LINE__) -#else - call ice_pio_check(pio_put_att(File,pio_global,'io_flavor','io_pio2 '//trim(history_format)), & - subname//' ERROR: defining att io_flavor',file=__FILE__,line=__LINE__) -#endif + write(title,'(a,i8.8)') 'File written on model date ',idate + status = pio_put_att(File,pio_global,'comment2',trim(title)) + + write(title,'(a,i6)') 'seconds elapsed into model date: ',msec + status = pio_put_att(File,pio_global,'comment3',trim(title)) + + title = 'CF-1.0' + status = & + pio_put_att(File,pio_global,'conventions',trim(title)) + + call date_and_time(date=current_date, time=current_time) + write(start_time,1000) current_date(1:4), current_date(5:6), & + current_date(7:8), current_time(1:2), & + current_time(3:4) +1000 format('This dataset was created on ', & + a,'-',a,'-',a,' at ',a,':',a) + status = pio_put_att(File,pio_global,'history',trim(start_time)) + + if (history_format == 'pio_pnetcdf') then + status = pio_put_att(File,pio_global,'io_flavor','io_pio pnetcdf') + else + status = pio_put_att(File,pio_global,'io_flavor','io_pio netcdf') + endif !----------------------------------------------------------------- ! end define mode !----------------------------------------------------------------- - call ice_pio_check(pio_enddef(File), & - subname//' ERROR: ending pio definitions',file=__FILE__,line=__LINE__) + status = pio_enddef(File) !----------------------------------------------------------------- ! write time variable !----------------------------------------------------------------- - ltime2 = timesecs/secday ! hist_time_axis = 'end' (default) - - ! Some coupled models require the time axis "stamp" to be in the middle - ! or even beginning of averaging interval. 
- if (hist_avg(ns)) then - if (trim(hist_time_axis) == "begin" ) ltime2 = time_beg(ns) - if (trim(hist_time_axis) == "middle") ltime2 = p5*(time_beg(ns)+time_end(ns)) - endif - - call ice_pio_check(pio_inq_varid(File,'time',varid), & - subname//' ERROR: getting var time',file=__FILE__,line=__LINE__) - call ice_pio_check(pio_put_var(File,varid,(/1/),ltime2), & - subname//' ERROR: setting var time',file=__FILE__,line=__LINE__) + status = pio_inq_varid(File,'time',varid) + status = pio_put_var(File,varid,(/1/),ltime2) !----------------------------------------------------------------- ! write time_bounds info !----------------------------------------------------------------- - if (hist_avg(ns) .and. .not. write_ic) then - call ice_pio_check(pio_inq_varid(File,'time_bounds',varid), & - subname//' ERROR: getting time_bounds' ,file=__FILE__,line=__LINE__) - time_bounds=(/time_beg(ns),time_end(ns)/) - bnd_start = (/1,1/) - bnd_length = (/2,1/) - call ice_pio_check(pio_put_var(File,varid,ival=time_bounds,start=bnd_start(:),count=bnd_length(:)), & - subname//' ERROR: setting time_bounds' ,file=__FILE__,line=__LINE__) - endif + if (hist_avg) then + status = pio_inq_varid(File,'time_bounds',varid) + time_bounds=(/time_beg(ns),time_end(ns)/) + bnd_start = (/1,1/) + bnd_length = (/2,1/) + status = pio_put_var(File,varid,ival=time_bounds, & + start=bnd_start(:),count=bnd_length(:)) + endif !----------------------------------------------------------------- ! write coordinate variables !----------------------------------------------------------------- - allocate(workd2(nx_block,ny_block,nblocks)) - allocate(workr2(nx_block,ny_block,nblocks)) - - do i = 1,ncoord - if(icoord(i)) then - call ice_pio_check(pio_inq_varid(File, var_coord(i)%short_name, varid), & - subname//' ERROR: getting '//var_coord(i)%short_name ,file=__FILE__,line=__LINE__) - SELECT CASE (var_coord(i)%short_name) - CASE ('TLON') - ! 
Convert T grid longitude from -180 -> 180 to 0 to 360 - workd2(:,:,:) = mod(tlon(:,:,1:nblocks)*rad_to_deg + c360, c360) - CASE ('TLAT') - workd2(:,:,:) = tlat(:,:,1:nblocks)*rad_to_deg - CASE ('ULON') - workd2(:,:,:) = ulon(:,:,1:nblocks)*rad_to_deg - CASE ('ULAT') - workd2(:,:,:) = ulat(:,:,1:nblocks)*rad_to_deg - CASE ('NLON') - workd2(:,:,:) = nlon(:,:,1:nblocks)*rad_to_deg - CASE ('NLAT') - workd2(:,:,:) = nlat(:,:,1:nblocks)*rad_to_deg - CASE ('ELON') - workd2(:,:,:) = elon(:,:,1:nblocks)*rad_to_deg - CASE ('ELAT') - workd2(:,:,:) = elat(:,:,1:nblocks)*rad_to_deg - END SELECT - if (history_precision == 8) then - call pio_write_darray(File, varid, iodesc2d, & - workd2, status, fillval=spval_dbl) - else - workr2 = workd2 - call pio_write_darray(File, varid, iodesc2d, & - workr2, status, fillval=spval) - endif - - call ice_pio_check(status,subname//' ERROR: writing '//avail_hist_fields(n)%vname, & - file=__FILE__,line=__LINE__) - endif - enddo - - ! Extra dimensions (NCAT, NFSD, VGRD*) - - do i = 1, nvar_grdz - if (igrdz(i)) then - call ice_pio_check(pio_inq_varid(File, var_grdz(i)%short_name, varid), & - subname//' ERROR: getting '//var_grdz(i)%short_name,file=__FILE__,line=__LINE__) + allocate(workd2(nx_block,ny_block,nblocks)) + allocate(workr2(nx_block,ny_block,nblocks)) + + do i = 1,ncoord + status = pio_inq_varid(File, var_coord(i)%short_name, varid) + SELECT CASE (var_coord(i)%short_name) + CASE ('TLON') + ! 
Convert T grid longitude from -180 -> 180 to 0 to 360 + workd2(:,:,:) = mod(tlon(:,:,1:nblocks)*rad_to_deg + c360, c360) + CASE ('TLAT') + workd2(:,:,:) = tlat(:,:,1:nblocks)*rad_to_deg + CASE ('ULON') + workd2(:,:,:) = ulon(:,:,1:nblocks)*rad_to_deg + CASE ('ULAT') + workd2(:,:,:) = ulat(:,:,1:nblocks)*rad_to_deg + CASE ('NLON') + workd2(:,:,:) = nlon(:,:,1:nblocks)*rad_to_deg + CASE ('NLAT') + workd2(:,:,:) = nlat(:,:,1:nblocks)*rad_to_deg + CASE ('ELON') + workd2(:,:,:) = elon(:,:,1:nblocks)*rad_to_deg + CASE ('ELAT') + workd2(:,:,:) = elat(:,:,1:nblocks)*rad_to_deg + END SELECT + if (history_precision == 8) then + call pio_write_darray(File, varid, iodesc2d, & + workd2, status, fillval=spval_dbl) + else + workr2 = workd2 + call pio_write_darray(File, varid, iodesc2d, & + workr2, status, fillval=spval) + endif + enddo + + ! Extra dimensions (NCAT, NFSD, VGRD*) + + do i = 1, nvar_grdz + if (igrdz(i)) then + status = pio_inq_varid(File, var_grdz(i)%short_name, varid) SELECT CASE (var_grdz(i)%short_name) - CASE ('NCAT') - call ice_pio_check(pio_put_var(File, varid, hin_max(1:ncat_hist)), & - subname//' ERROR: setting '//var_grdz(i)%short_name,file=__FILE__,line=__LINE__) - CASE ('NFSD') - call ice_pio_check(pio_put_var(File, varid, floe_rad_c(1:nfsd_hist)), & - subname//' ERROR: setting '//var_grdz(i)%short_name,file=__FILE__,line=__LINE__) - CASE ('VGRDi') - call ice_pio_check(pio_put_var(File, varid, (/(k, k=1,nzilyr)/)), & - subname//' ERROR: setting '//var_grdz(i)%short_name,file=__FILE__,line=__LINE__) - CASE ('VGRDs') - call ice_pio_check(pio_put_var(File, varid, (/(k, k=1,nzslyr)/)), & - subname//' ERROR: setting '//var_grdz(i)%short_name,file=__FILE__,line=__LINE__) - CASE ('VGRDb') - call ice_pio_check(pio_put_var(File, varid, (/(k, k=1,nzblyr)/)), & - subname//' ERROR: setting '//var_grdz(i)%short_name,file=__FILE__,line=__LINE__) - CASE ('VGRDa') - call ice_pio_check(pio_put_var(File, varid, (/(k, k=1,nzalyr)/)), & - subname//' ERROR: setting 
'//var_grdz(i)%short_name,file=__FILE__,line=__LINE__) - END SELECT - endif - enddo + CASE ('NCAT') + status = pio_put_var(File, varid, hin_max(1:ncat_hist)) + CASE ('NFSD') + status = pio_put_var(File, varid, floe_rad_c(1:nfsd_hist)) + CASE ('VGRDi') + status = pio_put_var(File, varid, (/(k, k=1,nzilyr)/)) + CASE ('VGRDs') + status = pio_put_var(File, varid, (/(k, k=1,nzslyr)/)) + CASE ('VGRDb') + status = pio_put_var(File, varid, (/(k, k=1,nzblyr)/)) + CASE ('VGRDa') + status = pio_put_var(File, varid, (/(k, k=1,nzalyr)/)) + END SELECT + endif + enddo !----------------------------------------------------------------- ! write grid masks, area and rotation angle @@ -780,51 +760,50 @@ subroutine ice_write_hist (ns) do i = 1, nvar_grd if (igrd(i)) then SELECT CASE (var_grd(i)%req%short_name) - CASE ('tmask') - workd2 = hm(:,:,1:nblocks) - CASE ('umask') - workd2 = uvm(:,:,1:nblocks) - CASE ('nmask') - workd2 = npm(:,:,1:nblocks) - CASE ('emask') - workd2 = epm(:,:,1:nblocks) - CASE ('blkmask') - workd2 = bm(:,:,1:nblocks) - CASE ('tarea') - workd2 = tarea(:,:,1:nblocks) - CASE ('uarea') - workd2 = uarea(:,:,1:nblocks) - CASE ('narea') - workd2 = narea(:,:,1:nblocks) - CASE ('earea') - workd2 = earea(:,:,1:nblocks) - CASE ('dxt') - workd2 = dxT(:,:,1:nblocks) - CASE ('dyt') - workd2 = dyT(:,:,1:nblocks) - CASE ('dxu') - workd2 = dxU(:,:,1:nblocks) - CASE ('dyu') - workd2 = dyU(:,:,1:nblocks) - CASE ('dxn') - workd2 = dxN(:,:,1:nblocks) - CASE ('dyn') - workd2 = dyN(:,:,1:nblocks) - CASE ('dxe') - workd2 = dxE(:,:,1:nblocks) - CASE ('dye') - workd2 = dyE(:,:,1:nblocks) - CASE ('HTN') - workd2 = HTN(:,:,1:nblocks) - CASE ('HTE') - workd2 = HTE(:,:,1:nblocks) - CASE ('ANGLE') - workd2 = ANGLE(:,:,1:nblocks) - CASE ('ANGLET') - workd2 = ANGLET(:,:,1:nblocks) + CASE ('tmask') + workd2 = hm(:,:,1:nblocks) + CASE ('umask') + workd2 = uvm(:,:,1:nblocks) + CASE ('nmask') + workd2 = npm(:,:,1:nblocks) + CASE ('emask') + workd2 = epm(:,:,1:nblocks) + CASE ('blkmask') + workd2 = 
bm(:,:,1:nblocks) + CASE ('tarea') + workd2 = tarea(:,:,1:nblocks) + CASE ('uarea') + workd2 = uarea(:,:,1:nblocks) + CASE ('narea') + workd2 = narea(:,:,1:nblocks) + CASE ('earea') + workd2 = earea(:,:,1:nblocks) + CASE ('dxt') + workd2 = dxT(:,:,1:nblocks) + CASE ('dyt') + workd2 = dyT(:,:,1:nblocks) + CASE ('dxu') + workd2 = dxU(:,:,1:nblocks) + CASE ('dyu') + workd2 = dyU(:,:,1:nblocks) + CASE ('dxn') + workd2 = dxN(:,:,1:nblocks) + CASE ('dyn') + workd2 = dyN(:,:,1:nblocks) + CASE ('dxe') + workd2 = dxE(:,:,1:nblocks) + CASE ('dye') + workd2 = dyE(:,:,1:nblocks) + CASE ('HTN') + workd2 = HTN(:,:,1:nblocks) + CASE ('HTE') + workd2 = HTE(:,:,1:nblocks) + CASE ('ANGLE') + workd2 = ANGLE(:,:,1:nblocks) + CASE ('ANGLET') + workd2 = ANGLET(:,:,1:nblocks) END SELECT - call ice_pio_check(pio_inq_varid(File, var_grd(i)%req%short_name, varid), & - subname//' ERROR: getting '//var_grd(i)%req%short_name,file=__FILE__,line=__LINE__) + status = pio_inq_varid(File, var_grd(i)%req%short_name, varid) if (history_precision == 8) then call pio_write_darray(File, varid, iodesc2d, & workd2, status, fillval=spval_dbl) @@ -833,9 +812,6 @@ subroutine ice_write_hist (ns) call pio_write_darray(File, varid, iodesc2d, & workr2, status, fillval=spval) endif - - call ice_pio_check(status,subname//' ERROR: writing '//avail_hist_fields(n)%vname, & - file=__FILE__,line=__LINE__) endif enddo @@ -844,63 +820,60 @@ subroutine ice_write_hist (ns) !---------------------------------------------------------------- if (f_bounds) then - allocate(workd3v(nverts,nx_block,ny_block,nblocks)) - allocate(workr3v(nverts,nx_block,ny_block,nblocks)) - workd3v (:,:,:,:) = c0 - do i = 1, nvar_verts - SELECT CASE (var_nverts(i)%short_name) - CASE ('lont_bounds') - do ivertex = 1, nverts - workd3v(ivertex,:,:,:) = lont_bounds(ivertex,:,:,1:nblocks) - enddo - CASE ('latt_bounds') - do ivertex = 1, nverts - workd3v(ivertex,:,:,:) = latt_bounds(ivertex,:,:,1:nblocks) - enddo - CASE ('lonu_bounds') - do ivertex = 1, 
nverts - workd3v(ivertex,:,:,:) = lonu_bounds(ivertex,:,:,1:nblocks) - enddo - CASE ('latu_bounds') - do ivertex = 1, nverts - workd3v(ivertex,:,:,:) = latu_bounds(ivertex,:,:,1:nblocks) - enddo - CASE ('lonn_bounds') - do ivertex = 1, nverts - workd3v(ivertex,:,:,:) = lonn_bounds(ivertex,:,:,1:nblocks) - enddo - CASE ('latn_bounds') - do ivertex = 1, nverts - workd3v(ivertex,:,:,:) = latn_bounds(ivertex,:,:,1:nblocks) - enddo - CASE ('lone_bounds') - do ivertex = 1, nverts - workd3v(ivertex,:,:,:) = lone_bounds(ivertex,:,:,1:nblocks) - enddo - CASE ('late_bounds') - do ivertex = 1, nverts - workd3v(ivertex,:,:,:) = late_bounds(ivertex,:,:,1:nblocks) - enddo - END SELECT - - call ice_pio_check(pio_inq_varid(File, var_nverts(i)%short_name, varid), & - subname//' ERROR: getting '//var_nverts(i)%short_name,file=__FILE__,line=__LINE__) - if (history_precision == 8) then - call pio_write_darray(File, varid, iodesc3dv, & + allocate(workd3v(nverts,nx_block,ny_block,nblocks)) + allocate(workr3v(nverts,nx_block,ny_block,nblocks)) + workd3v (:,:,:,:) = c0 + do i = 1, nvar_verts + SELECT CASE (var_nverts(i)%short_name) + CASE ('lont_bounds') + do ivertex = 1, nverts + workd3v(ivertex,:,:,:) = lont_bounds(ivertex,:,:,1:nblocks) + enddo + CASE ('latt_bounds') + do ivertex = 1, nverts + workd3v(ivertex,:,:,:) = latt_bounds(ivertex,:,:,1:nblocks) + enddo + CASE ('lonu_bounds') + do ivertex = 1, nverts + workd3v(ivertex,:,:,:) = lonu_bounds(ivertex,:,:,1:nblocks) + enddo + CASE ('latu_bounds') + do ivertex = 1, nverts + workd3v(ivertex,:,:,:) = latu_bounds(ivertex,:,:,1:nblocks) + enddo + CASE ('lonn_bounds') + do ivertex = 1, nverts + workd3v(ivertex,:,:,:) = lonn_bounds(ivertex,:,:,1:nblocks) + enddo + CASE ('latn_bounds') + do ivertex = 1, nverts + workd3v(ivertex,:,:,:) = latn_bounds(ivertex,:,:,1:nblocks) + enddo + CASE ('lone_bounds') + do ivertex = 1, nverts + workd3v(ivertex,:,:,:) = lone_bounds(ivertex,:,:,1:nblocks) + enddo + CASE ('late_bounds') + do ivertex = 1, nverts 
+ workd3v(ivertex,:,:,:) = late_bounds(ivertex,:,:,1:nblocks) + enddo + END SELECT + + status = pio_inq_varid(File, var_nverts(i)%short_name, varid) + if (history_precision == 8) then + call pio_write_darray(File, varid, iodesc3dv, & workd3v, status, fillval=spval_dbl) - else - workr3v = workd3v - call pio_write_darray(File, varid, iodesc3dv, & - workr3v, status, fillval=spval) - endif - - call ice_pio_check(status,subname//' ERROR: writing '//avail_hist_fields(n)%vname, & - file=__FILE__,line=__LINE__) - enddo - deallocate(workd3v) - deallocate(workr3v) + else + workr3v = workd3v + call pio_write_darray(File, varid, iodesc3dv, & + workr3v, status, fillval=spval) + endif + enddo + deallocate(workd3v) + deallocate(workr3v) endif ! f_bounds + !----------------------------------------------------------------- ! write variable data !----------------------------------------------------------------- @@ -908,16 +881,15 @@ subroutine ice_write_hist (ns) ! 2D do n=1,num_avail_hist_fields_2D if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. 
write_ic) then - call ice_pio_check(pio_inq_varid(File,avail_hist_fields(n)%vname,varid), & - subname//' ERROR: getting varid for '//avail_hist_fields(n)%vname,file=__FILE__,line=__LINE__) + status = pio_inq_varid(File,avail_hist_fields(n)%vname,varid) + if (status /= pio_noerr) call abort_ice(subname// & + 'ERROR getting varid for '//avail_hist_fields(n)%vname) workd2(:,:,:) = a2D(:,:,n,1:nblocks) - call pio_seterrorhandling(File, PIO_INTERNAL_ERROR) #ifdef CESM1_PIO call pio_setframe(varid, int(1,kind=PIO_OFFSET)) #else call pio_setframe(File, varid, int(1,kind=PIO_OFFSET_KIND)) #endif - call pio_seterrorhandling(File, PIO_RETURN_ERROR) if (history_precision == 8) then call pio_write_darray(File, varid, iodesc2d,& workd2, status, fillval=spval_dbl) @@ -926,9 +898,6 @@ subroutine ice_write_hist (ns) call pio_write_darray(File, varid, iodesc2d,& workr2, status, fillval=spval) endif - - call ice_pio_check(status,subname//' ERROR: writing '//avail_hist_fields(n)%vname, & - file=__FILE__,line=__LINE__) endif enddo ! num_avail_hist_fields_2D @@ -941,20 +910,19 @@ subroutine ice_write_hist (ns) do n = n2D + 1, n3Dccum nn = n - n2D if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. 
write_ic) then - call ice_pio_check(pio_inq_varid(File,avail_hist_fields(n)%vname,varid), & - subname//' ERROR: getting varid for '//avail_hist_fields(n)%vname,file=__FILE__,line=__LINE__) + status = pio_inq_varid(File,avail_hist_fields(n)%vname,varid) + if (status /= pio_noerr) call abort_ice(subname// & + 'ERROR: getting varid for '//avail_hist_fields(n)%vname) do j = 1, nblocks do i = 1, ncat_hist workd3(:,:,j,i) = a3Dc(:,:,i,nn,j) enddo enddo - call pio_seterrorhandling(File, PIO_INTERNAL_ERROR) #ifdef CESM1_PIO call pio_setframe(varid, int(1,kind=PIO_OFFSET)) #else call pio_setframe(File, varid, int(1,kind=PIO_OFFSET_KIND)) #endif - call pio_seterrorhandling(File, PIO_RETURN_ERROR) if (history_precision == 8) then call pio_write_darray(File, varid, iodesc3dc,& workd3, status, fillval=spval_dbl) @@ -963,9 +931,6 @@ subroutine ice_write_hist (ns) call pio_write_darray(File, varid, iodesc3dc,& workr3, status, fillval=spval) endif - - call ice_pio_check(status,subname//' ERROR: writing '//avail_hist_fields(n)%vname, & - file=__FILE__,line=__LINE__) endif enddo ! num_avail_hist_fields_3Dc deallocate(workd3) @@ -977,20 +942,19 @@ subroutine ice_write_hist (ns) do n = n3Dccum+1, n3Dzcum nn = n - n3Dccum if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. 
write_ic) then - call ice_pio_check(pio_inq_varid(File,avail_hist_fields(n)%vname,varid), & - subname//' ERROR: getting varid for '//avail_hist_fields(n)%vname,file=__FILE__,line=__LINE__) + status = pio_inq_varid(File,avail_hist_fields(n)%vname,varid) + if (status /= pio_noerr) call abort_ice(subname// & + 'ERROR: getting varid for '//avail_hist_fields(n)%vname) do j = 1, nblocks do i = 1, nzilyr workd3(:,:,j,i) = a3Dz(:,:,i,nn,j) enddo enddo - call pio_seterrorhandling(File, PIO_INTERNAL_ERROR) #ifdef CESM1_PIO call pio_setframe(varid, int(1,kind=PIO_OFFSET)) #else call pio_setframe(File, varid, int(1,kind=PIO_OFFSET_KIND)) #endif - call pio_seterrorhandling(File, PIO_RETURN_ERROR) if (history_precision == 8) then call pio_write_darray(File, varid, iodesc3di,& workd3, status, fillval=spval_dbl) @@ -999,9 +963,6 @@ subroutine ice_write_hist (ns) call pio_write_darray(File, varid, iodesc3di,& workr3, status, fillval=spval) endif - - call ice_pio_check(status,subname//' ERROR: writing '//avail_hist_fields(n)%vname, & - file=__FILE__,line=__LINE__) endif enddo ! num_avail_hist_fields_3Dz deallocate(workd3) @@ -1013,20 +974,19 @@ subroutine ice_write_hist (ns) do n = n3Dzcum+1, n3Dbcum nn = n - n3Dzcum if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. 
write_ic) then - call ice_pio_check(pio_inq_varid(File,avail_hist_fields(n)%vname,varid), & - subname//' ERROR: getting varid for '//avail_hist_fields(n)%vname,file=__FILE__,line=__LINE__) + status = pio_inq_varid(File,avail_hist_fields(n)%vname,varid) + if (status /= pio_noerr) call abort_ice(subname// & + 'ERROR: getting varid for '//avail_hist_fields(n)%vname) do j = 1, nblocks do i = 1, nzblyr workd3(:,:,j,i) = a3Db(:,:,i,nn,j) enddo enddo - call pio_seterrorhandling(File, PIO_INTERNAL_ERROR) #ifdef CESM1_PIO call pio_setframe(varid, int(1,kind=PIO_OFFSET)) #else call pio_setframe(File, varid, int(1,kind=PIO_OFFSET_KIND)) #endif - call pio_seterrorhandling(File, PIO_RETURN_ERROR) if (history_precision == 8) then call pio_write_darray(File, varid, iodesc3db,& workd3, status, fillval=spval_dbl) @@ -1035,9 +995,6 @@ subroutine ice_write_hist (ns) call pio_write_darray(File, varid, iodesc3db,& workr3, status, fillval=spval) endif - - call ice_pio_check(status,subname//' ERROR: writing '//avail_hist_fields(n)%vname, & - file=__FILE__,line=__LINE__) endif enddo ! num_avail_hist_fields_3Db deallocate(workd3) @@ -1049,20 +1006,19 @@ subroutine ice_write_hist (ns) do n = n3Dbcum+1, n3Dacum nn = n - n3Dbcum if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. 
write_ic) then - call ice_pio_check(pio_inq_varid(File,avail_hist_fields(n)%vname,varid), & - subname//' ERROR: getting varid for '//avail_hist_fields(n)%vname,file=__FILE__,line=__LINE__) + status = pio_inq_varid(File,avail_hist_fields(n)%vname,varid) + if (status /= pio_noerr) call abort_ice(subname// & + 'ERROR: getting varid for '//avail_hist_fields(n)%vname) do j = 1, nblocks do i = 1, nzalyr workd3(:,:,j,i) = a3Da(:,:,i,nn,j) enddo enddo - call pio_seterrorhandling(File, PIO_INTERNAL_ERROR) #ifdef CESM1_PIO call pio_setframe(varid, int(1,kind=PIO_OFFSET)) #else call pio_setframe(File, varid, int(1,kind=PIO_OFFSET_KIND)) #endif - call pio_seterrorhandling(File, PIO_RETURN_ERROR) if (history_precision == 8) then call pio_write_darray(File, varid, iodesc3da,& workd3, status, fillval=spval_dbl) @@ -1071,9 +1027,6 @@ subroutine ice_write_hist (ns) call pio_write_darray(File, varid, iodesc3da,& workr3, status, fillval=spval) endif - - call ice_pio_check(status,subname//' ERROR: writing '//avail_hist_fields(n)%vname, & - file=__FILE__,line=__LINE__) endif enddo ! num_avail_hist_fields_3Db deallocate(workd3) @@ -1085,20 +1038,19 @@ subroutine ice_write_hist (ns) do n = n3Dacum+1, n3Dfcum nn = n - n3Dacum if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. 
write_ic) then - call ice_pio_check(pio_inq_varid(File,avail_hist_fields(n)%vname,varid), & - subname//' ERROR: getting varid for '//avail_hist_fields(n)%vname,file=__FILE__,line=__LINE__) + status = pio_inq_varid(File,avail_hist_fields(n)%vname,varid) + if (status /= pio_noerr) call abort_ice(subname// & + 'ERROR: getting varid for '//avail_hist_fields(n)%vname) do j = 1, nblocks do i = 1, nfsd_hist workd3(:,:,j,i) = a3Df(:,:,i,nn,j) enddo enddo - call pio_seterrorhandling(File, PIO_INTERNAL_ERROR) #ifdef CESM1_PIO call pio_setframe(varid, int(1,kind=PIO_OFFSET)) #else call pio_setframe(File, varid, int(1,kind=PIO_OFFSET_KIND)) #endif - call pio_seterrorhandling(File, PIO_RETURN_ERROR) if (history_precision == 8) then call pio_write_darray(File, varid, iodesc3df,& workd3, status, fillval=spval_dbl) @@ -1107,9 +1059,6 @@ subroutine ice_write_hist (ns) call pio_write_darray(File, varid, iodesc3df,& workr3, status, fillval=spval) endif - - call ice_pio_check(status,subname//' ERROR: writing '//avail_hist_fields(n)%vname, & - file=__FILE__,line=__LINE__) endif enddo ! num_avail_hist_fields_3Df deallocate(workd3) @@ -1121,8 +1070,9 @@ subroutine ice_write_hist (ns) do n = n3Dfcum+1, n4Dicum nn = n - n3Dfcum if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - call ice_pio_check(pio_inq_varid(File,avail_hist_fields(n)%vname,varid), & - subname//' ERROR: getting varid for '//avail_hist_fields(n)%vname,file=__FILE__,line=__LINE__) + status = pio_inq_varid(File,avail_hist_fields(n)%vname,varid) + if (status /= pio_noerr) call abort_ice(subname// & + 'ERROR: getting varid for '//avail_hist_fields(n)%vname) do j = 1, nblocks do i = 1, ncat_hist do k = 1, nzilyr @@ -1130,13 +1080,11 @@ subroutine ice_write_hist (ns) enddo ! k enddo ! i enddo ! 
j - call pio_seterrorhandling(File, PIO_INTERNAL_ERROR) #ifdef CESM1_PIO call pio_setframe(varid, int(1,kind=PIO_OFFSET)) #else call pio_setframe(File, varid, int(1,kind=PIO_OFFSET_KIND)) #endif - call pio_seterrorhandling(File, PIO_RETURN_ERROR) if (history_precision == 8) then call pio_write_darray(File, varid, iodesc4di,& workd4, status, fillval=spval_dbl) @@ -1145,8 +1093,6 @@ subroutine ice_write_hist (ns) call pio_write_darray(File, varid, iodesc4di,& workr4, status, fillval=spval) endif - call ice_pio_check(status,subname//' ERROR: writing '//avail_hist_fields(n)%vname, & - file=__FILE__,line=__LINE__) endif enddo ! num_avail_hist_fields_4Di deallocate(workd4) @@ -1158,8 +1104,9 @@ subroutine ice_write_hist (ns) do n = n4Dicum+1, n4Dscum nn = n - n4Dicum if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - call ice_pio_check(pio_inq_varid(File,avail_hist_fields(n)%vname,varid), & - subname//' ERROR: getting varid for '//avail_hist_fields(n)%vname,file=__FILE__,line=__LINE__) + status = pio_inq_varid(File,avail_hist_fields(n)%vname,varid) + if (status /= pio_noerr) call abort_ice(subname// & + 'ERROR: getting varid for '//avail_hist_fields(n)%vname) do j = 1, nblocks do i = 1, ncat_hist do k = 1, nzslyr @@ -1167,13 +1114,11 @@ subroutine ice_write_hist (ns) enddo ! k enddo ! i enddo ! j - call pio_seterrorhandling(File, PIO_INTERNAL_ERROR) #ifdef CESM1_PIO call pio_setframe(varid, int(1,kind=PIO_OFFSET)) #else call pio_setframe(File, varid, int(1,kind=PIO_OFFSET_KIND)) #endif - call pio_seterrorhandling(File, PIO_RETURN_ERROR) if (history_precision == 8) then call pio_write_darray(File, varid, iodesc4ds,& workd4, status, fillval=spval_dbl) @@ -1182,9 +1127,6 @@ subroutine ice_write_hist (ns) call pio_write_darray(File, varid, iodesc4ds,& workr4, status, fillval=spval) endif - - call ice_pio_check(status,subname//' ERROR: writing '//avail_hist_fields(n)%vname, & - file=__FILE__,line=__LINE__) endif enddo ! 
num_avail_hist_fields_4Ds deallocate(workd4) @@ -1196,8 +1138,9 @@ subroutine ice_write_hist (ns) do n = n4Dscum+1, n4Dfcum nn = n - n4Dscum if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - call ice_pio_check(pio_inq_varid(File,avail_hist_fields(n)%vname,varid), & - subname//' ERROR: getting varid for '//avail_hist_fields(n)%vname,file=__FILE__,line=__LINE__) + status = pio_inq_varid(File,avail_hist_fields(n)%vname,varid) + if (status /= pio_noerr) call abort_ice(subname// & + 'ERROR: getting varid for '//avail_hist_fields(n)%vname) do j = 1, nblocks do i = 1, ncat_hist do k = 1, nfsd_hist @@ -1205,13 +1148,11 @@ subroutine ice_write_hist (ns) enddo ! k enddo ! i enddo ! j - call pio_seterrorhandling(File, PIO_INTERNAL_ERROR) #ifdef CESM1_PIO call pio_setframe(varid, int(1,kind=PIO_OFFSET)) #else call pio_setframe(File, varid, int(1,kind=PIO_OFFSET_KIND)) #endif - call pio_seterrorhandling(File, PIO_RETURN_ERROR) if (history_precision == 8) then call pio_write_darray(File, varid, iodesc4df,& workd4, status, fillval=spval_dbl) @@ -1220,8 +1161,6 @@ subroutine ice_write_hist (ns) call pio_write_darray(File, varid, iodesc4df,& workr4, status, fillval=spval) endif - call ice_pio_check(status,subname//' ERROR: writing '//avail_hist_fields(n)%vname, & - file=__FILE__,line=__LINE__) endif enddo ! num_avail_hist_fields_4Df deallocate(workd4) @@ -1229,10 +1168,10 @@ subroutine ice_write_hist (ns) ! similarly for num_avail_hist_fields_4Db (define workd4b, iodesc4db) + !----------------------------------------------------------------- ! 
clean-up PIO descriptors !----------------------------------------------------------------- - call pio_seterrorhandling(File, PIO_INTERNAL_ERROR) call pio_freedecomp(File,iodesc2d) call pio_freedecomp(File,iodesc3dv) @@ -1252,160 +1191,61 @@ subroutine ice_write_hist (ns) call pio_closefile(File) if (my_task == master_task) then write(nu_diag,*) ' ' - write(nu_diag,*) 'Finished writing ',trim(ncfile) + write(nu_diag,*) 'Finished writing ',trim(ncfile(ns)) endif - first_call = .false. - end subroutine ice_write_hist - -!======================================================================= -! Defines a coordinate var in the history file -! coordinates have short_name, long_name and units attributes, -! and are compressed for 'hdf5' when more than one dimensional - - subroutine ice_hist_coord_def(File, coord,lprecision, dimids,varid) - - use pio, only: file_desc_t, var_desc_t, pio_def_var, pio_put_att -#ifndef USE_PIO1 - use pio, only: pio_def_var_deflate - use pio_nf, only: pio_def_var_chunking !This is missing from pio module <2.6.0 - use netcdf, only: NF90_CHUNKED - use ice_history_shared, only: history_deflate, history_chunksize, history_format -#endif - - type(file_desc_t), intent(inout) :: File - type(coord_attributes), intent(in) :: coord - integer(kind=int_kind), intent(in) :: dimids(:), lprecision - type(var_desc_t), intent(inout) :: varid - - ! local vars - integer(kind=int_kind) :: chunks(size(dimids)), i, status - - character(len=*), parameter :: subname = '(ice_hist_coord_def)' - - !define var, set deflate, long_name and units - status = pio_def_var(File, coord%short_name, lprecision, dimids, varid) - call ice_pio_check(status, & - subname//' ERROR: defining coord '//coord%short_name,file=__FILE__,line=__LINE__) -#ifndef USE_PIO1 - if (history_deflate/=0 .and. 
history_format=='hdf5') then - status = pio_def_var_deflate(File, varid, shuffle=0, deflate=1, deflate_level=history_deflate) - call ice_pio_check(status, & - subname//' ERROR: deflating coord '//coord%short_name,file=__FILE__,line=__LINE__) - endif - - if (history_format=='hdf5' .and. size(dimids)>1) then - if (dimids(1)==imtid .and. dimids(2)==jmtid) then - chunks(1)=history_chunksize(1) - chunks(2)=history_chunksize(2) - do i = 3, size(dimids) - chunks(i) = 0 - enddo - status = pio_def_var_chunking(File, varid, NF90_CHUNKED, chunks) - call ice_pio_check(status, & - subname//' ERROR: chunking coord '//coord%short_name,file=__FILE__,line=__LINE__) - endif - endif -#endif - call ice_pio_check(pio_put_att(File,varid,'long_name',trim(coord%long_name)), & - subname//' ERROR: defining att long_name '//coord%long_name,file=__FILE__,line=__LINE__) - call ice_pio_check(pio_put_att(File, varid, 'units', trim(coord%units)), & - subname//' ERROR: defining att units '//coord%units,file=__FILE__,line=__LINE__) - - end subroutine ice_hist_coord_def - !======================================================================= -! Defines a (time-dependent) history var in the history file -! variables have short_name, long_name and units, coordiantes and cell_measures attributes, -! 
and are compressed and chunked for 'hdf5' - - subroutine ice_hist_field_def(File, hfield,lprecision, dimids, ns) - - use pio, only: file_desc_t , var_desc_t, pio_def_var, pio_put_att -#ifndef USE_PIO1 - use pio, only: pio_def_var_deflate - use pio_nf, only: pio_def_var_chunking !This is missing from pio module <2.6.0 - use netcdf, only: NF90_CHUNKED - use ice_history_shared, only: history_deflate, history_chunksize, history_format -#endif - use ice_history_shared, only: ice_hist_field, history_precision, hist_avg - use ice_calendar, only: histfreq, histfreq_n, write_ic - - type(file_desc_t), intent(inout) :: File - type(ice_hist_field) , intent(in) :: hfield - integer(kind=int_kind), intent(in) :: dimids(:), lprecision, ns - ! local vars - type(var_desc_t) :: varid - integer(kind=int_kind) :: chunks(size(dimids)), i, status + subroutine ice_write_hist_attrs(File, varid, hfield, ns) - character(len=*), parameter :: subname = '(ice_hist_field_def)' - - status = pio_def_var(File, hfield%vname, lprecision, dimids, varid) - call ice_pio_check(status, & - subname//' ERROR: defining var '//hfield%vname,file=__FILE__,line=__LINE__) + use ice_kinds_mod + use ice_calendar, only: histfreq, histfreq_n + use ice_history_shared, only: ice_hist_field, history_precision, & + hist_avg + use ice_pio + use pio -#ifndef USE_PIO1 - if (history_deflate/=0 .and. history_format=='hdf5') then - status = pio_def_var_deflate(File, varid, shuffle=0, deflate=1, deflate_level=history_deflate) - call ice_pio_check(status, & - subname//' ERROR: deflating var '//hfield%vname,file=__FILE__,line=__LINE__) - endif + type(file_desc_t) :: File ! file id + type(var_desc_t) :: varid ! variable id + type (ice_hist_field), intent(in) :: hfield ! history file info + integer (kind=int_kind), intent(in) :: ns - if (history_format=='hdf5' .and. size(dimids)>1) then - if (dimids(1)==imtid .and. 
dimids(2)==jmtid) then - chunks(1)=history_chunksize(1) - chunks(2)=history_chunksize(2) - do i = 3, size(dimids) - chunks(i) = 0 - enddo - status = pio_def_var_chunking(File, varid, NF90_CHUNKED, chunks) - call ice_pio_check(status, subname//' ERROR: chunking var '//hfield%vname,file=__FILE__,line=__LINE__) - endif - endif -#endif + ! local variables - !var attributes + integer (kind=int_kind) :: status + character(len=*), parameter :: subname = '(ice_write_hist_attrs)' - call ice_pio_check(pio_put_att(File,varid,'units', trim(hfield%vunit)), & - subname//' ERROR: defining att units '//trim(hfield%vunit),file=__FILE__,line=__LINE__) + status = pio_put_att(File,varid,'units', trim(hfield%vunit)) - call ice_pio_check(pio_put_att(File,varid, 'long_name', trim(hfield%vdesc)), & - subname//' ERROR: defining att long_name '//trim(hfield%vdesc),file=__FILE__,line=__LINE__) + status = pio_put_att(File,varid, 'long_name', trim(hfield%vdesc)) - call ice_pio_check(pio_put_att(File,varid,'coordinates', trim(hfield%vcoord)), & - subname//' ERROR: defining att coordinates '//trim(hfield%vdesc),file=__FILE__,line=__LINE__) + status = pio_put_att(File,varid,'coordinates', trim(hfield%vcoord)) - call ice_pio_check(pio_put_att(File,varid,'cell_measures',trim(hfield%vcellmeas)), & - subname//' ERROR: defining att cell_measures '//trim(hfield%vcoord),file=__FILE__,line=__LINE__) + status = pio_put_att(File,varid,'cell_measures', trim(hfield%vcellmeas)) if (hfield%vcomment /= "none") then - call ice_pio_check(pio_put_att(File,varid,'comment', trim(hfield%vcomment)), & - subname//' ERROR: defining att comment '//trim(hfield%vcomment),file=__FILE__,line=__LINE__) + status = pio_put_att(File,varid,'comment', trim(hfield%vcomment)) endif call ice_write_hist_fill(File,varid,hfield%vname,history_precision) ! Add cell_methods attribute to variables if averaged - if (hist_avg(ns) .and. .not. 
write_ic) then + if (hist_avg) then if (TRIM(hfield%vname(1:4))/='sig1' & .and.TRIM(hfield%vname(1:4))/='sig2' & .and.TRIM(hfield%vname(1:9))/='sistreave' & .and.TRIM(hfield%vname(1:9))/='sistremax' & .and.TRIM(hfield%vname(1:4))/='sigP') then - call ice_pio_check(pio_put_att(File,varid,'cell_methods','time: mean'), & - subname//' ERROR: defining att cell_methods',file=__FILE__,line=__LINE__) + status = pio_put_att(File,varid,'cell_methods','time: mean') endif endif if ((histfreq(ns) == '1' .and. histfreq_n(ns) == 1) & - .or..not. hist_avg(ns) & - .or. write_ic & + .or..not. hist_avg & .or.TRIM(hfield%vname(1:4))=='divu' & .or.TRIM(hfield%vname(1:5))=='shear' & - .or.TRIM(hfield%vname(1:4))=='vort' & .or.TRIM(hfield%vname(1:4))=='sig1' & .or.TRIM(hfield%vname(1:4))=='sig2' & .or.TRIM(hfield%vname(1:4))=='sigP' & @@ -1416,26 +1256,25 @@ subroutine ice_hist_field_def(File, hfield,lprecision, dimids, ns) .or.TRIM(hfield%vname(1:9))=='frz_onset' & .or.TRIM(hfield%vname(1:6))=='hisnap' & .or.TRIM(hfield%vname(1:6))=='aisnap') then - call ice_pio_check(pio_put_att(File,varid,'time_rep','instantaneous'), & - subname//' ERROR: defining att time_rep i',file=__FILE__,line=__LINE__) + status = pio_put_att(File,varid,'time_rep','instantaneous') else - call ice_pio_check(pio_put_att(File,varid,'time_rep','averaged'), & - subname//' ERROR: defining att time_rep a',file=__FILE__,line=__LINE__) + status = pio_put_att(File,varid,'time_rep','averaged') endif - end subroutine ice_hist_field_def + end subroutine ice_write_hist_attrs !======================================================================= -! 
Defines missing_value and _FillValue attributes subroutine ice_write_hist_fill(File,varid,vname,precision) - use pio, only: pio_put_att, file_desc_t, var_desc_t + use ice_kinds_mod + use ice_pio + use pio - type(file_desc_t), intent(inout) :: File - type(var_desc_t), intent(in) :: varid - character(len=*), intent(in) :: vname - integer (kind=int_kind), intent(in) :: precision + type(file_desc_t) , intent(inout) :: File + type(var_desc_t) , intent(in) :: varid + character(len=*), intent(in) :: vname ! var name + integer (kind=int_kind), intent(in) :: precision ! precision ! local variables @@ -1443,15 +1282,11 @@ subroutine ice_write_hist_fill(File,varid,vname,precision) character(len=*), parameter :: subname = '(ice_write_hist_fill)' if (precision == 8) then - call ice_pio_check(pio_put_att(File, varid, 'missing_value', spval_dbl), & - subname//' ERROR: defining att missing_value',file=__FILE__,line=__LINE__) - call ice_pio_check(pio_put_att(File, varid,'_FillValue',spval_dbl), & - subname//' ERROR: defining att _FillValue',file=__FILE__,line=__LINE__) + status = pio_put_att(File, varid, 'missing_value', spval_dbl) + status = pio_put_att(File, varid,'_FillValue',spval_dbl) else - call ice_pio_check(pio_put_att(File, varid, 'missing_value', spval), & - subname//' ERROR: defining att missing_value',file=__FILE__,line=__LINE__) - call ice_pio_check(pio_put_att(File, varid,'_FillValue',spval), & - subname//' ERROR: defining att _FillValue',file=__FILE__,line=__LINE__) + status = pio_put_att(File, varid, 'missing_value', spval) + status = pio_put_att(File, varid,'_FillValue',spval) endif end subroutine ice_write_hist_fill diff --git a/cicecore/cicedyn/infrastructure/io/io_pio2/ice_restart.F90 b/cicecore/cicedyn/infrastructure/io/io_pio2/ice_restart.F90 index fdb9330d2..4d2341b29 100644 --- a/cicecore/cicedyn/infrastructure/io/io_pio2/ice_restart.F90 +++ b/cicecore/cicedyn/infrastructure/io/io_pio2/ice_restart.F90 @@ -6,11 +6,13 @@ module ice_restart use ice_broadcast - 
use ice_communicate, only: my_task, master_task use ice_exit, only: abort_ice use ice_fileunits, only: nu_diag, nu_restart, nu_rst_pointer use ice_kinds_mod - use ice_restart_shared + use ice_restart_shared, only: & + restart, restart_ext, restart_dir, restart_file, pointer_file, & + runid, runtype, use_restart_time, restart_format, lcdf64, lenstr, & + restart_coszen use ice_pio use pio use icepack_intfc, only: icepack_warnings_flush, icepack_warnings_aborted @@ -20,8 +22,7 @@ module ice_restart implicit none private public :: init_restart_write, init_restart_read, & - read_restart_field, write_restart_field, final_restart, & - query_field + read_restart_field, write_restart_field, final_restart type(file_desc_t) :: File type(var_desc_t) :: vardesc @@ -29,8 +30,6 @@ module ice_restart type(io_desc_t) :: iodesc2d type(io_desc_t) :: iodesc3d_ncat - integer (kind=int_kind) :: dimid_ni, dimid_nj - !======================================================================= contains @@ -44,6 +43,7 @@ subroutine init_restart_read(ice_ic) use ice_calendar, only: istep0, istep1, myear, mmonth, & mday, msec, npt + use ice_communicate, only: my_task, master_task use ice_domain_size, only: ncat use ice_read_write, only: ice_open @@ -54,9 +54,9 @@ subroutine init_restart_read(ice_ic) character(len=char_len_long) :: & filename, filename0 - integer (kind=int_kind) :: status + integer (kind=int_kind) :: status, status1 - logical (kind=log_kind), save :: first_call = .true. + integer (kind=int_kind) :: iotype character(len=*), parameter :: subname = '(init_restart_read)' @@ -77,54 +77,40 @@ subroutine init_restart_read(ice_ic) write(nu_diag,*) 'Using restart dump=', trim(filename) end if - File%fh=-1 -! tcraig, including fformat here causes some problems when restart_format=hdf5 -! and reading non hdf5 files with spack built PIO. Excluding the fformat -! argument here defaults the PIO format to cdf1 which then reads -! any netcdf format file fine. 
- call ice_pio_init(mode='read', filename=trim(filename), File=File, & -! fformat=trim(restart_format), rearr=trim(restart_rearranger), & - rearr=trim(restart_rearranger), & - iotasks=restart_iotasks, root=restart_root, stride=restart_stride, & - debug=first_call) - - call pio_seterrorhandling(File, PIO_RETURN_ERROR) - - call ice_pio_initdecomp(iodesc=iodesc2d, precision=8) - call ice_pio_initdecomp(ndim3=ncat, iodesc=iodesc3d_ncat, remap=.true., precision=8) - - if (use_restart_time) then - ! for backwards compatibility, check nyr, month, and sec as well - call ice_pio_check(pio_get_att(File, pio_global, 'istep1', istep0), & - subname//" ERROR: reading restart step ",file=__FILE__,line=__LINE__) - - status = pio_get_att(File, pio_global, 'myear', myear) - if (status /= PIO_NOERR) then - call ice_pio_check(pio_get_att(File, pio_global, 'nyr', myear), & - subname//" ERROR: reading restart year ",file=__FILE__,line=__LINE__) - endif - - status = pio_get_att(File, pio_global, 'mmonth', mmonth) - if (status /= PIO_NOERR) then - call ice_pio_check(pio_get_att(File, pio_global, 'month', mmonth), & - subname//" ERROR: reading restart month ",file=__FILE__,line=__LINE__) - endif - - call ice_pio_check(pio_get_att(File, pio_global, 'mday', mday), & - subname//" ERROR: reading restart day ",file=__FILE__,line=__LINE__) - - status = pio_get_att(File, pio_global, 'msec', msec) - if (status /= PIO_NOERR) then - call ice_pio_check(pio_get_att(File, pio_global, 'sec', msec), & - subname//" ERROR: reading restart sec ",file=__FILE__,line=__LINE__) - endif - endif ! use namelist values if use_restart_time = F - - call pio_seterrorhandling(File, PIO_INTERNAL_ERROR) +! 
if (restart_format(1:3) == 'pio') then + iotype = PIO_IOTYPE_NETCDF + if (restart_format == 'pio_pnetcdf') iotype = PIO_IOTYPE_PNETCDF + File%fh=-1 + call ice_pio_init(mode='read', filename=trim(filename), File=File, iotype=iotype) + + call ice_pio_initdecomp(iodesc=iodesc2d, precision=8) + call ice_pio_initdecomp(ndim3=ncat , iodesc=iodesc3d_ncat,remap=.true., precision=8) + + if (use_restart_time) then + status1 = PIO_noerr + status = pio_get_att(File, pio_global, 'istep1', istep0) +! status = pio_get_att(File, pio_global, 'time', time) +! status = pio_get_att(File, pio_global, 'time_forc', time_forc) + call pio_seterrorhandling(File, PIO_BCAST_ERROR) + status = pio_get_att(File, pio_global, 'myear', myear) + if (status /= PIO_noerr) status = pio_get_att(File, pio_global, 'nyr', myear) + if (status /= PIO_noerr) status1 = status + status = pio_get_att(File, pio_global, 'mmonth', mmonth) + if (status /= PIO_noerr) status = pio_get_att(File, pio_global, 'month', mmonth) + if (status /= PIO_noerr) status1 = status + status = pio_get_att(File, pio_global, 'mday', mday) + if (status /= PIO_noerr) status1 = status + status = pio_get_att(File, pio_global, 'msec', msec) + if (status /= PIO_noerr) status = pio_get_att(File, pio_global, 'sec', msec) + if (status /= PIO_noerr) status1 = status + if (status1 /= PIO_noerr) & + call abort_ice(subname//"ERROR: reading restart time ") + call pio_seterrorhandling(File, PIO_INTERNAL_ERROR) + endif ! use namelist values if use_restart_time = F +! endif if (my_task == master_task) then - write(nu_diag,'(a,i8,4x,i4.4,a,i2.2,a,i2.2,a,i5.5)') & - 'Restart read at istep=',istep0,myear,'-',mmonth,'-',mday,'-',msec + write(nu_diag,*) 'Restart read at istep=',istep0,myear,mmonth,mday,msec endif call broadcast_scalar(istep0,master_task) @@ -132,6 +118,9 @@ subroutine init_restart_read(ice_ic) call broadcast_scalar(mmonth,master_task) call broadcast_scalar(mday,master_task) call broadcast_scalar(msec,master_task) +! 
call broadcast_scalar(time,master_task) +! call broadcast_scalar(time_forc,master_task) + call broadcast_scalar(myear,master_task) istep1 = istep0 @@ -140,8 +129,6 @@ subroutine init_restart_read(ice_ic) npt = npt - istep0 endif - first_call = .false. - end subroutine init_restart_read !======================================================================= @@ -152,73 +139,76 @@ end subroutine init_restart_read subroutine init_restart_write(filename_spec) use ice_calendar, only: msec, mmonth, mday, myear, istep1 + use ice_communicate, only: my_task, master_task use ice_domain_size, only: nx_global, ny_global, ncat, nilyr, nslyr, & n_iso, n_aero, nblyr, n_zaero, n_algae, n_doc, & n_dic, n_don, n_fed, n_fep, nfsd use ice_dyn_shared, only: kdyn use ice_arrays_column, only: oceanmixed_ice - use ice_grid, only: grid_ice - - character(len=char_len_long), intent(in), optional :: filename_spec - - ! local variables logical (kind=log_kind) :: & - skl_bgc, z_tracers + solve_zsal, skl_bgc, z_tracers logical (kind=log_kind) :: & - tr_iage, tr_FY, tr_lvl, tr_iso, tr_aero, & - tr_pond_topo, tr_pond_lvl, tr_brine, tr_snow, & - tr_bgc_N, tr_bgc_C, tr_bgc_Nit, & - tr_bgc_Sil, tr_bgc_DMS, & - tr_bgc_chl, tr_bgc_Am, & - tr_bgc_PON, tr_bgc_DON, & - tr_zaero, tr_bgc_Fe, & - tr_bgc_hum, tr_fsd + tr_iage, tr_FY, tr_lvl, tr_iso, tr_aero, & + tr_pond_topo, tr_pond_lvl, tr_brine, tr_snow, & + tr_bgc_N, tr_bgc_C, tr_bgc_Nit, & + tr_bgc_Sil, tr_bgc_DMS, & + tr_bgc_chl, tr_bgc_Am, & + tr_bgc_PON, tr_bgc_DON, & + tr_zaero, tr_bgc_Fe, & + tr_bgc_hum, tr_fsd + + integer (kind=int_kind) :: & + nbtrcr + + character(len=char_len_long), intent(in), optional :: filename_spec - integer (kind=int_kind) :: nbtrcr + ! 
local variables character(len=char_len_long) :: filename - integer (kind=int_kind) :: & - dimid_ncat, dimid_nilyr, dimid_nslyr, dimid_naero + integer (kind=int_kind) :: dimid_ni, dimid_nj, dimid_ncat, & + dimid_nilyr, dimid_nslyr, dimid_naero integer (kind=int_kind), allocatable :: dims(:) - integer (kind=int_kind) :: k, n ! loop index + integer (kind=int_kind) :: iotype - character (len=3) :: nchar, ncharb + integer (kind=int_kind) :: & + k, n, & ! loop index + status ! status variable from netCDF routine - logical (kind=log_kind), save :: first_call = .true. + character (len=3) :: nchar, ncharb character(len=*), parameter :: subname = '(init_restart_write)' call icepack_query_tracer_sizes(nbtrcr_out=nbtrcr) call icepack_query_tracer_flags( & - tr_iage_out=tr_iage, tr_FY_out=tr_FY, tr_lvl_out=tr_lvl, & - tr_iso_out=tr_iso, tr_aero_out=tr_aero, & - tr_pond_topo_out=tr_pond_topo, tr_pond_lvl_out=tr_pond_lvl, & - tr_snow_out=tr_snow, tr_brine_out=tr_brine, & - tr_bgc_N_out=tr_bgc_N, tr_bgc_C_out=tr_bgc_C, tr_bgc_Nit_out=tr_bgc_Nit, & - tr_bgc_Sil_out=tr_bgc_Sil, tr_bgc_DMS_out=tr_bgc_DMS, & - tr_bgc_chl_out=tr_bgc_chl, tr_bgc_Am_out=tr_bgc_Am, & - tr_bgc_PON_out=tr_bgc_PON, tr_bgc_DON_out=tr_bgc_DON, & - tr_zaero_out=tr_zaero, tr_bgc_Fe_out=tr_bgc_Fe, & - tr_bgc_hum_out=tr_bgc_hum, tr_fsd_out=tr_fsd) - call icepack_query_parameters(skl_bgc_out=skl_bgc, & - z_tracers_out=z_tracers) + tr_iage_out=tr_iage, tr_FY_out=tr_FY, tr_lvl_out=tr_lvl, & + tr_iso_out=tr_iso, tr_aero_out=tr_aero, & + tr_pond_topo_out=tr_pond_topo, tr_pond_lvl_out=tr_pond_lvl, & + tr_snow_out=tr_snow, tr_brine_out=tr_brine, & + tr_bgc_N_out=tr_bgc_N, tr_bgc_C_out=tr_bgc_C, tr_bgc_Nit_out=tr_bgc_Nit, & + tr_bgc_Sil_out=tr_bgc_Sil, tr_bgc_DMS_out=tr_bgc_DMS, & + tr_bgc_chl_out=tr_bgc_chl, tr_bgc_Am_out=tr_bgc_Am, & + tr_bgc_PON_out=tr_bgc_PON, tr_bgc_DON_out=tr_bgc_DON, & + tr_zaero_out=tr_zaero, tr_bgc_Fe_out=tr_bgc_Fe, & + tr_bgc_hum_out=tr_bgc_hum, tr_fsd_out=tr_fsd) + call 
icepack_query_parameters(solve_zsal_out=solve_zsal, skl_bgc_out=skl_bgc, & + z_tracers_out=z_tracers) call icepack_warnings_flush(nu_diag) if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) + file=__FILE__, line=__LINE__) ! construct path/file if (present(filename_spec)) then filename = trim(filename_spec) else write(filename,'(a,a,a,i4.4,a,i2.2,a,i2.2,a,i5.5)') & - restart_dir(1:lenstr(restart_dir)), & - restart_file(1:lenstr(restart_file)),'.', & - myear,'-',mmonth,'-',mday,'-',msec + restart_dir(1:lenstr(restart_dir)), & + restart_file(1:lenstr(restart_file)),'.', & + myear,'-',mmonth,'-',mday,'-',msec end if if (restart_format(1:3) /= 'bin') filename = trim(filename) // '.nc' @@ -230,126 +220,96 @@ subroutine init_restart_write(filename_spec) close(nu_rst_pointer) endif - File%fh=-1 - call ice_pio_init(mode='write',filename=trim(filename), File=File, & - clobber=.true., fformat=trim(restart_format), rearr=trim(restart_rearranger), & - iotasks=restart_iotasks, root=restart_root, stride=restart_stride, & - debug=first_call) - - call pio_seterrorhandling(File, PIO_RETURN_ERROR) - - call ice_pio_check(pio_put_att(File,pio_global,'istep1',istep1), & - subname//' ERROR: writing restart step',file=__FILE__,line=__LINE__) - call ice_pio_check(pio_put_att(File,pio_global,'myear',myear), & - subname//' ERROR: writing restart year',file=__FILE__,line=__LINE__) - call ice_pio_check(pio_put_att(File,pio_global,'mmonth',mmonth), & - subname//' ERROR: writing restart month',file=__FILE__,line=__LINE__) - call ice_pio_check(pio_put_att(File,pio_global,'mday',mday), & - subname//' ERROR: writing restart day',file=__FILE__,line=__LINE__) - call ice_pio_check(pio_put_att(File,pio_global,'msec',msec), & - subname//' ERROR: writing restart sec',file=__FILE__,line=__LINE__) - - call ice_pio_check(pio_def_dim(File,'ni',nx_global,dimid_ni), & - subname//' ERROR: defining restart dim ni',file=__FILE__,line=__LINE__) - call 
ice_pio_check(pio_def_dim(File,'nj',ny_global,dimid_nj), & - subname//' ERROR: defining restart dim nj',file=__FILE__,line=__LINE__) - call ice_pio_check(pio_def_dim(File,'ncat',ncat,dimid_ncat), & - subname//' ERROR: defining restart dim ncat',file=__FILE__,line=__LINE__) +! if (restart_format(1:3) == 'pio') then + + iotype = PIO_IOTYPE_NETCDF + if (restart_format == 'pio_pnetcdf') iotype = PIO_IOTYPE_PNETCDF + File%fh=-1 + call ice_pio_init(mode='write',filename=trim(filename), File=File, & + clobber=.true., cdf64=lcdf64, iotype=iotype) + + status = pio_put_att(File,pio_global,'istep1',istep1) +! status = pio_put_att(File,pio_global,'time',time) +! status = pio_put_att(File,pio_global,'time_forc',time_forc) + status = pio_put_att(File,pio_global,'myear',myear) + status = pio_put_att(File,pio_global,'mmonth',mmonth) + status = pio_put_att(File,pio_global,'mday',mday) + status = pio_put_att(File,pio_global,'msec',msec) + + status = pio_def_dim(File,'ni',nx_global,dimid_ni) + status = pio_def_dim(File,'nj',ny_global,dimid_nj) + status = pio_def_dim(File,'ncat',ncat,dimid_ncat) !----------------------------------------------------------------- ! 
2D restart fields !----------------------------------------------------------------- - allocate(dims(2)) + allocate(dims(2)) - dims(1) = dimid_ni - dims(2) = dimid_nj + dims(1) = dimid_ni + dims(2) = dimid_nj - call define_rest_field(File,'uvel',dims) - call define_rest_field(File,'vvel',dims) + call define_rest_field(File,'uvel',dims) + call define_rest_field(File,'vvel',dims) + if (restart_coszen) call define_rest_field(File,'coszen',dims) + call define_rest_field(File,'scale_factor',dims) + call define_rest_field(File,'swvdr',dims) + call define_rest_field(File,'swvdf',dims) + call define_rest_field(File,'swidr',dims) + call define_rest_field(File,'swidf',dims) - if (grid_ice == 'CD') then - call define_rest_field(File,'uvelE',dims) - call define_rest_field(File,'vvelE',dims) - call define_rest_field(File,'uvelN',dims) - call define_rest_field(File,'vvelN',dims) - endif + call define_rest_field(File,'strocnxT',dims) + call define_rest_field(File,'strocnyT',dims) - if (grid_ice == 'C') then - call define_rest_field(File,'uvelE',dims) - call define_rest_field(File,'vvelN',dims) - endif + call define_rest_field(File,'stressp_1',dims) + call define_rest_field(File,'stressp_2',dims) + call define_rest_field(File,'stressp_3',dims) + call define_rest_field(File,'stressp_4',dims) - if (restart_coszen) call define_rest_field(File,'coszen',dims) - call define_rest_field(File,'scale_factor',dims) - call define_rest_field(File,'swvdr',dims) - call define_rest_field(File,'swvdf',dims) - call define_rest_field(File,'swidr',dims) - call define_rest_field(File,'swidf',dims) - - call define_rest_field(File,'strocnxT',dims) - call define_rest_field(File,'strocnyT',dims) - - call define_rest_field(File,'stressp_1',dims) - call define_rest_field(File,'stressp_2',dims) - call define_rest_field(File,'stressp_3',dims) - call define_rest_field(File,'stressp_4',dims) - - call define_rest_field(File,'stressm_1',dims) - call define_rest_field(File,'stressm_2',dims) - call 
define_rest_field(File,'stressm_3',dims) - call define_rest_field(File,'stressm_4',dims) - - call define_rest_field(File,'stress12_1',dims) - call define_rest_field(File,'stress12_2',dims) - call define_rest_field(File,'stress12_3',dims) - call define_rest_field(File,'stress12_4',dims) - - call define_rest_field(File,'iceumask',dims) - - if (grid_ice == 'CD' .or. grid_ice == 'C') then - call define_rest_field(File,'stresspT' ,dims) - call define_rest_field(File,'stressmT' ,dims) - call define_rest_field(File,'stress12T',dims) - call define_rest_field(File,'stresspU' ,dims) - call define_rest_field(File,'stressmU' ,dims) - call define_rest_field(File,'stress12U',dims) - call define_rest_field(File,'icenmask',dims) - call define_rest_field(File,'iceemask',dims) - endif + call define_rest_field(File,'stressm_1',dims) + call define_rest_field(File,'stressm_2',dims) + call define_rest_field(File,'stressm_3',dims) + call define_rest_field(File,'stressm_4',dims) - if (oceanmixed_ice) then - call define_rest_field(File,'sst',dims) - call define_rest_field(File,'frzmlt',dims) - endif + call define_rest_field(File,'stress12_1',dims) + call define_rest_field(File,'stress12_2',dims) + call define_rest_field(File,'stress12_3',dims) + call define_rest_field(File,'stress12_4',dims) - if (tr_FY) then - call define_rest_field(File,'frz_onset',dims) - end if + call define_rest_field(File,'iceumask',dims) - if (kdyn == 2) then - call define_rest_field(File,'a11_1',dims) - call define_rest_field(File,'a11_2',dims) - call define_rest_field(File,'a11_3',dims) - call define_rest_field(File,'a11_4',dims) - call define_rest_field(File,'a12_1',dims) - call define_rest_field(File,'a12_2',dims) - call define_rest_field(File,'a12_3',dims) - call define_rest_field(File,'a12_4',dims) - endif + if (oceanmixed_ice) then + call define_rest_field(File,'sst',dims) + call define_rest_field(File,'frzmlt',dims) + endif - if (tr_pond_lvl) then - call define_rest_field(File,'fsnow',dims) - endif + if 
(tr_FY) then + call define_rest_field(File,'frz_onset',dims) + end if + + if (kdyn == 2) then + call define_rest_field(File,'a11_1',dims) + call define_rest_field(File,'a11_2',dims) + call define_rest_field(File,'a11_3',dims) + call define_rest_field(File,'a11_4',dims) + call define_rest_field(File,'a12_1',dims) + call define_rest_field(File,'a12_2',dims) + call define_rest_field(File,'a12_3',dims) + call define_rest_field(File,'a12_4',dims) + endif + + if (tr_pond_lvl) then + call define_rest_field(File,'fsnow',dims) + endif - if (nbtrcr > 0) then - if (tr_bgc_N) then + if (nbtrcr > 0) then + if (tr_bgc_N) then do k=1,n_algae write(nchar,'(i3.3)') k call define_rest_field(File,'algalN'//trim(nchar),dims) enddo - endif - if (tr_bgc_C) then + endif + if (tr_bgc_C) then do k=1,n_doc write(nchar,'(i3.3)') k call define_rest_field(File,'doc'//trim(nchar),dims) @@ -358,25 +318,25 @@ subroutine init_restart_write(filename_spec) write(nchar,'(i3.3)') k call define_rest_field(File,'dic'//trim(nchar),dims) enddo - endif - call define_rest_field(File,'nit' ,dims) - if (tr_bgc_Am) & + endif + call define_rest_field(File,'nit' ,dims) + if (tr_bgc_Am) & call define_rest_field(File,'amm' ,dims) - if (tr_bgc_Sil) & + if (tr_bgc_Sil) & call define_rest_field(File,'sil' ,dims) - if (tr_bgc_hum) & + if (tr_bgc_hum) & call define_rest_field(File,'hum' ,dims) - if (tr_bgc_DMS) then - call define_rest_field(File,'dmsp' ,dims) - call define_rest_field(File,'dms' ,dims) - endif - if (tr_bgc_DON) then + if (tr_bgc_DMS) then + call define_rest_field(File,'dmsp' ,dims) + call define_rest_field(File,'dms' ,dims) + endif + if (tr_bgc_DON) then do k=1,n_don write(nchar,'(i3.3)') k call define_rest_field(File,'don'//trim(nchar),dims) enddo - endif - if (tr_bgc_Fe ) then + endif + if (tr_bgc_Fe ) then do k=1,n_fed write(nchar,'(i3.3)') k call define_rest_field(File,'fed'//trim(nchar),dims) @@ -385,305 +345,314 @@ subroutine init_restart_write(filename_spec) write(nchar,'(i3.3)') k call 
define_rest_field(File,'fep'//trim(nchar),dims) enddo - endif - if (tr_zaero) then + endif + if (tr_zaero) then do k=1,n_zaero write(nchar,'(i3.3)') k call define_rest_field(File,'zaeros'//trim(nchar),dims) enddo - endif - endif !nbtrcr + endif + endif !nbtrcr - deallocate(dims) + if (solve_zsal) call define_rest_field(File,'sss',dims) + + deallocate(dims) !----------------------------------------------------------------- ! 3D restart fields (ncat) !----------------------------------------------------------------- - allocate(dims(3)) - - dims(1) = dimid_ni - dims(2) = dimid_nj - dims(3) = dimid_ncat - - call define_rest_field(File,'aicen',dims) - call define_rest_field(File,'vicen',dims) - call define_rest_field(File,'vsnon',dims) - call define_rest_field(File,'Tsfcn',dims) - - if (tr_iage) then - call define_rest_field(File,'iage',dims) - end if - - if (tr_FY) then - call define_rest_field(File,'FY',dims) - end if - - if (tr_lvl) then - call define_rest_field(File,'alvl',dims) - call define_rest_field(File,'vlvl',dims) - end if - - if (tr_pond_topo) then - call define_rest_field(File,'apnd',dims) - call define_rest_field(File,'hpnd',dims) - call define_rest_field(File,'ipnd',dims) - end if - - if (tr_pond_lvl) then - call define_rest_field(File,'apnd',dims) - call define_rest_field(File,'hpnd',dims) - call define_rest_field(File,'ipnd',dims) - call define_rest_field(File,'dhs',dims) - call define_rest_field(File,'ffrac',dims) - end if - - if (tr_brine) then - call define_rest_field(File,'fbrn',dims) - call define_rest_field(File,'first_ice',dims) - endif - - if (skl_bgc) then - do k = 1, n_algae - write(nchar,'(i3.3)') k - call define_rest_field(File,'bgc_N'//trim(nchar) ,dims) - enddo - if (tr_bgc_C) then - ! do k = 1, n_algae - ! write(nchar,'(i3.3)') k - ! call define_rest_field(File,'bgc_C'//trim(nchar) ,dims) - ! 
enddo - do k = 1, n_doc - write(nchar,'(i3.3)') k - call define_rest_field(File,'bgc_DOC'//trim(nchar) ,dims) - enddo - do k = 1, n_dic - write(nchar,'(i3.3)') k - call define_rest_field(File,'bgc_DIC'//trim(nchar) ,dims) - enddo + allocate(dims(3)) + + dims(1) = dimid_ni + dims(2) = dimid_nj + dims(3) = dimid_ncat + + call define_rest_field(File,'aicen',dims) + call define_rest_field(File,'vicen',dims) + call define_rest_field(File,'vsnon',dims) + call define_rest_field(File,'Tsfcn',dims) + + if (tr_iage) then + call define_rest_field(File,'iage',dims) + end if + + if (tr_FY) then + call define_rest_field(File,'FY',dims) + end if + + if (tr_lvl) then + call define_rest_field(File,'alvl',dims) + call define_rest_field(File,'vlvl',dims) + end if + + if (tr_pond_topo) then + call define_rest_field(File,'apnd',dims) + call define_rest_field(File,'hpnd',dims) + call define_rest_field(File,'ipnd',dims) + end if + + if (tr_pond_lvl) then + call define_rest_field(File,'apnd',dims) + call define_rest_field(File,'hpnd',dims) + call define_rest_field(File,'ipnd',dims) + call define_rest_field(File,'dhs',dims) + call define_rest_field(File,'ffrac',dims) + end if + + if (tr_brine) then + call define_rest_field(File,'fbrn',dims) + call define_rest_field(File,'first_ice',dims) endif - if (tr_bgc_chl) then + + if (skl_bgc) then do k = 1, n_algae write(nchar,'(i3.3)') k - call define_rest_field(File,'bgc_chl'//trim(nchar) ,dims) + call define_rest_field(File,'bgc_N'//trim(nchar) ,dims) enddo - endif - call define_rest_field(File,'bgc_Nit' ,dims) - if (tr_bgc_Am) & + if (tr_bgc_C) then + ! do k = 1, n_algae + ! write(nchar,'(i3.3)') k + ! call define_rest_field(File,'bgc_C'//trim(nchar) ,dims) + ! 
enddo + do k = 1, n_doc + write(nchar,'(i3.3)') k + call define_rest_field(File,'bgc_DOC'//trim(nchar) ,dims) + enddo + do k = 1, n_dic + write(nchar,'(i3.3)') k + call define_rest_field(File,'bgc_DIC'//trim(nchar) ,dims) + enddo + endif + if (tr_bgc_chl) then + do k = 1, n_algae + write(nchar,'(i3.3)') k + call define_rest_field(File,'bgc_chl'//trim(nchar) ,dims) + enddo + endif + call define_rest_field(File,'bgc_Nit' ,dims) + if (tr_bgc_Am) & call define_rest_field(File,'bgc_Am' ,dims) - if (tr_bgc_Sil) & + if (tr_bgc_Sil) & call define_rest_field(File,'bgc_Sil' ,dims) - if (tr_bgc_hum) & + if (tr_bgc_hum) & call define_rest_field(File,'bgc_hum' ,dims) - if (tr_bgc_DMS) then - call define_rest_field(File,'bgc_DMSPp',dims) - call define_rest_field(File,'bgc_DMSPd',dims) - call define_rest_field(File,'bgc_DMS' ,dims) - endif - if (tr_bgc_PON) & + if (tr_bgc_DMS) then + call define_rest_field(File,'bgc_DMSPp',dims) + call define_rest_field(File,'bgc_DMSPd',dims) + call define_rest_field(File,'bgc_DMS' ,dims) + endif + if (tr_bgc_PON) & call define_rest_field(File,'bgc_PON' ,dims) - if (tr_bgc_DON) then - do k = 1, n_don - write(nchar,'(i3.3)') k - call define_rest_field(File,'bgc_DON'//trim(nchar) ,dims) - enddo - endif - if (tr_bgc_Fe ) then - do k = 1, n_fed - write(nchar,'(i3.3)') k - call define_rest_field(File,'bgc_Fed'//trim(nchar) ,dims) - enddo - do k = 1, n_fep - write(nchar,'(i3.3)') k - call define_rest_field(File,'bgc_Fep'//trim(nchar) ,dims) - enddo - endif - endif !skl_bgc + if (tr_bgc_DON) then + do k = 1, n_don + write(nchar,'(i3.3)') k + call define_rest_field(File,'bgc_DON'//trim(nchar) ,dims) + enddo + endif + if (tr_bgc_Fe ) then + do k = 1, n_fed + write(nchar,'(i3.3)') k + call define_rest_field(File,'bgc_Fed'//trim(nchar) ,dims) + enddo + do k = 1, n_fep + write(nchar,'(i3.3)') k + call define_rest_field(File,'bgc_Fep'//trim(nchar) ,dims) + enddo + endif + endif !skl_bgc + if (solve_zsal) & + call define_rest_field(File,'Rayleigh',dims) 
!----------------------------------------------------------------- ! 4D restart fields, written as layers of 3D !----------------------------------------------------------------- - do k=1,nilyr - write(nchar,'(i3.3)') k - call define_rest_field(File,'sice'//trim(nchar),dims) - call define_rest_field(File,'qice'//trim(nchar),dims) - enddo - - do k=1,nslyr - write(nchar,'(i3.3)') k - call define_rest_field(File,'qsno'//trim(nchar),dims) - enddo - - if (tr_snow) then - do k=1,nslyr - write(nchar,'(i3.3)') k - call define_rest_field(File,'smice'//trim(nchar),dims) - call define_rest_field(File,'smliq'//trim(nchar),dims) - call define_rest_field(File, 'rhos'//trim(nchar),dims) - call define_rest_field(File, 'rsnw'//trim(nchar),dims) - enddo - endif - - if (tr_fsd) then - do k=1,nfsd + do k=1,nilyr write(nchar,'(i3.3)') k - call define_rest_field(File,'fsd'//trim(nchar),dims) + call define_rest_field(File,'sice'//trim(nchar),dims) + call define_rest_field(File,'qice'//trim(nchar),dims) enddo - endif - - if (tr_iso) then - do k=1,n_iso - write(nchar,'(i3.3)') k - call define_rest_field(File,'isosno'//nchar, dims) - call define_rest_field(File,'isoice'//nchar, dims) - enddo - endif - if (tr_aero) then - do k=1,n_aero + do k=1,nslyr write(nchar,'(i3.3)') k - call define_rest_field(File,'aerosnossl'//nchar, dims) - call define_rest_field(File,'aerosnoint'//nchar, dims) - call define_rest_field(File,'aeroicessl'//nchar, dims) - call define_rest_field(File,'aeroiceint'//nchar, dims) + call define_rest_field(File,'qsno'//trim(nchar),dims) enddo - endif - if (z_tracers) then - if (tr_zaero) then - do n = 1, n_zaero - write(ncharb,'(i3.3)') n - do k = 1, nblyr+3 - write(nchar,'(i3.3)') k - call define_rest_field(File,'zaero'//trim(ncharb)//trim(nchar),dims) - enddo !k - enddo !n - endif !tr_zaero - if (tr_bgc_Nit) then - do k = 1, nblyr+3 + if (tr_snow) then + do k=1,nslyr write(nchar,'(i3.3)') k - call define_rest_field(File,'bgc_Nit'//trim(nchar),dims) - enddo - endif - if 
(tr_bgc_N) then - do n = 1, n_algae - write(ncharb,'(i3.3)') n - do k = 1, nblyr+3 - write(nchar,'(i3.3)') k - call define_rest_field(File,'bgc_N'//trim(ncharb)//trim(nchar),dims) - enddo - enddo - endif - if (tr_bgc_C) then - ! do n = 1, n_algae - ! write(ncharb,'(i3.3)') n - ! do k = 1, nblyr+3 - ! write(nchar,'(i3.3)') k - ! call define_rest_field(File,'bgc_C'//trim(ncharb)//trim(nchar),dims) - ! enddo - ! enddo - do n = 1, n_doc - write(ncharb,'(i3.3)') n - do k = 1, nblyr+3 - write(nchar,'(i3.3)') k - call define_rest_field(File,'bgc_DOC'//trim(ncharb)//trim(nchar),dims) - enddo - enddo - do n = 1, n_dic - write(ncharb,'(i3.3)') n - do k = 1, nblyr+3 - write(nchar,'(i3.3)') k - call define_rest_field(File,'bgc_DIC'//trim(ncharb)//trim(nchar),dims) - enddo - enddo - endif - if (tr_bgc_chl) then - do n = 1, n_algae - write(ncharb,'(i3.3)') n - do k = 1, nblyr+3 - write(nchar,'(i3.3)') k - call define_rest_field(File,'bgc_chl'//trim(ncharb)//trim(nchar),dims) - enddo + call define_rest_field(File,'smice'//trim(nchar),dims) + call define_rest_field(File,'smliq'//trim(nchar),dims) + call define_rest_field(File, 'rhos'//trim(nchar),dims) + call define_rest_field(File, 'rsnw'//trim(nchar),dims) enddo endif - if (tr_bgc_Am) then - do k = 1, nblyr+3 + + if (tr_fsd) then + do k=1,nfsd write(nchar,'(i3.3)') k - call define_rest_field(File,'bgc_Am'//trim(nchar),dims) + call define_rest_field(File,'fsd'//trim(nchar),dims) enddo endif - if (tr_bgc_Sil) then - do k = 1, nblyr+3 + + if (tr_iso) then + do k=1,n_iso write(nchar,'(i3.3)') k - call define_rest_field(File,'bgc_Sil'//trim(nchar),dims) + call define_rest_field(File,'isosno'//nchar, dims) + call define_rest_field(File,'isoice'//nchar, dims) enddo endif - if (tr_bgc_hum) then - do k = 1, nblyr+3 + + if (tr_aero) then + do k=1,n_aero write(nchar,'(i3.3)') k - call define_rest_field(File,'bgc_hum'//trim(nchar),dims) + call define_rest_field(File,'aerosnossl'//nchar, dims) + call 
define_rest_field(File,'aerosnoint'//nchar, dims) + call define_rest_field(File,'aeroicessl'//nchar, dims) + call define_rest_field(File,'aeroiceint'//nchar, dims) enddo endif - if (tr_bgc_DMS) then - do k = 1, nblyr+3 - write(nchar,'(i3.3)') k - call define_rest_field(File,'bgc_DMSPp'//trim(nchar),dims) - call define_rest_field(File,'bgc_DMSPd'//trim(nchar),dims) - call define_rest_field(File,'bgc_DMS'//trim(nchar),dims) - enddo + + if (solve_zsal) then + do k = 1, nblyr + write(nchar,'(i3.3)') k + call define_rest_field(File,'zSalinity'//trim(nchar),dims) + enddo endif - if (tr_bgc_PON) then - do k = 1, nblyr+3 + if (z_tracers) then + if (tr_zaero) then + do n = 1, n_zaero + write(ncharb,'(i3.3)') n + do k = 1, nblyr+3 write(nchar,'(i3.3)') k - call define_rest_field(File,'bgc_PON'//trim(nchar),dims) - enddo - endif - if (tr_bgc_DON) then - do n = 1, n_don - write(ncharb,'(i3.3)') n - do k = 1, nblyr+3 - write(nchar,'(i3.3)') k - call define_rest_field(File,'bgc_DON'//trim(ncharb)//trim(nchar),dims) - enddo - enddo - endif - if (tr_bgc_Fe ) then - do n = 1, n_fed - write(ncharb,'(i3.3)') n - do k = 1, nblyr+3 - write(nchar,'(i3.3)') k - call define_rest_field(File,'bgc_Fed'//trim(ncharb)//trim(nchar),dims) - enddo - enddo - do n = 1, n_fep - write(ncharb,'(i3.3)') n - do k = 1, nblyr+3 - write(nchar,'(i3.3)') k - call define_rest_field(File,'bgc_Fep'//trim(ncharb)//trim(nchar),dims) - enddo + call define_rest_field(File,'zaero'//trim(ncharb)//trim(nchar),dims) + enddo !k + enddo !n + endif !tr_zaero + if (tr_bgc_Nit) then + do k = 1, nblyr+3 + write(nchar,'(i3.3)') k + call define_rest_field(File,'bgc_Nit'//trim(nchar),dims) + enddo + endif + if (tr_bgc_N) then + do n = 1, n_algae + write(ncharb,'(i3.3)') n + do k = 1, nblyr+3 + write(nchar,'(i3.3)') k + call define_rest_field(File,'bgc_N'//trim(ncharb)//trim(nchar),dims) + enddo + enddo + endif + if (tr_bgc_C) then + ! do n = 1, n_algae + ! write(ncharb,'(i3.3)') n + ! do k = 1, nblyr+3 + ! 
write(nchar,'(i3.3)') k + ! call + ! define_rest_field(File,'bgc_C'//trim(ncharb)//trim(nchar),dims) + ! enddo + ! enddo + do n = 1, n_doc + write(ncharb,'(i3.3)') n + do k = 1, nblyr+3 + write(nchar,'(i3.3)') k + call define_rest_field(File,'bgc_DOC'//trim(ncharb)//trim(nchar),dims) + enddo + enddo + do n = 1, n_dic + write(ncharb,'(i3.3)') n + do k = 1, nblyr+3 + write(nchar,'(i3.3)') k + call define_rest_field(File,'bgc_DIC'//trim(ncharb)//trim(nchar),dims) + enddo + enddo + endif + if (tr_bgc_chl) then + do n = 1, n_algae + write(ncharb,'(i3.3)') n + do k = 1, nblyr+3 + write(nchar,'(i3.3)') k + call define_rest_field(File,'bgc_chl'//trim(ncharb)//trim(nchar),dims) + enddo + enddo + endif + if (tr_bgc_Am) then + do k = 1, nblyr+3 + write(nchar,'(i3.3)') k + call define_rest_field(File,'bgc_Am'//trim(nchar),dims) + enddo + endif + if (tr_bgc_Sil) then + do k = 1, nblyr+3 + write(nchar,'(i3.3)') k + call define_rest_field(File,'bgc_Sil'//trim(nchar),dims) + enddo + endif + if (tr_bgc_hum) then + do k = 1, nblyr+3 + write(nchar,'(i3.3)') k + call define_rest_field(File,'bgc_hum'//trim(nchar),dims) + enddo + endif + if (tr_bgc_DMS) then + do k = 1, nblyr+3 + write(nchar,'(i3.3)') k + call define_rest_field(File,'bgc_DMSPp'//trim(nchar),dims) + call define_rest_field(File,'bgc_DMSPd'//trim(nchar),dims) + call define_rest_field(File,'bgc_DMS'//trim(nchar),dims) + enddo + endif + if (tr_bgc_PON) then + do k = 1, nblyr+3 + write(nchar,'(i3.3)') k + call define_rest_field(File,'bgc_PON'//trim(nchar),dims) + enddo + endif + if (tr_bgc_DON) then + do n = 1, n_don + write(ncharb,'(i3.3)') n + do k = 1, nblyr+3 + write(nchar,'(i3.3)') k + call define_rest_field(File,'bgc_DON'//trim(ncharb)//trim(nchar),dims) + enddo + enddo + endif + if (tr_bgc_Fe ) then + do n = 1, n_fed + write(ncharb,'(i3.3)') n + do k = 1, nblyr+3 + write(nchar,'(i3.3)') k + call define_rest_field(File,'bgc_Fed'//trim(ncharb)//trim(nchar),dims) + enddo + enddo + do n = 1, n_fep + write(ncharb,'(i3.3)') 
n + do k = 1, nblyr+3 + write(nchar,'(i3.3)') k + call define_rest_field(File,'bgc_Fep'//trim(ncharb)//trim(nchar),dims) + enddo + enddo + endif + do k = 1, nbtrcr + write(nchar,'(i3.3)') k + call define_rest_field(File,'zbgc_frac'//trim(nchar),dims) enddo - endif - do k = 1, nbtrcr - write(nchar,'(i3.3)') k - call define_rest_field(File,'zbgc_frac'//trim(nchar),dims) - enddo - endif !z_tracers + endif !z_tracers - deallocate(dims) - call ice_pio_check(pio_enddef(File), subname//' ERROR: enddef',file=__FILE__,line=__LINE__) + deallocate(dims) + status = pio_enddef(File) - call pio_seterrorhandling(File, PIO_INTERNAL_ERROR) + call ice_pio_initdecomp(iodesc=iodesc2d, precision=8) + call ice_pio_initdecomp(ndim3=ncat , iodesc=iodesc3d_ncat, remap=.true., precision=8) - call ice_pio_initdecomp(iodesc=iodesc2d, precision=8) - call ice_pio_initdecomp(ndim3=ncat , iodesc=iodesc3d_ncat, remap=.true., precision=8) +! endif ! restart_format if (my_task == master_task) then write(nu_diag,*) 'Writing ',filename(1:lenstr(filename)) endif - first_call = .false. - end subroutine init_restart_write !======================================================================= @@ -695,6 +664,7 @@ subroutine read_restart_field(nu,nrec,work,atype,vname,ndim3,diag, & field_loc, field_type) use ice_blocks, only: nx_block, ny_block + use ice_communicate, only: my_task, master_task use ice_constants, only: c0, field_loc_center use ice_boundary, only: ice_HaloUpdate use ice_domain, only: halo_info, distrb_info, nblocks @@ -702,98 +672,101 @@ subroutine read_restart_field(nu,nrec,work,atype,vname,ndim3,diag, & use ice_global_reductions, only: global_minval, global_maxval, global_sum integer (kind=int_kind), intent(in) :: & - nu , & ! unit number - ndim3 , & ! third dimension - nrec ! record number (0 for sequential access) + nu , & ! unit number + ndim3 , & ! third dimension + nrec ! 
record number (0 for sequential access) real (kind=dbl_kind), dimension(nx_block,ny_block,ndim3,max_blocks), intent(inout) :: & - work ! input array (real, 8-byte) + work ! input array (real, 8-byte) character (len=4), intent(in) :: & - atype ! format for output array - ! (real/integer, 4-byte/8-byte) + atype ! format for output array + ! (real/integer, 4-byte/8-byte) logical (kind=log_kind), intent(in) :: & - diag ! if true, write diagnostic output + diag ! if true, write diagnostic output character (len=*), intent(in) :: vname integer (kind=int_kind), optional, intent(in) :: & - field_loc , & ! location of field on staggered grid - field_type ! type of field (scalar, vector, angle) + field_loc, & ! location of field on staggered grid + field_type ! type of field (scalar, vector, angle) ! local variables integer (kind=int_kind) :: & - j , & ! dimension counter - n , & ! number of dimensions for variable - ndims , & ! number of variable dimensions - status ! status variable from netCDF routine + j, & ! dimension counter + n, & ! number of dimensions for variable + ndims, & ! number of variable dimensions + status ! status variable from netCDF routine real (kind=dbl_kind) :: amin,amax,asum character(len=*), parameter :: subname = '(read_restart_field)' - call pio_seterrorhandling(File, PIO_RETURN_ERROR) +! if (restart_format(1:3) == "pio") then + if (my_task == master_task) & + write(nu_diag,*)'Parallel restart file read: ',vname - if (my_task == master_task) then - write(nu_diag,*)'Parallel restart file read: ',vname - endif - - call ice_pio_check(pio_inq_varid(File,trim(vname),vardesc), & - subname// " ERROR: missing varid "//trim(vname),file=__FILE__,line=__LINE__) + call pio_seterrorhandling(File, PIO_BCAST_ERROR) - call ice_pio_check(pio_inq_varndims(File, vardesc, ndims), & - subname// " ERROR: missing varndims "//trim(vname),file=__FILE__,line=__LINE__) + status = pio_inq_varid(File,trim(vname),vardesc) - if (ndim3 == ncat .and. 
ndims == 3) then - call pio_read_darray(File, vardesc, iodesc3d_ncat, work, status) -#ifdef CESMCOUPLED - where (work == PIO_FILL_DOUBLE) work = c0 -#endif - if (present(field_loc)) then - do n=1,ndim3 - call ice_HaloUpdate (work(:,:,n,:), halo_info, & - field_loc, field_type) - enddo + if (status /= PIO_noerr) then + call abort_ice(subname// & + "ERROR: CICE restart? Missing variable: "//trim(vname)) endif - elseif (ndim3 == 1 .and. ndims == 2) then - call pio_read_darray(File, vardesc, iodesc2d, work, status) -#ifdef CESMCOUPLED - where (work == PIO_FILL_DOUBLE) work = c0 -#endif - if (present(field_loc)) then - call ice_HaloUpdate (work(:,:,1,:), halo_info, & - field_loc, field_type) - endif - else - write(nu_diag,*) "ndim3 not supported ",ndim3 - endif - call ice_pio_check(status, & - subname//" ERROR: reading var "//trim(vname),file=__FILE__,line=__LINE__) + status = pio_inq_varndims(File, vardesc, ndims) - call pio_seterrorhandling(File, PIO_INTERNAL_ERROR) + call pio_seterrorhandling(File, PIO_INTERNAL_ERROR) - if (diag) then - if (ndim3 > 1) then - do n=1,ndim3 - amin = global_minval(work(:,:,n,:),distrb_info) - amax = global_maxval(work(:,:,n,:),distrb_info) - asum = global_sum(work(:,:,n,:), distrb_info, field_loc_center) +! if (ndim3 == ncat .and. ncat>1) then + if (ndim3 == ncat .and. ndims == 3) then + call pio_read_darray(File, vardesc, iodesc3d_ncat, work, status) + if (present(field_loc)) then + do n=1,ndim3 + call ice_HaloUpdate (work(:,:,n,:), halo_info, & + field_loc, field_type) + enddo + endif +! elseif (ndim3 == 1) then + elseif (ndim3 == 1 .and. 
ndims == 2) then + call pio_read_darray(File, vardesc, iodesc2d, work, status) + if (present(field_loc)) then + call ice_HaloUpdate (work(:,:,1,:), halo_info, & + field_loc, field_type) + endif + else + write(nu_diag,*) "ndim3 not supported ",ndim3 + endif + + if (diag) then + if (ndim3 > 1) then + do n=1,ndim3 + amin = global_minval(work(:,:,n,:),distrb_info) + amax = global_maxval(work(:,:,n,:),distrb_info) + asum = global_sum(work(:,:,n,:), distrb_info, field_loc_center) + if (my_task == master_task) then + write(nu_diag,*) ' min and max =', amin, amax + write(nu_diag,*) ' sum =',asum + endif + enddo + else + amin = global_minval(work(:,:,1,:),distrb_info) + amax = global_maxval(work(:,:,1,:),distrb_info) + asum = global_sum(work(:,:,1,:), distrb_info, field_loc_center) if (my_task == master_task) then - write(nu_diag,*) ' min, max, sum =', amin, amax, asum, trim(vname) + write(nu_diag,*) ' min and max =', amin, amax + write(nu_diag,*) ' sum =',asum + write(nu_diag,*) '' endif - enddo - else - amin = global_minval(work(:,:,1,:),distrb_info) - amax = global_maxval(work(:,:,1,:),distrb_info) - asum = global_sum(work(:,:,1,:), distrb_info, field_loc_center) - if (my_task == master_task) then - write(nu_diag,*) ' min, max, sum =', amin, amax, asum, trim(vname) endif + endif - endif +! else +! call abort_ice(subname//"ERROR: Invalid restart_format: "//trim(restart_format)) +! endif ! restart_format end subroutine read_restart_field @@ -805,86 +778,83 @@ end subroutine read_restart_field subroutine write_restart_field(nu,nrec,work,atype,vname,ndim3,diag) use ice_blocks, only: nx_block, ny_block + use ice_communicate, only: my_task, master_task use ice_constants, only: c0, field_loc_center use ice_domain, only: distrb_info, nblocks use ice_domain_size, only: max_blocks, ncat use ice_global_reductions, only: global_minval, global_maxval, global_sum integer (kind=int_kind), intent(in) :: & - nu , & ! unit number - ndim3 , & ! third dimension - nrec ! 
record number (0 for sequential access) + nu , & ! unit number + ndim3 , & ! third dimension + nrec ! record number (0 for sequential access) real (kind=dbl_kind), dimension(nx_block,ny_block,ndim3,max_blocks), intent(in) :: & - work ! input array (real, 8-byte) + work ! input array (real, 8-byte) character (len=4), intent(in) :: & - atype ! format for output array - ! (real/integer, 4-byte/8-byte) + atype ! format for output array + ! (real/integer, 4-byte/8-byte) logical (kind=log_kind), intent(in) :: & - diag ! if true, write diagnostic output + diag ! if true, write diagnostic output character (len=*), intent(in) :: vname ! local variables integer (kind=int_kind) :: & - j , & ! dimension counter - n , & ! dimension counter - ndims , & ! number of variable dimensions - status ! status variable from netCDF routine + j, & ! dimension counter + n, & ! dimension counter + ndims, & ! number of variable dimensions + status ! status variable from netCDF routine real (kind=dbl_kind) :: amin,amax,asum character(len=*), parameter :: subname = '(write_restart_field)' - call pio_seterrorhandling(File, PIO_RETURN_ERROR) - - if (my_task == master_task) then - write(nu_diag,*)'Parallel restart file write: ',vname - endif - - call ice_pio_check(pio_inq_varid(File,trim(vname),vardesc), & - subname// " ERROR: missing varid "//trim(vname),file=__FILE__,line=__LINE__) - - call ice_pio_check(pio_inq_varndims(File, vardesc, ndims), & - subname// " ERROR: missing varndims "//trim(vname),file=__FILE__,line=__LINE__) +! 
if (restart_format(1:3) == "pio") then + if (my_task == master_task) & + write(nu_diag,*)'Parallel restart file write: ',vname - if (ndims==3) then - call pio_write_darray(File, vardesc, iodesc3d_ncat,work(:,:,:,1:nblocks), & - status, fillval=c0) - elseif (ndims == 2) then - call pio_write_darray(File, vardesc, iodesc2d, work(:,:,1,1:nblocks), & - status, fillval=c0) - else - write(nu_diag,*) "ndims not supported",ndims,ndim3 - endif + status = pio_inq_varid(File,trim(vname),vardesc) - call ice_pio_check(status, & - subname//" ERROR: writing "//trim(vname),file=__FILE__,line=__LINE__) + status = pio_inq_varndims(File, vardesc, ndims) - call pio_seterrorhandling(File, PIO_INTERNAL_ERROR) + if (ndims==3) then + call pio_write_darray(File, vardesc, iodesc3d_ncat,work(:,:,:,1:nblocks), & + status, fillval=c0) + elseif (ndims == 2) then + call pio_write_darray(File, vardesc, iodesc2d, work(:,:,1,1:nblocks), & + status, fillval=c0) + else + write(nu_diag,*) "ndims not supported",ndims,ndim3 + endif - if (diag) then - if (ndim3 > 1) then - do n=1,ndim3 - amin = global_minval(work(:,:,n,:),distrb_info) - amax = global_maxval(work(:,:,n,:),distrb_info) - asum = global_sum(work(:,:,n,:), distrb_info, field_loc_center) + if (diag) then + if (ndim3 > 1) then + do n=1,ndim3 + amin = global_minval(work(:,:,n,:),distrb_info) + amax = global_maxval(work(:,:,n,:),distrb_info) + asum = global_sum(work(:,:,n,:), distrb_info, field_loc_center) + if (my_task == master_task) then + write(nu_diag,*) ' min and max =', amin, amax + write(nu_diag,*) ' sum =',asum + endif + enddo + else + amin = global_minval(work(:,:,1,:),distrb_info) + amax = global_maxval(work(:,:,1,:),distrb_info) + asum = global_sum(work(:,:,1,:), distrb_info, field_loc_center) if (my_task == master_task) then - write(nu_diag,*) ' min, max, sum =', amin, amax, asum, trim(vname) + write(nu_diag,*) ' min and max =', amin, amax + write(nu_diag,*) ' sum =',asum endif - enddo - else - amin = 
global_minval(work(:,:,1,:),distrb_info) - amax = global_maxval(work(:,:,1,:),distrb_info) - asum = global_sum(work(:,:,1,:), distrb_info, field_loc_center) - if (my_task == master_task) then - write(nu_diag,*) ' min, max, sum =', amin, amax, asum, trim(vname) endif endif - endif +! else +! call abort_ice(subname//"ERROR: Invalid restart_format: "//trim(restart_format)) +! endif end subroutine write_restart_field @@ -895,7 +865,8 @@ end subroutine write_restart_field subroutine final_restart() - use ice_calendar, only: istep1, myear, mmonth, mday, msec + use ice_calendar, only: istep1, idate, msec + use ice_communicate, only: my_task, master_task character(len=*), parameter :: subname = '(final_restart)' @@ -903,10 +874,8 @@ subroutine final_restart() call PIO_freeDecomp(File,iodesc3d_ncat) call pio_closefile(File) - if (my_task == master_task) then - write(nu_diag,'(a,i8,4x,i4.4,a,i2.2,a,i2.2,a,i5.5)') & - 'Restart read/written ',istep1,myear,'-',mmonth,'-',mday,'-',msec - endif + if (my_task == master_task) & + write(nu_diag,*) 'Restart read/written ',istep1,idate,msec end subroutine final_restart @@ -917,49 +886,23 @@ end subroutine final_restart subroutine define_rest_field(File, vname, dims) -#ifndef USE_PIO1 - use netcdf, only: NF90_CHUNKED - use pio_nf, only: pio_def_var_chunking !PIO <2.6.0 was missing this in the pio module -#endif type(file_desc_t) , intent(in) :: File character (len=*) , intent(in) :: vname integer (kind=int_kind), intent(in) :: dims(:) - integer (kind=int_kind) :: chunks(size(dims)), i, status + integer (kind=int_kind) :: & + status ! status variable from netCDF routine character(len=*), parameter :: subname = '(define_rest_field)' - status = pio_def_var(File,trim(vname),pio_double,dims,vardesc) - call ice_pio_check(status, & - subname//' ERROR defining restart field '//trim(vname)) - -#ifndef USE_PIO1 - if (restart_format=='hdf5' .and. 
restart_deflate/=0) then - status = pio_def_var_deflate(File, vardesc, shuffle=0, deflate=1, deflate_level=restart_deflate) - call ice_pio_check(status, & - subname//' ERROR: deflating restart field '//trim(vname),file=__FILE__,line=__LINE__) - endif - - if (restart_format=='hdf5' .and. size(dims)>1) then - if (dims(1)==dimid_ni .and. dims(2)==dimid_nj) then - chunks(1)=restart_chunksize(1) - chunks(2)=restart_chunksize(2) - do i = 3, size(dims) - chunks(i) = 0 - enddo - - status = pio_def_var_chunking(File, vardesc, NF90_CHUNKED, chunks) - call ice_pio_check(status, subname//' ERROR: chunking restart field '//trim(vname),& - file=__FILE__,line=__LINE__) - endif - endif -#endif end subroutine define_rest_field !======================================================================= +<<<<<<<< HEAD:cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 +======== ! Inquire field existance ! author T. Craig @@ -975,17 +918,14 @@ logical function query_field(nu,vname) query_field = .false. - call pio_seterrorhandling(File, PIO_RETURN_ERROR) - status = pio_inq_varid(File,trim(vname),vardesc) if (status == PIO_noerr) query_field = .true. - call pio_seterrorhandling(File, PIO_INTERNAL_ERROR) - end function query_field !======================================================================= +>>>>>>>> e628a9a (Update CICE for latest Consortium/main (#56)):cicecore/cicedyn/infrastructure/io/io_pio2/ice_restart.F90 end module ice_restart !======================================================================= diff --git a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 deleted file mode 100644 index 8eab5e260..000000000 --- a/cicecore/cicedynB/dynamics/ice_dyn_evp.F90 +++ /dev/null @@ -1,2304 +0,0 @@ -!======================================================================= -! -! Elastic-viscous-plastic sea ice dynamics model -! Computes ice velocity and deformation -! -! See: -! -! Hunke, E. C., and J. K. Dukowicz (1997). 
An elastic-viscous-plastic model -! for sea ice dynamics. J. Phys. Oceanogr., 27, 1849-1867. -! -! Hunke, E. C. (2001). Viscous-Plastic Sea Ice Dynamics with the EVP Model: -! Linearization Issues. J. Comput. Phys., 170, 18-38. -! -! Hunke, E. C., and J. K. Dukowicz (2002). The Elastic-Viscous-Plastic -! Sea Ice Dynamics Model in General Orthogonal Curvilinear Coordinates -! on a Sphere - Incorporation of Metric Terms. Mon. Weather Rev., -! 130, 1848-1865. -! -! Hunke, E. C., and J. K. Dukowicz (2003). The sea ice momentum -! equation in the free drift regime. Los Alamos Tech. Rep. LA-UR-03-2219. -! -! Hibler, W. D. (1979). A dynamic thermodynamic sea ice model. J. Phys. -! Oceanogr., 9, 817-846. -! -! Bouillon, S., T. Fichefet, V. Legat and G. Madec (2013). The -! elastic-viscous-plastic method revisited. Ocean Model., 71, 2-12. -! -! author: Elizabeth C. Hunke, LANL -! -! 2003: Vectorized by Clifford Chen (Fujitsu) and William Lipscomb (LANL) -! 2004: Block structure added by William Lipscomb -! 2005: Removed boundary calls for stress arrays (WHL) -! 2006: Streamlined for efficiency by Elizabeth Hunke -! 
Converted to free source form (F90) - - module ice_dyn_evp - - use ice_kinds_mod - use ice_communicate, only: my_task, master_task - use ice_constants, only: field_loc_center, field_loc_NEcorner, & - field_loc_Nface, field_loc_Eface, & - field_type_scalar, field_type_vector - use ice_constants, only: c0, p027, p055, p111, p166, & - p222, p25, p333, p5, c1 - use ice_dyn_shared, only: stepu, stepuv_CD, stepu_C, stepv_C, & - dyn_prep1, dyn_prep2, dyn_finish, & - ndte, yield_curve, ecci, denom1, arlx1i, fcor_blk, fcorE_blk, fcorN_blk, & - uvel_init, vvel_init, uvelE_init, vvelE_init, uvelN_init, vvelN_init, & - seabed_stress_factor_LKD, seabed_stress_factor_prob, seabed_stress_method, & - seabed_stress, Ktens, revp - use ice_fileunits, only: nu_diag - use ice_exit, only: abort_ice - use icepack_intfc, only: icepack_warnings_flush, icepack_warnings_aborted - use icepack_intfc, only: icepack_ice_strength, icepack_query_parameters - - implicit none - private - public :: evp - -!======================================================================= - - contains - -!======================================================================= -! Elastic-viscous-plastic dynamics driver -! -#ifdef CICE_IN_NEMO -! Wind stress is set during this routine from the values supplied -! via NEMO (unless calc_strair is true). These values are supplied -! rotated on u grid and multiplied by aice. strairxT = 0 in this -! case so operations in dyn_prep1 are pointless but carried out to -! minimise code changes. -#endif -! -! author: Elizabeth C. 
Hunke, LANL - - subroutine evp (dt) - - use ice_arrays_column, only: Cdn_ocn - use ice_boundary, only: ice_halo, ice_HaloMask, ice_HaloUpdate, & - ice_HaloDestroy, ice_HaloUpdate_stress - use ice_blocks, only: block, get_block, nx_block, ny_block, nghost - use ice_domain, only: nblocks, blocks_ice, halo_info, maskhalo_dyn - use ice_domain_size, only: max_blocks, ncat, nx_global, ny_global - use ice_flux, only: rdg_conv, rdg_shear, strairxT, strairyT, & - strairxU, strairyU, uocn, vocn, ss_tltx, ss_tlty, fmU, & - strtltxU, strtltyU, strocnxU, strocnyU, strintxU, strintyU, taubxU, taubyU, & - strax, stray, & - TbU, hwater, & - strairxN, strairyN, fmN, & - strtltxN, strtltyN, strocnxN, strocnyN, strintxN, strintyN, taubxN, taubyN, & - TbN, & - strairxE, strairyE, fmE, & - strtltxE, strtltyE, strocnxE, strocnyE, strintxE, strintyE, taubxE, taubyE, & - TbE, & - stressp_1, stressp_2, stressp_3, stressp_4, & - stressm_1, stressm_2, stressm_3, stressm_4, & - stress12_1, stress12_2, stress12_3, stress12_4, & - stresspT, stressmT, stress12T, & - stresspU, stressmU, stress12U - use ice_grid, only: tmask, umask, umaskCD, nmask, emask, uvm, epm, npm, & - dxE, dxN, dxT, dxU, dyE, dyN, dyT, dyU, & - ratiodxN, ratiodxNr, ratiodyE, ratiodyEr, & - dxhy, dyhx, cxp, cyp, cxm, cym, & - tarear, uarear, earear, narear, grid_average_X2Y, uarea, & - grid_type, grid_ice, & - grid_atm_dynu, grid_atm_dynv, grid_ocn_dynu, grid_ocn_dynv - use ice_state, only: aice, aiU, vice, vsno, uvel, vvel, uvelN, vvelN, & - uvelE, vvelE, divu, shear, & - aice_init, aice0, aicen, vicen, strength - use ice_timers, only: timer_dynamics, timer_bound, & - ice_timer_start, ice_timer_stop, timer_evp_1d, timer_evp_2d - use ice_dyn_evp_1d, only: ice_dyn_evp_1d_copyin, ice_dyn_evp_1d_kernel, & - ice_dyn_evp_1d_copyout - use ice_dyn_shared, only: evp_algorithm, stack_fields, unstack_fields, & - DminTarea, visc_method, deformations, deformationsC_T, deformationsCD_T, & - strain_rates_U, & - iceTmask, iceUmask, 
iceEmask, iceNmask, & - dyn_haloUpdate - - real (kind=dbl_kind), intent(in) :: & - dt ! time step - - ! local variables - - integer (kind=int_kind) :: & - ksub , & ! subcycle step - iblk , & ! block index - ilo,ihi,jlo,jhi, & ! beginning and end of physical domain - i, j, ij ! local indices - - integer (kind=int_kind), dimension(max_blocks) :: & - icellT , & ! no. of cells where iceTmask = .true. - icellN , & ! no. of cells where iceNmask = .true. - icellE , & ! no. of cells where iceEmask = .true. - icellU ! no. of cells where iceUmask = .true. - - integer (kind=int_kind), dimension (nx_block*ny_block, max_blocks) :: & - indxTi , & ! compressed index in i-direction - indxTj , & ! compressed index in j-direction - indxEi , & ! compressed index in i-direction - indxEj , & ! compressed index in j-direction - indxNi , & ! compressed index in i-direction - indxNj , & ! compressed index in j-direction - indxUi , & ! compressed index in i-direction - indxUj ! compressed index in j-direction - - real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & - uocnU , & ! i ocean current (m/s) - vocnU , & ! j ocean current (m/s) - ss_tltxU , & ! sea surface slope, x-direction (m/m) - ss_tltyU , & ! sea surface slope, y-direction (m/m) - cdn_ocnU , & ! ocn drag coefficient - tmass , & ! total mass of ice and snow (kg/m^2) - waterxU , & ! for ocean stress calculation, x (m/s) - wateryU , & ! for ocean stress calculation, y (m/s) - forcexU , & ! work array: combined atm stress and ocn tilt, x - forceyU , & ! work array: combined atm stress and ocn tilt, y - umass , & ! total mass of ice and snow (u grid) - umassdti ! mass of U-cell/dte (kg/m^2 s) - - real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & - uocnN , & ! i ocean current (m/s) - vocnN , & ! j ocean current (m/s) - ss_tltxN , & ! sea surface slope, x-direction (m/m) - ss_tltyN , & ! sea surface slope, y-direction (m/m) - cdn_ocnN , & ! ocn drag coefficient - waterxN , & ! 
for ocean stress calculation, x (m/s) - wateryN , & ! for ocean stress calculation, y (m/s) - forcexN , & ! work array: combined atm stress and ocn tilt, x - forceyN , & ! work array: combined atm stress and ocn tilt, y - aiN , & ! ice fraction on N-grid - nmass , & ! total mass of ice and snow (N grid) - nmassdti ! mass of N-cell/dte (kg/m^2 s) - - real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & - uocnE , & ! i ocean current (m/s) - vocnE , & ! j ocean current (m/s) - ss_tltxE , & ! sea surface slope, x-direction (m/m) - ss_tltyE , & ! sea surface slope, y-direction (m/m) - cdn_ocnE , & ! ocn drag coefficient - waterxE , & ! for ocean stress calculation, x (m/s) - wateryE , & ! for ocean stress calculation, y (m/s) - forcexE , & ! work array: combined atm stress and ocn tilt, x - forceyE , & ! work array: combined atm stress and ocn tilt, y - aiE , & ! ice fraction on E-grid - emass , & ! total mass of ice and snow (E grid) - emassdti ! mass of E-cell/dte (kg/m^2 s) - - real (kind=dbl_kind), allocatable :: & - fld2(:,:,:,:), & ! 2 bundled fields - fld3(:,:,:,:), & ! 3 bundled fields - fld4(:,:,:,:) ! 4 bundled fields - - real (kind=dbl_kind), allocatable :: & - strengthU(:,:,:), & ! strength averaged to U points - divergU (:,:,:), & ! div array on U points, differentiate from divu - tensionU (:,:,:), & ! tension array on U points - shearU (:,:,:), & ! shear array on U points - deltaU (:,:,:), & ! delta array on U points - zetax2T (:,:,:), & ! zetax2 = 2*zeta (bulk viscosity) - zetax2U (:,:,:), & ! zetax2T averaged to U points - etax2T (:,:,:), & ! etax2 = 2*eta (shear viscosity) - etax2U (:,:,:) ! etax2T averaged to U points - - real (kind=dbl_kind), dimension(nx_block,ny_block,8):: & - strtmp ! stress combinations for momentum equation - - logical (kind=log_kind) :: & - calc_strair ! calculate air/ice stress - - integer (kind=int_kind), dimension (nx_block,ny_block,max_blocks) :: & - halomask ! 
generic halo mask - - type (ice_halo) :: & - halo_info_mask ! ghost cell update info for masked halo - - type (block) :: & - this_block ! block information for current block - - logical (kind=log_kind), save :: & - first_time = .true. ! first time logical - - character(len=*), parameter :: subname = '(evp)' - - call ice_timer_start(timer_dynamics) ! dynamics - - !----------------------------------------------------------------- - ! Initialize - !----------------------------------------------------------------- - - allocate(fld2(nx_block,ny_block,2,max_blocks)) - allocate(fld3(nx_block,ny_block,3,max_blocks)) - allocate(fld4(nx_block,ny_block,4,max_blocks)) - - if (grid_ice == 'CD' .or. grid_ice == 'C') then - - allocate(strengthU(nx_block,ny_block,max_blocks)) - allocate(divergU (nx_block,ny_block,max_blocks)) - allocate(tensionU (nx_block,ny_block,max_blocks)) - allocate(shearU (nx_block,ny_block,max_blocks)) - allocate(deltaU (nx_block,ny_block,max_blocks)) - allocate(zetax2T (nx_block,ny_block,max_blocks)) - allocate(zetax2U (nx_block,ny_block,max_blocks)) - allocate(etax2T (nx_block,ny_block,max_blocks)) - allocate(etax2U (nx_block,ny_block,max_blocks)) - strengthU(:,:,:) = c0 - divergU (:,:,:) = c0 - tensionU (:,:,:) = c0 - shearU (:,:,:) = c0 - deltaU (:,:,:) = c0 - zetax2T (:,:,:) = c0 - zetax2U (:,:,:) = c0 - etax2T (:,:,:) = c0 - etax2U (:,:,:) = c0 - - endif - - ! This call is needed only if dt changes during runtime. -! call set_evp_parameters (dt) - - !----------------------------------------------------------------- - ! boundary updates - ! commented out because the ghost cells are freshly - ! updated after cleanup_itd - !----------------------------------------------------------------- - -! call ice_timer_start(timer_bound) -! call ice_HaloUpdate (aice, halo_info, & -! field_loc_center, field_type_scalar) -! call ice_HaloUpdate (vice, halo_info, & -! field_loc_center, field_type_scalar) -! call ice_HaloUpdate (vsno, halo_info, & -! 
field_loc_center, field_type_scalar) -! call ice_timer_stop(timer_bound) - - !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) SCHEDULE(runtime) - do iblk = 1, nblocks - - do j = 1, ny_block - do i = 1, nx_block - rdg_conv (i,j,iblk) = c0 - rdg_shear(i,j,iblk) = c0 - divu (i,j,iblk) = c0 - shear(i,j,iblk) = c0 - enddo - enddo - - !----------------------------------------------------------------- - ! preparation for dynamics - !----------------------------------------------------------------- - - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi - - call dyn_prep1 (nx_block, ny_block, & - ilo, ihi, jlo, jhi, & - aice (:,:,iblk), vice (:,:,iblk), & - vsno (:,:,iblk), tmask (:,:,iblk), & - tmass (:,:,iblk), iceTmask(:,:,iblk)) - - enddo ! iblk - !$OMP END PARALLEL DO - - call ice_timer_start(timer_bound) - call ice_HaloUpdate (iceTmask, halo_info, & - field_loc_center, field_type_scalar) - call ice_timer_stop(timer_bound) - - !----------------------------------------------------------------- - ! 
convert fields from T to U grid - !----------------------------------------------------------------- - - call stack_fields(tmass, aice_init, cdn_ocn, fld3) - call ice_HaloUpdate (fld3, halo_info, & - field_loc_center, field_type_scalar) - call stack_fields(uocn, vocn, ss_tltx, ss_tlty, fld4) - call ice_HaloUpdate (fld4, halo_info, & - field_loc_center, field_type_vector) - call unstack_fields(fld3, tmass, aice_init, cdn_ocn) - call unstack_fields(fld4, uocn, vocn, ss_tltx, ss_tlty) - - call grid_average_X2Y('S', tmass , 'T' , umass , 'U') - call grid_average_X2Y('S', aice_init, 'T' , aiU , 'U') - call grid_average_X2Y('S', cdn_ocn , 'T' , cdn_ocnU, 'U') - call grid_average_X2Y('S', uocn , grid_ocn_dynu, uocnU , 'U') - call grid_average_X2Y('S', vocn , grid_ocn_dynv, vocnU , 'U') - call grid_average_X2Y('S', ss_tltx , grid_ocn_dynu, ss_tltxU, 'U') - call grid_average_X2Y('S', ss_tlty , grid_ocn_dynv, ss_tltyU, 'U') - - if (grid_ice == 'CD' .or. grid_ice == 'C') then - call grid_average_X2Y('S', tmass , 'T' , emass , 'E') - call grid_average_X2Y('S', aice_init, 'T' , aie , 'E') - call grid_average_X2Y('S', cdn_ocn , 'T' , cdn_ocnE, 'E') - call grid_average_X2Y('S', uocn , grid_ocn_dynu, uocnE , 'E') - call grid_average_X2Y('S', vocn , grid_ocn_dynv, vocnE , 'E') - call grid_average_X2Y('S', ss_tltx , grid_ocn_dynu, ss_tltxE, 'E') - call grid_average_X2Y('S', ss_tlty , grid_ocn_dynv, ss_tltyE, 'E') - call grid_average_X2Y('S', tmass , 'T' , nmass , 'N') - call grid_average_X2Y('S', aice_init, 'T' , ain , 'N') - call grid_average_X2Y('S', cdn_ocn , 'T' , cdn_ocnN, 'N') - call grid_average_X2Y('S', uocn , grid_ocn_dynu, uocnN , 'N') - call grid_average_X2Y('S', vocn , grid_ocn_dynv, vocnN , 'N') - call grid_average_X2Y('S', ss_tltx , grid_ocn_dynu, ss_tltxN, 'N') - call grid_average_X2Y('S', ss_tlty , grid_ocn_dynv, ss_tltyN, 'N') - endif - !---------------------------------------------------------------- - ! 
Set wind stress to values supplied via NEMO or other forcing - ! Map T to U, N, E as needed - ! This wind stress is rotated on u grid and multiplied by aice in coupler - !---------------------------------------------------------------- - call icepack_query_parameters(calc_strair_out=calc_strair) - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) - - if (.not. calc_strair) then - call grid_average_X2Y('F', strax, grid_atm_dynu, strairxU, 'U') - call grid_average_X2Y('F', stray, grid_atm_dynv, strairyU, 'U') - else - call ice_HaloUpdate (strairxT, halo_info, & - field_loc_center, field_type_vector) - call ice_HaloUpdate (strairyT, halo_info, & - field_loc_center, field_type_vector) - call grid_average_X2Y('F', strairxT, 'T', strairxU, 'U') - call grid_average_X2Y('F', strairyT, 'T', strairyU, 'U') - endif - - if (grid_ice == 'CD' .or. grid_ice == 'C') then - if (.not. calc_strair) then - call grid_average_X2Y('F', strax , grid_atm_dynu, strairxN, 'N') - call grid_average_X2Y('F', stray , grid_atm_dynv, strairyN, 'N') - call grid_average_X2Y('F', strax , grid_atm_dynu, strairxE, 'E') - call grid_average_X2Y('F', stray , grid_atm_dynv, strairyE, 'E') - else - call grid_average_X2Y('F', strairxT, 'T' , strairxN, 'N') - call grid_average_X2Y('F', strairyT, 'T' , strairyN, 'N') - call grid_average_X2Y('F', strairxT, 'T' , strairxE, 'E') - call grid_average_X2Y('F', strairyT, 'T' , strairyE, 'E') - endif - endif - - !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,ij,i,j) SCHEDULE(runtime) - do iblk = 1, nblocks - - !----------------------------------------------------------------- - ! 
more preparation for dynamics - !----------------------------------------------------------------- - - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi - - if (trim(grid_ice) == 'B') then - call dyn_prep2 (nx_block, ny_block, & - ilo, ihi, jlo, jhi, & - icellT (iblk), icellU (iblk), & - indxTi (:,iblk), indxTj (:,iblk), & - indxUi (:,iblk), indxUj (:,iblk), & - aiU (:,:,iblk), umass (:,:,iblk), & - umassdti (:,:,iblk), fcor_blk (:,:,iblk), & - umask (:,:,iblk), & - uocnU (:,:,iblk), vocnU (:,:,iblk), & - strairxU (:,:,iblk), strairyU (:,:,iblk), & - ss_tltxU (:,:,iblk), ss_tltyU (:,:,iblk), & - iceTmask (:,:,iblk), iceUmask (:,:,iblk), & - fmU (:,:,iblk), dt, & - strtltxU (:,:,iblk), strtltyU (:,:,iblk), & - strocnxU (:,:,iblk), strocnyU (:,:,iblk), & - strintxU (:,:,iblk), strintyU (:,:,iblk), & - taubxU (:,:,iblk), taubyU (:,:,iblk), & - waterxU (:,:,iblk), wateryU (:,:,iblk), & - forcexU (:,:,iblk), forceyU (:,:,iblk), & - stressp_1 (:,:,iblk), stressp_2 (:,:,iblk), & - stressp_3 (:,:,iblk), stressp_4 (:,:,iblk), & - stressm_1 (:,:,iblk), stressm_2 (:,:,iblk), & - stressm_3 (:,:,iblk), stressm_4 (:,:,iblk), & - stress12_1(:,:,iblk), stress12_2(:,:,iblk), & - stress12_3(:,:,iblk), stress12_4(:,:,iblk), & - uvel_init (:,:,iblk), vvel_init (:,:,iblk), & - uvel (:,:,iblk), vvel (:,:,iblk), & - TbU (:,:,iblk)) - - elseif (trim(grid_ice) == 'CD' .or. 
grid_ice == 'C') then - call dyn_prep2 (nx_block, ny_block, & - ilo, ihi, jlo, jhi, & - icellT (iblk), icellU (iblk), & - indxTi (:,iblk), indxTj (:,iblk), & - indxUi (:,iblk), indxUj (:,iblk), & - aiU (:,:,iblk), umass (:,:,iblk), & - umassdti (:,:,iblk), fcor_blk (:,:,iblk), & - umaskCD (:,:,iblk), & - uocnU (:,:,iblk), vocnU (:,:,iblk), & - strairxU (:,:,iblk), strairyU (:,:,iblk), & - ss_tltxU (:,:,iblk), ss_tltyU (:,:,iblk), & - iceTmask (:,:,iblk), iceUmask (:,:,iblk), & - fmU (:,:,iblk), dt, & - strtltxU (:,:,iblk), strtltyU (:,:,iblk), & - strocnxU (:,:,iblk), strocnyU (:,:,iblk), & - strintxU (:,:,iblk), strintyU (:,:,iblk), & - taubxU (:,:,iblk), taubyU (:,:,iblk), & - waterxU (:,:,iblk), wateryU (:,:,iblk), & - forcexU (:,:,iblk), forceyU (:,:,iblk), & - stressp_1 (:,:,iblk), stressp_2 (:,:,iblk), & - stressp_3 (:,:,iblk), stressp_4 (:,:,iblk), & - stressm_1 (:,:,iblk), stressm_2 (:,:,iblk), & - stressm_3 (:,:,iblk), stressm_4 (:,:,iblk), & - stress12_1(:,:,iblk), stress12_2(:,:,iblk), & - stress12_3(:,:,iblk), stress12_4(:,:,iblk), & - uvel_init (:,:,iblk), vvel_init (:,:,iblk), & - uvel (:,:,iblk), vvel (:,:,iblk), & - TbU (:,:,iblk)) - endif - - !----------------------------------------------------------------- - ! ice strength - !----------------------------------------------------------------- - - strength(:,:,iblk) = c0 ! initialize - do ij = 1, icellT(iblk) - i = indxTi(ij, iblk) - j = indxTj(ij, iblk) - call icepack_ice_strength(ncat = ncat, & - aice = aice (i,j, iblk), & - vice = vice (i,j, iblk), & - aice0 = aice0 (i,j, iblk), & - aicen = aicen (i,j,:,iblk), & - vicen = vicen (i,j,:,iblk), & - strength = strength(i,j, iblk) ) - enddo ! ij - - enddo ! iblk - !$OMP END PARALLEL DO - - if (grid_ice == 'CD' .or. grid_ice == 'C') then - - !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,i,j) SCHEDULE(runtime) - do iblk = 1, nblocks - - !----------------------------------------------------------------- - ! 
more preparation for dynamics on N grid - !----------------------------------------------------------------- - - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi - - call dyn_prep2 (nx_block, ny_block, & - ilo, ihi, jlo, jhi, & - icellT (iblk), icellN (iblk), & - indxTi (:,iblk), indxTj (:,iblk), & - indxNi (:,iblk), indxNj (:,iblk), & - aiN (:,:,iblk), nmass (:,:,iblk), & - nmassdti (:,:,iblk), fcorN_blk (:,:,iblk), & - nmask (:,:,iblk), & - uocnN (:,:,iblk), vocnN (:,:,iblk), & - strairxN (:,:,iblk), strairyN (:,:,iblk), & - ss_tltxN (:,:,iblk), ss_tltyN (:,:,iblk), & - iceTmask (:,:,iblk), iceNmask (:,:,iblk), & - fmN (:,:,iblk), dt, & - strtltxN (:,:,iblk), strtltyN (:,:,iblk), & - strocnxN (:,:,iblk), strocnyN (:,:,iblk), & - strintxN (:,:,iblk), strintyN (:,:,iblk), & - taubxN (:,:,iblk), taubyN (:,:,iblk), & - waterxN (:,:,iblk), wateryN (:,:,iblk), & - forcexN (:,:,iblk), forceyN (:,:,iblk), & - stressp_1 (:,:,iblk), stressp_2 (:,:,iblk), & - stressp_3 (:,:,iblk), stressp_4 (:,:,iblk), & - stressm_1 (:,:,iblk), stressm_2 (:,:,iblk), & - stressm_3 (:,:,iblk), stressm_4 (:,:,iblk), & - stress12_1(:,:,iblk), stress12_2(:,:,iblk), & - stress12_3(:,:,iblk), stress12_4(:,:,iblk), & - uvelN_init(:,:,iblk), vvelN_init(:,:,iblk), & - uvelN (:,:,iblk), vvelN (:,:,iblk), & - TbN (:,:,iblk)) - - !----------------------------------------------------------------- - ! 
more preparation for dynamics on E grid - !----------------------------------------------------------------- - - call dyn_prep2 (nx_block, ny_block, & - ilo, ihi, jlo, jhi, & - icellT (iblk), icellE (iblk), & - indxTi (:,iblk), indxTj (:,iblk), & - indxEi (:,iblk), indxEj (:,iblk), & - aiE (:,:,iblk), emass (:,:,iblk), & - emassdti (:,:,iblk), fcorE_blk (:,:,iblk), & - emask (:,:,iblk), & - uocnE (:,:,iblk), vocnE (:,:,iblk), & - strairxE (:,:,iblk), strairyE (:,:,iblk), & - ss_tltxE (:,:,iblk), ss_tltyE (:,:,iblk), & - iceTmask (:,:,iblk), iceEmask (:,:,iblk), & - fmE (:,:,iblk), dt, & - strtltxE (:,:,iblk), strtltyE (:,:,iblk), & - strocnxE (:,:,iblk), strocnyE (:,:,iblk), & - strintxE (:,:,iblk), strintyE (:,:,iblk), & - taubxE (:,:,iblk), taubyE (:,:,iblk), & - waterxE (:,:,iblk), wateryE (:,:,iblk), & - forcexE (:,:,iblk), forceyE (:,:,iblk), & - stressp_1 (:,:,iblk), stressp_2 (:,:,iblk), & - stressp_3 (:,:,iblk), stressp_4 (:,:,iblk), & - stressm_1 (:,:,iblk), stressm_2 (:,:,iblk), & - stressm_3 (:,:,iblk), stressm_4 (:,:,iblk), & - stress12_1(:,:,iblk), stress12_2(:,:,iblk), & - stress12_3(:,:,iblk), stress12_4(:,:,iblk), & - uvelE_init(:,:,iblk), vvelE_init(:,:,iblk), & - uvelE (:,:,iblk), vvelE (:,:,iblk), & - TbE (:,:,iblk)) - - - do i=1,nx_block - do j=1,ny_block - if (.not.iceUmask(i,j,iblk)) then - stresspU (i,j,iblk) = c0 - stressmU (i,j,iblk) = c0 - stress12U(i,j,iblk) = c0 - endif - if (.not.iceTmask(i,j,iblk)) then - stresspT (i,j,iblk) = c0 - stressmT (i,j,iblk) = c0 - stress12T(i,j,iblk) = c0 - endif - enddo - enddo - enddo ! iblk - !$OMP END PARALLEL DO - - endif ! grid_ice - - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) - - if (grid_ice == 'CD' .or. 
grid_ice == 'C') then - - call ice_timer_start(timer_bound) - call ice_HaloUpdate (uvelE, halo_info, & - field_loc_Eface, field_type_vector) - call ice_HaloUpdate (vvelN, halo_info, & - field_loc_Nface, field_type_vector) - call ice_timer_stop(timer_bound) - - if (grid_ice == 'C') then - call grid_average_X2Y('A', uvelE, 'E', uvelN, 'N') - call grid_average_X2Y('A', vvelN, 'N', vvelE, 'E') - uvelN(:,:,:) = uvelN(:,:,:)*npm(:,:,:) - vvelE(:,:,:) = vvelE(:,:,:)*epm(:,:,:) - endif - - call ice_timer_start(timer_bound) - call ice_HaloUpdate (uvelN, halo_info, & - field_loc_Nface, field_type_vector) - call ice_HaloUpdate (vvelE, halo_info, & - field_loc_Eface, field_type_vector) - call ice_timer_stop(timer_bound) - - call grid_average_X2Y('S', uvelE, 'E', uvel, 'U') - call grid_average_X2Y('S', vvelN, 'N', vvel, 'U') - uvel(:,:,:) = uvel(:,:,:)*uvm(:,:,:) - vvel(:,:,:) = vvel(:,:,:)*uvm(:,:,:) - endif - - call ice_timer_start(timer_bound) - call ice_HaloUpdate (strength, halo_info, & - field_loc_center, field_type_scalar) - - ! velocities may have changed in dyn_prep2 - call stack_fields(uvel, vvel, fld2) - call ice_HaloUpdate (fld2, halo_info, & - field_loc_NEcorner, field_type_vector) - call unstack_fields(fld2, uvel, vvel) - call ice_timer_stop(timer_bound) - - if (maskhalo_dyn) then - halomask = 0 - if (grid_ice == 'B') then - where (iceUmask) halomask = 1 - elseif (grid_ice == 'C' .or. grid_ice == 'CD') then - !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,i,j) SCHEDULE(runtime) - do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi - do j = jlo,jhi - do i = ilo,ihi - if (iceTmask(i ,j ,iblk) .or. & - iceTmask(i-1,j ,iblk) .or. & - iceTmask(i+1,j ,iblk) .or. & - iceTmask(i ,j-1,iblk) .or. 
& - iceTmask(i ,j+1,iblk)) then - halomask(i,j,iblk) = 1 - endif - enddo - enddo - enddo - !$OMP END PARALLEL DO - endif - call ice_timer_start(timer_bound) - call ice_HaloUpdate (halomask, halo_info, & - field_loc_center, field_type_scalar) - call ice_timer_stop(timer_bound) - call ice_HaloMask(halo_info_mask, halo_info, halomask) - endif - - !----------------------------------------------------------------- - ! seabed stress factor TbU (TbU is part of Cb coefficient) - !----------------------------------------------------------------- - - if (seabed_stress) then - - if (grid_ice == "B") then - - if ( seabed_stress_method == 'LKD' ) then - !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) - do iblk = 1, nblocks - call seabed_stress_factor_LKD (nx_block , ny_block , & - icellU (iblk), & - indxUi (:,iblk), indxUj(:,iblk), & - vice (:,:,iblk), aice(:,:,iblk), & - hwater(:,:,iblk), TbU (:,:,iblk)) - enddo - !$OMP END PARALLEL DO - - elseif ( seabed_stress_method == 'probabilistic' ) then - !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) - do iblk = 1, nblocks - call seabed_stress_factor_prob (nx_block , ny_block , & - icellT(iblk), indxTi(:,iblk), indxTj(:,iblk), & - icellU(iblk), indxUi(:,iblk), indxUj(:,iblk), & - aicen(:,:,:,iblk), vicen(:,:,:,iblk) , & - hwater (:,:,iblk), TbU (:,:,iblk)) - enddo - !$OMP END PARALLEL DO - endif - - elseif (grid_ice == "C" .or. 
grid_ice == "CD") then - - if ( seabed_stress_method == 'LKD' ) then - !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) - do iblk = 1, nblocks - call seabed_stress_factor_LKD (nx_block , ny_block, & - icellE (iblk), & - indxEi (:,iblk), indxEj(:,iblk), & - vice (:,:,iblk), aice(:,:,iblk), & - hwater(:,:,iblk), TbE (:,:,iblk)) - call seabed_stress_factor_LKD (nx_block , ny_block, & - icellN (iblk), & - indxNi (:,iblk), indxNj(:,iblk), & - vice (:,:,iblk), aice(:,:,iblk), & - hwater(:,:,iblk), TbN (:,:,iblk)) - enddo - !$OMP END PARALLEL DO - - elseif ( seabed_stress_method == 'probabilistic' ) then - !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) - do iblk = 1, nblocks - call seabed_stress_factor_prob (nx_block , ny_block , & - icellT(iblk), indxTi(:,iblk), indxTj(:,iblk), & - icellU(iblk), indxUi(:,iblk), indxUj(:,iblk), & - aicen(:,:,:,iblk), vicen(:,:,:,iblk) , & - hwater (:,:,iblk), TbU (:,:,iblk) , & - TbE (:,:,iblk), TbN (:,:,iblk) , & - icellE(iblk), indxEi(:,iblk), indxEj(:,iblk), & - icellN(iblk), indxNi(:,iblk), indxNj(:,iblk) ) - enddo - !$OMP END PARALLEL DO - endif - - endif - - endif - - if (evp_algorithm == "shared_mem_1d" ) then - - if (first_time .and. my_task == master_task) then - write(nu_diag,'(3a)') subname,' Entering evp_algorithm version ',evp_algorithm - first_time = .false. - endif - if (trim(grid_type) == 'tripole') then - call abort_ice(trim(subname)//' & - & Kernel not tested on tripole grid. 
Set evp_algorithm=standard_2d') - endif - - call ice_timer_start(timer_evp_1d) - call ice_dyn_evp_1d_copyin( & - nx_block,ny_block,nblocks,nx_global+2*nghost,ny_global+2*nghost, & - iceTmask, iceUmask, & - cdn_ocnU,aiU,uocnU,vocnU,forcexU,forceyU,TbU, & - umassdti,fmU,uarear,tarear,strintxU,strintyU,uvel_init,vvel_init,& - strength,uvel,vvel,dxT,dyT, & - stressp_1 ,stressp_2, stressp_3, stressp_4, & - stressm_1 ,stressm_2, stressm_3, stressm_4, & - stress12_1,stress12_2,stress12_3,stress12_4 ) - call ice_dyn_evp_1d_kernel() - call ice_dyn_evp_1d_copyout( & - nx_block,ny_block,nblocks,nx_global+2*nghost,ny_global+2*nghost, & -!strocn uvel,vvel, strocnxU,strocnyU, strintxU,strintyU, & - uvel,vvel, strintxU,strintyU, & - stressp_1, stressp_2, stressp_3, stressp_4, & - stressm_1, stressm_2, stressm_3, stressm_4, & - stress12_1,stress12_2,stress12_3,stress12_4, & - divu,rdg_conv,rdg_shear,shear,taubxU,taubyU ) - call ice_timer_stop(timer_evp_1d) - - else ! evp_algorithm == standard_2d (Standard CICE) - - call ice_timer_start(timer_evp_2d) - - if (grid_ice == "B") then - - do ksub = 1,ndte ! subcycling - - !$OMP PARALLEL DO PRIVATE(iblk,strtmp) SCHEDULE(runtime) - do iblk = 1, nblocks - - !----------------------------------------------------------------- - ! 
stress tensor equation, total surface stress - !----------------------------------------------------------------- - call stress (nx_block , ny_block , & - icellT (iblk), & - indxTi (:,iblk), indxTj (:,iblk), & - uvel (:,:,iblk), vvel (:,:,iblk), & - dxT (:,:,iblk), dyT (:,:,iblk), & - dxhy (:,:,iblk), dyhx (:,:,iblk), & - cxp (:,:,iblk), cyp (:,:,iblk), & - cxm (:,:,iblk), cym (:,:,iblk), & - DminTarea (:,:,iblk), & - strength (:,:,iblk), & - stressp_1 (:,:,iblk), stressp_2 (:,:,iblk), & - stressp_3 (:,:,iblk), stressp_4 (:,:,iblk), & - stressm_1 (:,:,iblk), stressm_2 (:,:,iblk), & - stressm_3 (:,:,iblk), stressm_4 (:,:,iblk), & - stress12_1(:,:,iblk), stress12_2(:,:,iblk), & - stress12_3(:,:,iblk), stress12_4(:,:,iblk), & - strtmp (:,:,:) ) - - !----------------------------------------------------------------- - ! momentum equation - !----------------------------------------------------------------- - call stepu (nx_block , ny_block , & - icellU (iblk), Cdn_ocnU(:,:,iblk), & - indxUi (:,iblk), indxUj (:,iblk), & - aiU (:,:,iblk), strtmp (:,:,:), & - uocnU (:,:,iblk), vocnU (:,:,iblk), & - waterxU (:,:,iblk), wateryU (:,:,iblk), & - forcexU (:,:,iblk), forceyU (:,:,iblk), & - umassdti (:,:,iblk), fmU (:,:,iblk), & - uarear (:,:,iblk), & - strintxU (:,:,iblk), strintyU(:,:,iblk), & - taubxU (:,:,iblk), taubyU (:,:,iblk), & - uvel_init(:,:,iblk), vvel_init(:,:,iblk),& - uvel (:,:,iblk), vvel (:,:,iblk), & - TbU (:,:,iblk)) - - enddo ! iblk - !$OMP END PARALLEL DO - - ! U fields at NE corner - ! calls ice_haloUpdate, controls bundles and masks - call dyn_haloUpdate (halo_info, halo_info_mask, & - field_loc_NEcorner, field_type_vector, & - uvel, vvel) - - enddo ! sub cycling - - !----------------------------------------------------------------- - ! 
save quantities for mechanical redistribution - !----------------------------------------------------------------- - - !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) - do iblk = 1, nblocks - call deformations (nx_block , ny_block , & - icellT (iblk), & - indxTi (:,iblk), indxTj (:,iblk), & - uvel (:,:,iblk), vvel (:,:,iblk), & - dxT (:,:,iblk), dyT (:,:,iblk), & - cxp (:,:,iblk), cyp (:,:,iblk), & - cxm (:,:,iblk), cym (:,:,iblk), & - tarear (:,:,iblk), & - shear (:,:,iblk), divu (:,:,iblk), & - rdg_conv(:,:,iblk), rdg_shear(:,:,iblk) ) - enddo - !$OMP END PARALLEL DO - - - elseif (grid_ice == "C") then - - do ksub = 1,ndte ! subcycling - - !$OMP PARALLEL DO PRIVATE(iblk) - do iblk = 1, nblocks - - !----------------------------------------------------------------- - ! strain rates at U point - ! NOTE these are actually strain rates * area (m^2/s) - !----------------------------------------------------------------- - call strain_rates_U (nx_block , ny_block , & - icellU (iblk), & - indxUi (:,iblk), indxUj (:,iblk), & - uvelE (:,:,iblk), vvelE (:,:,iblk), & - uvelN (:,:,iblk), vvelN (:,:,iblk), & - uvel (:,:,iblk), vvel (:,:,iblk), & - dxE (:,:,iblk), dyN (:,:,iblk), & - dxU (:,:,iblk), dyU (:,:,iblk), & - ratiodxN(:,:,iblk), ratiodxNr(:,:,iblk), & - ratiodyE(:,:,iblk), ratiodyEr(:,:,iblk), & - epm (:,:,iblk), npm (:,:,iblk), & - divergU (:,:,iblk), tensionU (:,:,iblk), & - shearU (:,:,iblk), deltaU (:,:,iblk) ) - - enddo ! iblk - !$OMP END PARALLEL DO - - ! 
calls ice_haloUpdate, controls bundles and masks - call dyn_haloUpdate (halo_info, halo_info_mask, & - field_loc_NEcorner, field_type_scalar, & - shearU) - - !$OMP PARALLEL DO PRIVATE(iblk) - do iblk = 1, nblocks - call stressC_T (nx_block , ny_block , & - icellT (iblk), & - indxTi (:,iblk), indxTj (:,iblk), & - uvelE (:,:,iblk), vvelE (:,:,iblk), & - uvelN (:,:,iblk), vvelN (:,:,iblk), & - dxN (:,:,iblk), dyE (:,:,iblk), & - dxT (:,:,iblk), dyT (:,:,iblk), & - uarea (:,:,iblk), DminTarea (:,:,iblk), & - strength (:,:,iblk), shearU (:,:,iblk), & - zetax2T (:,:,iblk), etax2T (:,:,iblk), & - stresspT (:,:,iblk), stressmT (:,:,iblk)) - - enddo - !$OMP END PARALLEL DO - - ! calls ice_haloUpdate, controls bundles and masks - call dyn_haloUpdate (halo_info, halo_info_mask, & - field_loc_center, field_type_scalar, & - zetax2T, etax2T, stresspT, stressmT) - - if (visc_method == 'avg_strength') then - call grid_average_X2Y('S', strength, 'T', strengthU, 'U') - elseif (visc_method == 'avg_zeta') then - call grid_average_X2Y('S', etax2T , 'T', etax2U , 'U') - endif - - !$OMP PARALLEL DO PRIVATE(iblk) - do iblk = 1, nblocks - call stressC_U (nx_block , ny_block , & - icellU (iblk), & - indxUi (:,iblk), indxUj (:,iblk), & - uarea (:,:,iblk), & - etax2U (:,:,iblk), deltaU (:,:,iblk), & - strengthU (:,:,iblk), shearU (:,:,iblk), & - stress12U (:,:,iblk)) - enddo - !$OMP END PARALLEL DO - - ! 
calls ice_haloUpdate, controls bundles and masks - call dyn_haloUpdate (halo_info , halo_info_mask, & - field_loc_NEcorner, field_type_scalar, & - stress12U) - - !$OMP PARALLEL DO PRIVATE(iblk) - do iblk = 1, nblocks - - call div_stress_Ex (nx_block , ny_block , & - icellE (iblk), & - indxEi (:,iblk), indxEj (:,iblk), & - dxE (:,:,iblk), dyE (:,:,iblk), & - dxU (:,:,iblk), dyT (:,:,iblk), & - earear (:,:,iblk) , & - stresspT (:,:,iblk), stressmT (:,:,iblk), & - stress12U (:,:,iblk), strintxE (:,:,iblk) ) - - call div_stress_Ny (nx_block , ny_block , & - icellN (iblk), & - indxNi (:,iblk), indxNj (:,iblk), & - dxN (:,:,iblk), dyN (:,:,iblk), & - dxT (:,:,iblk), dyU (:,:,iblk), & - narear (:,:,iblk) , & - stresspT (:,:,iblk), stressmT (:,:,iblk), & - stress12U (:,:,iblk), strintyN (:,:,iblk) ) - - enddo - !$OMP END PARALLEL DO - - !$OMP PARALLEL DO PRIVATE(iblk) - do iblk = 1, nblocks - - call stepu_C (nx_block , ny_block , & ! u, E point - icellE (iblk), Cdn_ocnE (:,:,iblk), & - indxEi (:,iblk), indxEj (:,iblk), & - aiE (:,:,iblk), & - uocnE (:,:,iblk), vocnE (:,:,iblk), & - waterxE (:,:,iblk), forcexE (:,:,iblk), & - emassdti (:,:,iblk), fmE (:,:,iblk), & - strintxE (:,:,iblk), taubxE (:,:,iblk), & - uvelE_init(:,:,iblk), & - uvelE (:,:,iblk), vvelE (:,:,iblk), & - TbE (:,:,iblk)) - - call stepv_C (nx_block, ny_block, & ! v, N point - icellN (iblk), Cdn_ocnN (:,:,iblk), & - indxNi (:,iblk), indxNj (:,iblk), & - aiN (:,:,iblk), & - uocnN (:,:,iblk), vocnN (:,:,iblk), & - wateryN (:,:,iblk), forceyN (:,:,iblk), & - nmassdti (:,:,iblk), fmN (:,:,iblk), & - strintyN (:,:,iblk), taubyN (:,:,iblk), & - vvelN_init(:,:,iblk), & - uvelN (:,:,iblk), vvelN (:,:,iblk), & - TbN (:,:,iblk)) - enddo - !$OMP END PARALLEL DO - - ! 
calls ice_haloUpdate, controls bundles and masks - call dyn_haloUpdate (halo_info, halo_info_mask, & - field_loc_Eface, field_type_vector, & - uvelE) - call dyn_haloUpdate (halo_info, halo_info_mask, & - field_loc_Nface, field_type_vector, & - vvelN) - - call grid_average_X2Y('A', uvelE, 'E', uvelN, 'N') - call grid_average_X2Y('A', vvelN, 'N', vvelE, 'E') - uvelN(:,:,:) = uvelN(:,:,:)*npm(:,:,:) - vvelE(:,:,:) = vvelE(:,:,:)*epm(:,:,:) - - ! calls ice_haloUpdate, controls bundles and masks - call dyn_haloUpdate (halo_info, halo_info_mask, & - field_loc_Nface, field_type_vector, & - uvelN) - call dyn_haloUpdate (halo_info, halo_info_mask, & - field_loc_Eface, field_type_vector, & - vvelE) - - call grid_average_X2Y('S', uvelE, 'E', uvel, 'U') - call grid_average_X2Y('S', vvelN, 'N', vvel, 'U') - - uvel(:,:,:) = uvel(:,:,:)*uvm(:,:,:) - vvel(:,:,:) = vvel(:,:,:)*uvm(:,:,:) - ! U fields at NE corner - ! calls ice_haloUpdate, controls bundles and masks - call dyn_haloUpdate (halo_info, halo_info_mask, & - field_loc_NEcorner, field_type_vector, & - uvel, vvel) - - enddo ! subcycling - - !----------------------------------------------------------------- - ! save quantities for mechanical redistribution - !----------------------------------------------------------------- - - !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) - do iblk = 1, nblocks - call deformationsC_T (nx_block , ny_block , & - icellT (iblk), & - indxTi (:,iblk), indxTj (:,iblk), & - uvelE (:,:,iblk), vvelE (:,:,iblk), & - uvelN (:,:,iblk), vvelN (:,:,iblk), & - dxN (:,:,iblk), dyE (:,:,iblk), & - dxT (:,:,iblk), dyT (:,:,iblk), & - tarear (:,:,iblk), uarea (:,:,iblk), & - shearU (:,:,iblk), & - shear (:,:,iblk), divu (:,:,iblk), & - rdg_conv(:,:,iblk), rdg_shear(:,:,iblk)) - enddo - !$OMP END PARALLEL DO - - elseif (grid_ice == "CD") then - - do ksub = 1,ndte ! 
subcycling - - !$OMP PARALLEL DO PRIVATE(iblk) - do iblk = 1, nblocks - call stressCD_T (nx_block , ny_block , & - icellT (iblk), & - indxTi (:,iblk), indxTj (:,iblk), & - uvelE (:,:,iblk), vvelE (:,:,iblk), & - uvelN (:,:,iblk), vvelN (:,:,iblk), & - dxN (:,:,iblk), dyE (:,:,iblk), & - dxT (:,:,iblk), dyT (:,:,iblk), & - DminTarea (:,:,iblk), & - strength (:,:,iblk), & - zetax2T (:,:,iblk), etax2T (:,:,iblk), & - stresspT (:,:,iblk), stressmT (:,:,iblk), & - stress12T (:,:,iblk) ) - - enddo - !$OMP END PARALLEL DO - - ! calls ice_haloUpdate, controls bundles and masks - call dyn_haloUpdate (halo_info, halo_info_mask, & - field_loc_center, field_type_scalar, & - zetax2T, etax2T) - - if (visc_method == 'avg_strength') then - call grid_average_X2Y('S', strength, 'T', strengthU, 'U') - elseif (visc_method == 'avg_zeta') then - call grid_average_X2Y('S', zetax2T , 'T', zetax2U , 'U') - call grid_average_X2Y('S', etax2T , 'T', etax2U , 'U') - endif - - !$OMP PARALLEL DO PRIVATE(iblk) - do iblk = 1, nblocks - !----------------------------------------------------------------- - ! strain rates at U point - ! 
NOTE these are actually strain rates * area (m^2/s) - !----------------------------------------------------------------- - call strain_rates_U (nx_block , ny_block , & - icellU (iblk), & - indxUi (:,iblk), indxUj (:,iblk), & - uvelE (:,:,iblk), vvelE (:,:,iblk), & - uvelN (:,:,iblk), vvelN (:,:,iblk), & - uvel (:,:,iblk), vvel (:,:,iblk), & - dxE (:,:,iblk), dyN (:,:,iblk), & - dxU (:,:,iblk), dyU (:,:,iblk), & - ratiodxN (:,:,iblk), ratiodxNr(:,:,iblk), & - ratiodyE (:,:,iblk), ratiodyEr(:,:,iblk), & - epm (:,:,iblk), npm (:,:,iblk), & - divergU (:,:,iblk), tensionU (:,:,iblk), & - shearU (:,:,iblk), DeltaU (:,:,iblk) ) - - call stressCD_U (nx_block , ny_block , & - icellU (iblk), & - indxUi (:,iblk), indxUj (:,iblk), & - uarea (:,:,iblk), & - zetax2U (:,:,iblk), etax2U (:,:,iblk), & - strengthU(:,:,iblk), & - divergU (:,:,iblk), tensionU (:,:,iblk), & - shearU (:,:,iblk), DeltaU (:,:,iblk), & - stresspU (:,:,iblk), stressmU (:,:,iblk), & - stress12U(:,:,iblk)) - enddo - !$OMP END PARALLEL DO - - ! 
calls ice_haloUpdate, controls bundles and masks - call dyn_haloUpdate (halo_info, halo_info_mask, & - field_loc_center, field_type_scalar, & - stresspT, stressmT, stress12T) - call dyn_haloUpdate (halo_info, halo_info_mask, & - field_loc_NEcorner,field_type_scalar, & - stresspU, stressmU, stress12U) - - !$OMP PARALLEL DO PRIVATE(iblk) - do iblk = 1, nblocks - - call div_stress_Ex (nx_block , ny_block , & - icellE (iblk), & - indxEi (:,iblk), indxEj (:,iblk), & - dxE (:,:,iblk), dyE (:,:,iblk), & - dxU (:,:,iblk), dyT (:,:,iblk), & - earear (:,:,iblk) , & - stresspT (:,:,iblk), stressmT (:,:,iblk), & - stress12U (:,:,iblk), strintxE (:,:,iblk) ) - - call div_stress_Ey (nx_block , ny_block , & - icellE (iblk), & - indxEi (:,iblk), indxEj (:,iblk), & - dxE (:,:,iblk), dyE (:,:,iblk), & - dxU (:,:,iblk), dyT (:,:,iblk), & - earear (:,:,iblk) , & - stresspU (:,:,iblk), stressmU (:,:,iblk), & - stress12T (:,:,iblk), strintyE (:,:,iblk) ) - - call div_stress_Nx (nx_block , ny_block , & - icellN (iblk), & - indxNi (:,iblk), indxNj (:,iblk), & - dxN (:,:,iblk), dyN (:,:,iblk), & - dxT (:,:,iblk), dyU (:,:,iblk), & - narear (:,:,iblk) , & - stresspU (:,:,iblk), stressmU (:,:,iblk), & - stress12T (:,:,iblk), strintxN (:,:,iblk) ) - - call div_stress_Ny (nx_block , ny_block , & - icellN (iblk), & - indxNi (:,iblk), indxNj (:,iblk), & - dxN (:,:,iblk), dyN (:,:,iblk), & - dxT (:,:,iblk), dyU (:,:,iblk), & - narear (:,:,iblk) , & - stresspT (:,:,iblk), stressmT (:,:,iblk), & - stress12U (:,:,iblk), strintyN (:,:,iblk) ) - - enddo - !$OMP END PARALLEL DO - - !$OMP PARALLEL DO PRIVATE(iblk) - do iblk = 1, nblocks - - call stepuv_CD (nx_block , ny_block , & ! 
E point - icellE (iblk), Cdn_ocnE (:,:,iblk), & - indxEi (:,iblk), indxEj (:,iblk), & - aiE (:,:,iblk), & - uocnE (:,:,iblk), vocnE (:,:,iblk), & - waterxE (:,:,iblk), wateryE (:,:,iblk), & - forcexE (:,:,iblk), forceyE (:,:,iblk), & - emassdti (:,:,iblk), fmE (:,:,iblk), & - strintxE (:,:,iblk), strintyE (:,:,iblk), & - taubxE (:,:,iblk), taubyE (:,:,iblk), & - uvelE_init(:,:,iblk), vvelE_init(:,:,iblk), & - uvelE (:,:,iblk), vvelE (:,:,iblk), & - TbE (:,:,iblk)) - - call stepuv_CD (nx_block , ny_block , & ! N point - icellN (iblk), Cdn_ocnN (:,:,iblk), & - indxNi (:,iblk), indxNj (:,iblk), & - aiN (:,:,iblk), & - uocnN (:,:,iblk), vocnN (:,:,iblk), & - waterxN (:,:,iblk), wateryN (:,:,iblk), & - forcexN (:,:,iblk), forceyN (:,:,iblk), & - nmassdti (:,:,iblk), fmN (:,:,iblk), & - strintxN (:,:,iblk), strintyN (:,:,iblk), & - taubxN (:,:,iblk), taubyN (:,:,iblk), & - uvelN_init(:,:,iblk), vvelN_init(:,:,iblk), & - uvelN (:,:,iblk), vvelN (:,:,iblk), & - TbN (:,:,iblk)) - enddo - !$OMP END PARALLEL DO - - ! calls ice_haloUpdate, controls bundles and masks - call dyn_haloUpdate (halo_info, halo_info_mask, & - field_loc_Eface, field_type_vector, & - uvelE, vvelE) - call dyn_haloUpdate (halo_info, halo_info_mask, & - field_loc_Nface, field_type_vector, & - uvelN, vvelN) - - call grid_average_X2Y('S', uvelE, 'E', uvel, 'U') - call grid_average_X2Y('S', vvelN, 'N', vvel, 'U') - - uvel(:,:,:) = uvel(:,:,:)*uvm(:,:,:) - vvel(:,:,:) = vvel(:,:,:)*uvm(:,:,:) - ! U fields at NE corner - ! calls ice_haloUpdate, controls bundles and masks - call dyn_haloUpdate (halo_info, halo_info_mask, & - field_loc_NEcorner, field_type_vector, & - uvel, vvel) - - enddo ! subcycling - - !----------------------------------------------------------------- - ! 
save quantities for mechanical redistribution - !----------------------------------------------------------------- - - !$OMP PARALLEL DO PRIVATE(iblk) - do iblk = 1, nblocks - call deformationsCD_T (nx_block , ny_block , & - icellT (iblk), & - indxTi (:,iblk), indxTj (:,iblk), & - uvelE (:,:,iblk), vvelE (:,:,iblk), & - uvelN (:,:,iblk), vvelN (:,:,iblk), & - dxN (:,:,iblk), dyE (:,:,iblk), & - dxT (:,:,iblk), dyT (:,:,iblk), & - tarear (:,:,iblk), & - shear (:,:,iblk), divu (:,:,iblk), & - rdg_conv(:,:,iblk), rdg_shear(:,:,iblk)) - enddo - !$OMP END PARALLEL DO - endif ! grid_ice - - call ice_timer_stop(timer_evp_2d) - endif ! evp_algorithm - - deallocate(fld2,fld3,fld4) - if (grid_ice == 'CD' .or. grid_ice == 'C') then - deallocate(strengthU, divergU, tensionU, shearU, deltaU) - deallocate(zetax2T, zetax2U, etax2T, etax2U) - endif - - if (maskhalo_dyn) then - call ice_HaloDestroy(halo_info_mask) - endif - - ! Force symmetry across the tripole seam - if (trim(grid_type) == 'tripole') then - ! TODO: C/CD-grid - if (maskhalo_dyn) then - !------------------------------------------------------- - ! set halomask to zero because ice_HaloMask always keeps - ! 
local copies AND tripole zipper communication - !------------------------------------------------------- - halomask = 0 - call ice_HaloMask(halo_info_mask, halo_info, halomask) - - call ice_HaloUpdate_stress(stressp_1 , stressp_3 , halo_info_mask, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stressp_3 , stressp_1 , halo_info_mask, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stressp_2 , stressp_4 , halo_info_mask, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stressp_4 , stressp_2 , halo_info_mask, & - field_loc_center, field_type_scalar) - - call ice_HaloUpdate_stress(stressm_1 , stressm_3 , halo_info_mask, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stressm_3 , stressm_1 , halo_info_mask, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stressm_2 , stressm_4 , halo_info_mask, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stressm_4 , stressm_2 , halo_info_mask, & - field_loc_center, field_type_scalar) - - call ice_HaloUpdate_stress(stress12_1, stress12_3, halo_info_mask, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stress12_3, stress12_1, halo_info_mask, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stress12_2, stress12_4, halo_info_mask, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stress12_4, stress12_2, halo_info_mask, & - field_loc_center, field_type_scalar) - - call ice_HaloDestroy(halo_info_mask) - else - call ice_HaloUpdate_stress(stressp_1 , stressp_3 , halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stressp_3 , stressp_1 , halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stressp_2 , stressp_4 , halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stressp_4 , stressp_2 , halo_info, & - field_loc_center, field_type_scalar) - - call 
ice_HaloUpdate_stress(stressm_1 , stressm_3 , halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stressm_3 , stressm_1 , halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stressm_2 , stressm_4 , halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stressm_4 , stressm_2 , halo_info, & - field_loc_center, field_type_scalar) - - call ice_HaloUpdate_stress(stress12_1, stress12_3, halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stress12_3, stress12_1, halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stress12_2, stress12_4, halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stress12_4, stress12_2, halo_info, & - field_loc_center, field_type_scalar) - endif ! maskhalo - endif ! tripole - - !----------------------------------------------------------------- - ! ice-ocean stress - !----------------------------------------------------------------- - - !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) - do iblk = 1, nblocks - call dyn_finish & - (nx_block , ny_block , & - icellU (iblk), Cdn_ocnU(:,:,iblk), & - indxUi (:,iblk), indxUj (:,iblk), & - uvel (:,:,iblk), vvel (:,:,iblk), & - uocnU (:,:,iblk), vocnU (:,:,iblk), & - aiU (:,:,iblk), fmU (:,:,iblk), & - strocnxU(:,:,iblk), strocnyU(:,:,iblk)) - enddo - !$OMP END PARALLEL DO - - if (grid_ice == 'CD' .or. 
grid_ice == 'C') then - - !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) - do iblk = 1, nblocks - - call dyn_finish & - (nx_block , ny_block , & - icellN (iblk), Cdn_ocnN(:,:,iblk), & - indxNi (:,iblk), indxNj (:,iblk), & - uvelN (:,:,iblk), vvelN (:,:,iblk), & - uocnN (:,:,iblk), vocnN (:,:,iblk), & - aiN (:,:,iblk), fmN (:,:,iblk), & - strocnxN(:,:,iblk), strocnyN(:,:,iblk)) - - call dyn_finish & - (nx_block , ny_block , & - icellE (iblk), Cdn_ocnE(:,:,iblk), & - indxEi (:,iblk), indxEj (:,iblk), & - uvelE (:,:,iblk), vvelE (:,:,iblk), & - uocnE (:,:,iblk), vocnE (:,:,iblk), & - aiE (:,:,iblk), fmE (:,:,iblk), & - strocnxE(:,:,iblk), strocnyE(:,:,iblk)) - - enddo - !$OMP END PARALLEL DO - - endif - - if (grid_ice == 'CD' .or. grid_ice == 'C') then - call ice_HaloUpdate (strintxE, halo_info, & - field_loc_Eface, field_type_vector) - call ice_HaloUpdate (strintyN, halo_info, & - field_loc_Nface, field_type_vector) - call ice_timer_stop(timer_bound) - call grid_average_X2Y('S', strintxE, 'E', strintxU, 'U') ! diagnostic - call grid_average_X2Y('S', strintyN, 'N', strintyU, 'U') ! diagnostic - endif - - call ice_timer_stop(timer_dynamics) ! dynamics - - end subroutine evp - -!======================================================================= -! Computes the rates of strain and internal stress components for -! each of the four corners on each T-grid cell. -! Computes stress terms for the momentum equation -! -! author: Elizabeth C. Hunke, LANL - - subroutine stress (nx_block, ny_block, & - icellT, & - indxTi, indxTj, & - uvel, vvel, & - dxT, dyT, & - dxhy, dyhx, & - cxp, cyp, & - cxm, cym, & - DminTarea, & - strength, & - stressp_1, stressp_2, & - stressp_3, stressp_4, & - stressm_1, stressm_2, & - stressm_3, stressm_4, & - stress12_1, stress12_2, & - stress12_3, stress12_4, & - str ) - - use ice_dyn_shared, only: strain_rates, visc_replpress, capping - - integer (kind=int_kind), intent(in) :: & - nx_block, ny_block, & ! block dimensions - icellT ! no. 
of cells where iceTmask = .true. - - integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxTi , & ! compressed index in i-direction - indxTj ! compressed index in j-direction - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - strength , & ! ice strength (N/m) - uvel , & ! x-component of velocity (m/s) - vvel , & ! y-component of velocity (m/s) - dxT , & ! width of T-cell through the middle (m) - dyT , & ! height of T-cell through the middle (m) - dxhy , & ! 0.5*(HTE - HTW) - dyhx , & ! 0.5*(HTN - HTS) - cyp , & ! 1.5*HTE - 0.5*HTW - cxp , & ! 1.5*HTN - 0.5*HTS - cym , & ! 0.5*HTE - 1.5*HTW - cxm , & ! 0.5*HTN - 1.5*HTS - DminTarea ! deltaminEVP*tarea - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & - stressp_1, stressp_2, stressp_3, stressp_4 , & ! sigma11+sigma22 - stressm_1, stressm_2, stressm_3, stressm_4 , & ! sigma11-sigma22 - stress12_1,stress12_2,stress12_3,stress12_4 ! sigma12 - - real (kind=dbl_kind), dimension(nx_block,ny_block,8), intent(out) :: & - str ! stress combinations - - ! local variables - - integer (kind=int_kind) :: & - i, j, ij - - real (kind=dbl_kind) :: & - divune, divunw, divuse, divusw , & ! divergence - tensionne, tensionnw, tensionse, tensionsw, & ! tension - shearne, shearnw, shearse, shearsw , & ! shearing - Deltane, Deltanw, Deltase, Deltasw , & ! Delt - zetax2ne, zetax2nw, zetax2se, zetax2sw , & ! 2 x zeta (bulk visc) - etax2ne, etax2nw, etax2se, etax2sw , & ! 2 x eta (shear visc) - rep_prsne, rep_prsnw, rep_prsse, rep_prssw, & ! replacement pressure -! puny , & ! 
puny - ssigpn, ssigps, ssigpe, ssigpw , & - ssigmn, ssigms, ssigme, ssigmw , & - ssig12n, ssig12s, ssig12e, ssig12w , & - ssigp1, ssigp2, ssigm1, ssigm2, ssig121, ssig122, & - csigpne, csigpnw, csigpse, csigpsw , & - csigmne, csigmnw, csigmse, csigmsw , & - csig12ne, csig12nw, csig12se, csig12sw , & - str12ew, str12we, str12ns, str12sn , & - strp_tmp, strm_tmp - - character(len=*), parameter :: subname = '(stress)' - - !----------------------------------------------------------------- - ! Initialize - !----------------------------------------------------------------- - - str(:,:,:) = c0 - - do ij = 1, icellT - i = indxTi(ij) - j = indxTj(ij) - - !----------------------------------------------------------------- - ! strain rates - ! NOTE these are actually strain rates * area (m^2/s) - !----------------------------------------------------------------- - - call strain_rates (nx_block, ny_block, & - i, j, & - uvel, vvel, & - dxT, dyT, & - cxp, cyp, & - cxm, cym, & - divune, divunw, & - divuse, divusw, & - tensionne, tensionnw, & - tensionse, tensionsw, & - shearne, shearnw, & - shearse, shearsw, & - Deltane, Deltanw, & - Deltase, Deltasw ) - - !----------------------------------------------------------------- - ! viscosities and replacement pressure - !----------------------------------------------------------------- - - call visc_replpress (strength(i,j), DminTarea(i,j), Deltane, & - zetax2ne, etax2ne, rep_prsne, capping) - - call visc_replpress (strength(i,j), DminTarea(i,j), Deltanw, & - zetax2nw, etax2nw, rep_prsnw, capping) - - call visc_replpress (strength(i,j), DminTarea(i,j), Deltasw, & - zetax2sw, etax2sw, rep_prssw, capping) - - call visc_replpress (strength(i,j), DminTarea(i,j), Deltase, & - zetax2se, etax2se, rep_prsse, capping) - - !----------------------------------------------------------------- - ! the stresses ! kg/s^2 - ! 
(1) northeast, (2) northwest, (3) southwest, (4) southeast - !----------------------------------------------------------------- - - ! NOTE: for comp. efficiency 2 x zeta and 2 x eta are used in the code - - stressp_1 (i,j) = (stressp_1 (i,j)*(c1-arlx1i*revp) & - + arlx1i*(zetax2ne*divune - rep_prsne)) * denom1 - stressp_2 (i,j) = (stressp_2 (i,j)*(c1-arlx1i*revp) & - + arlx1i*(zetax2nw*divunw - rep_prsnw)) * denom1 - stressp_3 (i,j) = (stressp_3 (i,j)*(c1-arlx1i*revp) & - + arlx1i*(zetax2sw*divusw - rep_prssw)) * denom1 - stressp_4 (i,j) = (stressp_4 (i,j)*(c1-arlx1i*revp) & - + arlx1i*(zetax2se*divuse - rep_prsse)) * denom1 - - stressm_1 (i,j) = (stressm_1 (i,j)*(c1-arlx1i*revp) & - + arlx1i*etax2ne*tensionne) * denom1 - stressm_2 (i,j) = (stressm_2 (i,j)*(c1-arlx1i*revp) & - + arlx1i*etax2nw*tensionnw) * denom1 - stressm_3 (i,j) = (stressm_3 (i,j)*(c1-arlx1i*revp) & - + arlx1i*etax2sw*tensionsw) * denom1 - stressm_4 (i,j) = (stressm_4 (i,j)*(c1-arlx1i*revp) & - + arlx1i*etax2se*tensionse) * denom1 - - stress12_1(i,j) = (stress12_1(i,j)*(c1-arlx1i*revp) & - + arlx1i*p5*etax2ne*shearne) * denom1 - stress12_2(i,j) = (stress12_2(i,j)*(c1-arlx1i*revp) & - + arlx1i*p5*etax2nw*shearnw) * denom1 - stress12_3(i,j) = (stress12_3(i,j)*(c1-arlx1i*revp) & - + arlx1i*p5*etax2sw*shearsw) * denom1 - stress12_4(i,j) = (stress12_4(i,j)*(c1-arlx1i*revp) & - + arlx1i*p5*etax2se*shearse) * denom1 - - !----------------------------------------------------------------- - ! Eliminate underflows. - ! The following code is commented out because it is relatively - ! expensive and most compilers include a flag that accomplishes - ! the same thing more efficiently. This code is cheaper than - ! handling underflows if the compiler lacks a flag; uncomment - ! it in that case. The compiler flag is often described with the - ! phrase "flush to zero". - !----------------------------------------------------------------- - -! call icepack_query_parameters(puny_out=puny) -! 
call icepack_warnings_flush(nu_diag) -! if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & -! file=__FILE__, line=__LINE__) - -! stressp_1(i,j) = sign(max(abs(stressp_1(i,j)),puny),stressp_1(i,j)) -! stressp_2(i,j) = sign(max(abs(stressp_2(i,j)),puny),stressp_2(i,j)) -! stressp_3(i,j) = sign(max(abs(stressp_3(i,j)),puny),stressp_3(i,j)) -! stressp_4(i,j) = sign(max(abs(stressp_4(i,j)),puny),stressp_4(i,j)) - -! stressm_1(i,j) = sign(max(abs(stressm_1(i,j)),puny),stressm_1(i,j)) -! stressm_2(i,j) = sign(max(abs(stressm_2(i,j)),puny),stressm_2(i,j)) -! stressm_3(i,j) = sign(max(abs(stressm_3(i,j)),puny),stressm_3(i,j)) -! stressm_4(i,j) = sign(max(abs(stressm_4(i,j)),puny),stressm_4(i,j)) - -! stress12_1(i,j) = sign(max(abs(stress12_1(i,j)),puny),stress12_1(i,j)) -! stress12_2(i,j) = sign(max(abs(stress12_2(i,j)),puny),stress12_2(i,j)) -! stress12_3(i,j) = sign(max(abs(stress12_3(i,j)),puny),stress12_3(i,j)) -! stress12_4(i,j) = sign(max(abs(stress12_4(i,j)),puny),stress12_4(i,j)) - - !----------------------------------------------------------------- - ! combinations of the stresses for the momentum equation ! 
kg/s^2 - !----------------------------------------------------------------- - - ssigpn = stressp_1(i,j) + stressp_2(i,j) - ssigps = stressp_3(i,j) + stressp_4(i,j) - ssigpe = stressp_1(i,j) + stressp_4(i,j) - ssigpw = stressp_2(i,j) + stressp_3(i,j) - ssigp1 =(stressp_1(i,j) + stressp_3(i,j))*p055 - ssigp2 =(stressp_2(i,j) + stressp_4(i,j))*p055 - - ssigmn = stressm_1(i,j) + stressm_2(i,j) - ssigms = stressm_3(i,j) + stressm_4(i,j) - ssigme = stressm_1(i,j) + stressm_4(i,j) - ssigmw = stressm_2(i,j) + stressm_3(i,j) - ssigm1 =(stressm_1(i,j) + stressm_3(i,j))*p055 - ssigm2 =(stressm_2(i,j) + stressm_4(i,j))*p055 - - ssig12n = stress12_1(i,j) + stress12_2(i,j) - ssig12s = stress12_3(i,j) + stress12_4(i,j) - ssig12e = stress12_1(i,j) + stress12_4(i,j) - ssig12w = stress12_2(i,j) + stress12_3(i,j) - ssig121 =(stress12_1(i,j) + stress12_3(i,j))*p111 - ssig122 =(stress12_2(i,j) + stress12_4(i,j))*p111 - - csigpne = p111*stressp_1(i,j) + ssigp2 + p027*stressp_3(i,j) - csigpnw = p111*stressp_2(i,j) + ssigp1 + p027*stressp_4(i,j) - csigpsw = p111*stressp_3(i,j) + ssigp2 + p027*stressp_1(i,j) - csigpse = p111*stressp_4(i,j) + ssigp1 + p027*stressp_2(i,j) - - csigmne = p111*stressm_1(i,j) + ssigm2 + p027*stressm_3(i,j) - csigmnw = p111*stressm_2(i,j) + ssigm1 + p027*stressm_4(i,j) - csigmsw = p111*stressm_3(i,j) + ssigm2 + p027*stressm_1(i,j) - csigmse = p111*stressm_4(i,j) + ssigm1 + p027*stressm_2(i,j) - - csig12ne = p222*stress12_1(i,j) + ssig122 & - + p055*stress12_3(i,j) - csig12nw = p222*stress12_2(i,j) + ssig121 & - + p055*stress12_4(i,j) - csig12sw = p222*stress12_3(i,j) + ssig122 & - + p055*stress12_1(i,j) - csig12se = p222*stress12_4(i,j) + ssig121 & - + p055*stress12_2(i,j) - - str12ew = p5*dxT(i,j)*(p333*ssig12e + p166*ssig12w) - str12we = p5*dxT(i,j)*(p333*ssig12w + p166*ssig12e) - str12ns = p5*dyT(i,j)*(p333*ssig12n + p166*ssig12s) - str12sn = p5*dyT(i,j)*(p333*ssig12s + p166*ssig12n) - - !----------------------------------------------------------------- - ! 
for dF/dx (u momentum) - !----------------------------------------------------------------- - strp_tmp = p25*dyT(i,j)*(p333*ssigpn + p166*ssigps) - strm_tmp = p25*dyT(i,j)*(p333*ssigmn + p166*ssigms) - - ! northeast (i,j) - str(i,j,1) = -strp_tmp - strm_tmp - str12ew & - + dxhy(i,j)*(-csigpne + csigmne) + dyhx(i,j)*csig12ne - - ! northwest (i+1,j) - str(i,j,2) = strp_tmp + strm_tmp - str12we & - + dxhy(i,j)*(-csigpnw + csigmnw) + dyhx(i,j)*csig12nw - - strp_tmp = p25*dyT(i,j)*(p333*ssigps + p166*ssigpn) - strm_tmp = p25*dyT(i,j)*(p333*ssigms + p166*ssigmn) - - ! southeast (i,j+1) - str(i,j,3) = -strp_tmp - strm_tmp + str12ew & - + dxhy(i,j)*(-csigpse + csigmse) + dyhx(i,j)*csig12se - - ! southwest (i+1,j+1) - str(i,j,4) = strp_tmp + strm_tmp + str12we & - + dxhy(i,j)*(-csigpsw + csigmsw) + dyhx(i,j)*csig12sw - - !----------------------------------------------------------------- - ! for dF/dy (v momentum) - !----------------------------------------------------------------- - strp_tmp = p25*dxT(i,j)*(p333*ssigpe + p166*ssigpw) - strm_tmp = p25*dxT(i,j)*(p333*ssigme + p166*ssigmw) - - ! northeast (i,j) - str(i,j,5) = -strp_tmp + strm_tmp - str12ns & - - dyhx(i,j)*(csigpne + csigmne) + dxhy(i,j)*csig12ne - - ! southeast (i,j+1) - str(i,j,6) = strp_tmp - strm_tmp - str12sn & - - dyhx(i,j)*(csigpse + csigmse) + dxhy(i,j)*csig12se - - strp_tmp = p25*dxT(i,j)*(p333*ssigpw + p166*ssigpe) - strm_tmp = p25*dxT(i,j)*(p333*ssigmw + p166*ssigme) - - ! northwest (i+1,j) - str(i,j,7) = -strp_tmp + strm_tmp + str12ns & - - dyhx(i,j)*(csigpnw + csigmnw) + dxhy(i,j)*csig12nw - - ! southwest (i+1,j+1) - str(i,j,8) = strp_tmp - strm_tmp + str12sn & - - dyhx(i,j)*(csigpsw + csigmsw) + dxhy(i,j)*csig12sw - - enddo ! ij - - end subroutine stress - -!======================================================================= -! Computes the strain rates and internal stress components for C grid -! -! author: JF Lemieux, ECCC -! updated: D. Bailey, NCAR -! Nov 2021 -! -! Bouillon, S., T. 
Fichefet, V. Legat and G. Madec (2013). The -! elastic-viscous-plastic method revisited. Ocean Model., 71, 2-12. -! -! Kimmritz, M., S. Danilov and M. Losch (2016). The adaptive EVP method -! for solving the sea ice momentum equation. Ocean Model., 101, 59-67. - - subroutine stressC_T (nx_block, ny_block , & - icellT , & - indxTi , indxTj , & - uvelE , vvelE , & - uvelN , vvelN , & - dxN , dyE , & - dxT , dyT , & - uarea , DminTarea, & - strength, shearU , & - zetax2T , etax2T , & - stressp , stressm ) - - use ice_dyn_shared, only: strain_rates_T, capping, & - visc_replpress, e_factor - - integer (kind=int_kind), intent(in) :: & - nx_block, ny_block, & ! block dimensions - icellT ! no. of cells where iceTmask = .true. - - integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxTi , & ! compressed index in i-direction - indxTj ! compressed index in j-direction - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - uvelE , & ! x-component of velocity (m/s) at the E point - vvelE , & ! y-component of velocity (m/s) at the E point - uvelN , & ! x-component of velocity (m/s) at the N point - vvelN , & ! y-component of velocity (m/s) at the N point - dxN , & ! width of N-cell through the middle (m) - dyE , & ! height of E-cell through the middle (m) - dxT , & ! width of T-cell through the middle (m) - dyT , & ! height of T-cell through the middle (m) - strength , & ! ice strength (N/m) - shearU , & ! shearU - uarea , & ! area of u cell - DminTarea ! deltaminEVP*tarea - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & - zetax2T , & ! zetax2 = 2*zeta (bulk viscosity) - etax2T , & ! etax2 = 2*eta (shear viscosity) - stressp , & ! sigma11+sigma22 - stressm ! sigma11-sigma22 - - ! local variables - - integer (kind=int_kind) :: & - i, j, ij - - real (kind=dbl_kind), dimension (nx_block,ny_block) :: & - divT , & ! divergence at T point - tensionT ! 
tension at T point - - real (kind=dbl_kind) :: & - shearTsqr , & ! strain rates squared at T point - DeltaT , & ! delt at T point - rep_prsT ! replacement pressure at T point - - character(len=*), parameter :: subname = '(stressC_T)' - - !----------------------------------------------------------------- - ! Initialize - !----------------------------------------------------------------- - - call strain_rates_T (nx_block , ny_block , & - icellT , & - indxTi(:) , indxTj (:) , & - uvelE (:,:), vvelE (:,:), & - uvelN (:,:), vvelN (:,:), & - dxN (:,:), dyE (:,:), & - dxT (:,:), dyT (:,:), & - divT (:,:), tensionT(:,:) ) - - do ij = 1, icellT - i = indxTi(ij) - j = indxTj(ij) - - !----------------------------------------------------------------- - ! Square of shear strain rate at T obtained from interpolation of - ! U point values (Bouillon et al., 2013, Kimmritz et al., 2016 - !----------------------------------------------------------------- - - shearTsqr = (shearU(i ,j )**2 * uarea(i ,j ) & - + shearU(i ,j-1)**2 * uarea(i ,j-1) & - + shearU(i-1,j-1)**2 * uarea(i-1,j-1) & - + shearU(i-1,j )**2 * uarea(i-1,j )) & - / (uarea(i,j)+uarea(i,j-1)+uarea(i-1,j-1)+uarea(i-1,j)) - - DeltaT = sqrt(divT(i,j)**2 + e_factor*(tensionT(i,j)**2 + shearTsqr)) - - !----------------------------------------------------------------- - ! viscosities and replacement pressure at T point - !----------------------------------------------------------------- - - call visc_replpress (strength(i,j), DminTarea(i,j), DeltaT, & - zetax2T (i,j), etax2T (i,j), rep_prsT, capping) - - !----------------------------------------------------------------- - ! the stresses ! kg/s^2 - !----------------------------------------------------------------- - - ! NOTE: for comp. 
efficiency 2 x zeta and 2 x eta are used in the code - - stressp(i,j) = (stressp(i,j)*(c1-arlx1i*revp) & - + arlx1i*(zetax2T(i,j)*divT(i,j) - rep_prsT)) * denom1 - - stressm(i,j) = (stressm(i,j)*(c1-arlx1i*revp) & - + arlx1i*etax2T(i,j)*tensionT(i,j)) * denom1 - - enddo ! ij - - end subroutine stressC_T - -!======================================================================= -! -! Computes the strain rates and internal stress components for U points -! -! author: JF Lemieux, ECCC -! Nov 2021 -! -! Bouillon, S., T. Fichefet, V. Legat and G. Madec (2013). The -! elastic-viscous-plastic method revisited. Ocean Model., 71, 2-12. -! -! Kimmritz, M., S. Danilov and M. Losch (2016). The adaptive EVP method -! for solving the sea ice momentum equation. Ocean Model., 101, 59-67. - - subroutine stressC_U (nx_block , ny_block, & - icellU, & - indxUi , indxUj, & - uarea , & - etax2U , deltaU, & - strengthU, shearU, & - stress12 ) - - use ice_dyn_shared, only: visc_replpress, & - visc_method, deltaminEVP, capping - - integer (kind=int_kind), intent(in) :: & - nx_block, ny_block, & ! block dimensions - icellU ! no. of cells where iceUmask = 1 - - integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxUi , & ! compressed index in i-direction - indxUj ! compressed index in j-direction - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - uarea , & ! area of U point - etax2U , & ! 2*eta at the U point - shearU , & ! shearU array - deltaU , & ! deltaU array - strengthU ! ice strength at the U point - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & - stress12 ! sigma12 - - ! local variables - - integer (kind=int_kind) :: & - i, j, ij - - real (kind=dbl_kind) :: & - lzetax2U , & ! bulk viscosity at U point - letax2U , & ! shear viscosity at U point - lrep_prsU, & ! replacement pressure at U point - DminUarea ! 
Dmin on U - - character(len=*), parameter :: subname = '(stressC_U)' - - !----------------------------------------------------------------- - ! viscosities and replacement pressure at U point - ! avg_zeta: Bouillon et al. 2013, C1 method of Kimmritz et al. 2016 - ! avg_strength: C2 method of Kimmritz et al. 2016 - ! if outside do and stress12 equation repeated in each loop for performance - !----------------------------------------------------------------- - - if (visc_method == 'avg_zeta') then - do ij = 1, icellU - i = indxUi(ij) - j = indxUj(ij) - stress12(i,j) = (stress12(i,j)*(c1-arlx1i*revp) & - + arlx1i*p5*etax2U(i,j)*shearU(i,j)) * denom1 - enddo - - elseif (visc_method == 'avg_strength') then - do ij = 1, icellU - i = indxUi(ij) - j = indxUj(ij) - DminUarea = deltaminEVP*uarea(i,j) - ! only need etax2U here, but other terms are calculated with etax2U - ! minimal extra calculations here even though it seems like there is - call visc_replpress (strengthU(i,j), DminUarea, DeltaU(i,j), & - lzetax2U , letax2U , lrep_prsU , capping) - stress12(i,j) = (stress12(i,j)*(c1-arlx1i*revp) & - + arlx1i*p5*letax2U*shearU(i,j)) * denom1 - enddo - - endif - - end subroutine stressC_U - -!======================================================================= -! Computes the strain rates and internal stress components for T points -! -! author: JF Lemieux, ECCC -! Nov 2021 - - subroutine stressCD_T (nx_block, ny_block, & - icellT, & - indxTi, indxTj, & - uvelE, vvelE, & - uvelN, vvelN, & - dxN, dyE, & - dxT, dyT, & - DminTarea, & - strength, & - zetax2T, etax2T, & - stresspT, stressmT, & - stress12T ) - - use ice_dyn_shared, only: strain_rates_T, capping, & - visc_replpress - - integer (kind=int_kind), intent(in) :: & - nx_block, ny_block, & ! block dimensions - icellT ! no. of cells where iceTmask = .true. - - integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxTi , & ! compressed index in i-direction - indxTj ! 
compressed index in j-direction - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - uvelE , & ! x-component of velocity (m/s) at the E point - vvelE , & ! y-component of velocity (m/s) at the N point - uvelN , & ! x-component of velocity (m/s) at the E point - vvelN , & ! y-component of velocity (m/s) at the N point - dxN , & ! width of N-cell through the middle (m) - dyE , & ! height of E-cell through the middle (m) - dxT , & ! width of T-cell through the middle (m) - dyT , & ! height of T-cell through the middle (m) - strength , & ! ice strength (N/m) - DminTarea ! deltaminEVP*tarea - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & - zetax2T , & ! zetax2 = 2*zeta (bulk viscosity) - etax2T , & ! etax2 = 2*eta (shear viscosity) - stresspT , & ! sigma11+sigma22 - stressmT , & ! sigma11-sigma22 - stress12T ! sigma12 - - ! local variables - - integer (kind=int_kind) :: & - i, j, ij - - real (kind=dbl_kind), dimension (nx_block,ny_block) :: & - divT , & ! divergence at T point - tensionT , & ! tension at T point - shearT , & ! sheat at T point - DeltaT ! delt at T point - - real (kind=dbl_kind) :: & - rep_prsT ! replacement pressure at T point - - character(len=*), parameter :: subname = '(stressCD_T)' - - !----------------------------------------------------------------- - ! strain rates at T point - ! NOTE these are actually strain rates * area (m^2/s) - !----------------------------------------------------------------- - - call strain_rates_T (nx_block , ny_block , & - icellT , & - indxTi(:) , indxTj (:) , & - uvelE (:,:), vvelE (:,:), & - uvelN (:,:), vvelN (:,:), & - dxN (:,:), dyE (:,:), & - dxT (:,:), dyT (:,:), & - divT (:,:), tensionT(:,:), & - shearT(:,:), DeltaT (:,:) ) - - do ij = 1, icellT - i = indxTi(ij) - j = indxTj(ij) - - !----------------------------------------------------------------- - ! 
viscosities and replacement pressure at T point - !----------------------------------------------------------------- - - call visc_replpress (strength(i,j), DminTarea(i,j), DeltaT(i,j), & - zetax2T (i,j), etax2T (i,j), rep_prsT , capping) - - !----------------------------------------------------------------- - ! the stresses ! kg/s^2 - !----------------------------------------------------------------- - - ! NOTE: for comp. efficiency 2 x zeta and 2 x eta are used in the code - - stresspT(i,j) = (stresspT (i,j)*(c1-arlx1i*revp) & - + arlx1i*(zetax2T(i,j)*divT(i,j) - rep_prsT)) * denom1 - - stressmT(i,j) = (stressmT (i,j)*(c1-arlx1i*revp) & - + arlx1i*etax2T(i,j)*tensionT(i,j)) * denom1 - - stress12T(i,j) = (stress12T(i,j)*(c1-arlx1i*revp) & - + arlx1i*p5*etax2T(i,j)*shearT(i,j)) * denom1 - - enddo ! ij - - end subroutine stressCD_T - -!======================================================================= -! Computes the strain rates and internal stress components for U points -! -! author: JF Lemieux, ECCC -! Nov 2021 - - subroutine stressCD_U (nx_block, ny_block, & - icellU, & - indxUi, indxUj, & - uarea, & - zetax2U, etax2U, & - strengthU, & - divergU, tensionU, & - shearU, DeltaU, & - stresspU, stressmU, & - stress12U ) - - use ice_dyn_shared, only: strain_rates_U, & - visc_replpress, & - visc_method, deltaminEVP, capping - - integer (kind=int_kind), intent(in) :: & - nx_block, ny_block, & ! block dimensions - icellU ! no. of cells where iceUmask = 1 - - integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxUi , & ! compressed index in i-direction - indxUj ! compressed index in j-direction - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - uarea , & ! area of U-cell (m^2) - zetax2U , & ! 2*zeta at U point - etax2U , & ! 2*eta at U point - strengthU, & ! ice strength at U point - divergU , & ! div at U point - tensionU , & ! tension at U point - shearU , & ! shear at U point - deltaU ! 
delt at U point - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & - stresspU , & ! sigma11+sigma22 - stressmU , & ! sigma11-sigma22 - stress12U ! sigma12 - - ! local variables - - integer (kind=int_kind) :: & - i, j, ij - - real (kind=dbl_kind) :: & - lzetax2U , & ! bulk viscosity at U point - letax2U , & ! shear viscosity at U point - lrep_prsU , & ! replacement pressure at U point - DminUarea ! Dmin on U - - character(len=*), parameter :: subname = '(stressCD_U)' - - do ij = 1, icellU - i = indxUi(ij) - j = indxUj(ij) - - !----------------------------------------------------------------- - ! viscosities and replacement pressure at U point - ! avg_zeta: Bouillon et al. 2013, C1 method of Kimmritz et al. 2016 - ! avg_strength: C2 method of Kimmritz et al. 2016 - !----------------------------------------------------------------- - - if (visc_method == 'avg_zeta') then - lzetax2U = zetax2U(i,j) - letax2U = etax2U(i,j) - lrep_prsU = (c1-Ktens)/(c1+Ktens)*lzetax2U*deltaU(i,j) - - elseif (visc_method == 'avg_strength') then - DminUarea = deltaminEVP*uarea(i,j) - ! only need etax2U here, but other terms are calculated with etax2U - ! minimal extra calculations here even though it seems like there is - call visc_replpress (strengthU(i,j), DminUarea, DeltaU(i,j), & - lzetax2U , letax2U , lrep_prsU , capping) - endif - - !----------------------------------------------------------------- - ! the stresses ! kg/s^2 - !----------------------------------------------------------------- - - ! NOTE: for comp. efficiency 2 x zeta and 2 x eta are used in the code - - stresspU(i,j) = (stresspU (i,j)*(c1-arlx1i*revp) & - + arlx1i*(lzetax2U*divergU(i,j) - lrep_prsU)) * denom1 - - stressmU(i,j) = (stressmU (i,j)*(c1-arlx1i*revp) & - + arlx1i*letax2U*tensionU(i,j)) * denom1 - - stress12U(i,j) = (stress12U(i,j)*(c1-arlx1i*revp) & - + arlx1i*p5*letax2U*shearU(i,j)) * denom1 - - enddo ! 
ij - - end subroutine stressCD_U - -!======================================================================= -! Computes divergence of stress tensor at the E or N point for the mom equation -! -! author: JF Lemieux, ECCC -! Nov 2021 -! -! Hunke, E. C., and J. K. Dukowicz (2002). The Elastic-Viscous-Plastic -! Sea Ice Dynamics Model in General Orthogonal Curvilinear Coordinates -! on a Sphere - Incorporation of Metric Terms. Mon. Weather Rev., -! 130, 1848-1865. -! -! Bouillon, S., M. Morales Maqueda, V. Legat and T. Fichefet (2009). An -! elastic-viscous-plastic sea ice model formulated on Arakawa B and C grids. -! Ocean Model., 27, 174-184. - - subroutine div_stress_Ex(nx_block, ny_block, & - icell , & - indxi , indxj , & - dxE , dyE , & - dxU , dyT , & - arear , & - stressp , stressm , & - stress12, & - strintx ) - - - integer (kind=int_kind), intent(in) :: & - nx_block, ny_block, & ! block dimensions - icell ! no. of cells where epm (or npm) = 1 - - integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxi , & ! compressed index in i-direction - indxj ! compressed index in j-direction - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - dxE , & ! width of E or N-cell through the middle (m) - dyE , & ! height of E or N-cell through the middle (m) - dxU , & ! width of T or U-cell through the middle (m) - dyT , & ! height of T or U-cell through the middle (m) - arear ! earear or narear - - real (kind=dbl_kind), optional, dimension (nx_block,ny_block), intent(in) :: & - stressp , & ! stressp (U or T) used for strintx calculation - stressm , & ! stressm (U or T) used for strintx calculation - stress12 ! stress12 (U or T) used for strintx calculation - - real (kind=dbl_kind), optional, dimension (nx_block,ny_block), intent(out) :: & - strintx ! div of stress tensor for u component - - ! 
local variables - - integer (kind=int_kind) :: & - i, j, ij - - character(len=*), parameter :: subname = '(div_stress_Ex)' - - do ij = 1, icell - i = indxi(ij) - j = indxj(ij) - strintx(i,j) = arear(i,j) * & - ( p5 * dyE(i,j) * ( stressp(i+1,j ) - stressp (i ,j ) ) & - + (p5/ dyE(i,j)) * ( (dyT(i+1,j )**2) * stressm (i+1,j ) & - -(dyT(i ,j )**2) * stressm (i ,j ) ) & - + (c1/ dxE(i,j)) * ( (dxU(i ,j )**2) * stress12(i ,j ) & - -(dxU(i ,j-1)**2) * stress12(i ,j-1) ) ) - enddo - - end subroutine div_stress_Ex - -!======================================================================= - subroutine div_stress_Ey(nx_block, ny_block, & - icell , & - indxi , indxj , & - dxE , dyE , & - dxU , dyT , & - arear , & - stressp , stressm , & - stress12, & - strinty ) - - integer (kind=int_kind), intent(in) :: & - nx_block, ny_block, & ! block dimensions - icell ! no. of cells where epm (or npm) = 1 - - integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxi , & ! compressed index in i-direction - indxj ! compressed index in j-direction - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - dxE , & ! width of E or N-cell through the middle (m) - dyE , & ! height of E or N-cell through the middle (m) - dxU , & ! width of T or U-cell through the middle (m) - dyT , & ! height of T or U-cell through the middle (m) - arear ! earear or narear - - real (kind=dbl_kind), optional, dimension (nx_block,ny_block), intent(in) :: & - stressp , & ! stressp (U or T) used for strinty calculation - stressm , & ! stressm (U or T) used for strinty calculation - stress12 ! stress12 (U or T) used for strinty calculation - - real (kind=dbl_kind), optional, dimension (nx_block,ny_block), intent(out) :: & - strinty ! div of stress tensor for v component - - ! 
local variables - - integer (kind=int_kind) :: & - i, j, ij - - character(len=*), parameter :: subname = '(div_stress_Ey)' - - do ij = 1, icell - i = indxi(ij) - j = indxj(ij) - strinty(i,j) = arear(i,j) * & - ( p5 * dxE(i,j) * ( stressp(i ,j ) - stressp (i ,j-1) ) & - - (p5/ dxE(i,j)) * ( (dxU(i ,j )**2) * stressm (i ,j ) & - -(dxU(i ,j-1)**2) * stressm (i ,j-1) ) & - + (c1/ dyE(i,j)) * ( (dyT(i+1,j )**2) * stress12(i+1,j ) & - -(dyT(i ,j )**2) * stress12(i ,j ) ) ) - enddo - - end subroutine div_stress_Ey - -!======================================================================= - subroutine div_stress_Nx(nx_block, ny_block, & - icell , & - indxi , indxj , & - dxN , dyN , & - dxT , dyU , & - arear , & - stressp , stressm , & - stress12, & - strintx ) - - integer (kind=int_kind), intent(in) :: & - nx_block, ny_block, & ! block dimensions - icell ! no. of cells where epm (or npm) = 1 - - integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxi , & ! compressed index in i-direction - indxj ! compressed index in j-direction - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - dxN , & ! width of E or N-cell through the middle (m) - dyN , & ! height of E or N-cell through the middle (m) - dxT , & ! width of T or U-cell through the middle (m) - dyU , & ! height of T or U-cell through the middle (m) - arear ! earear or narear - - real (kind=dbl_kind), optional, dimension (nx_block,ny_block), intent(in) :: & - stressp , & ! stressp (U or T) used for strintx calculation - stressm , & ! stressm (U or T) used for strintx calculation - stress12 ! stress12 (U or T) used for strintx calculation - - real (kind=dbl_kind), optional, dimension (nx_block,ny_block), intent(out) :: & - strintx ! div of stress tensor for u component - - ! 
local variables - - integer (kind=int_kind) :: & - i, j, ij - - character(len=*), parameter :: subname = '(div_stress_Nx)' - - do ij = 1, icell - i = indxi(ij) - j = indxj(ij) - strintx(i,j) = arear(i,j) * & - ( p5 * dyN(i,j) * ( stressp(i ,j ) - stressp (i-1,j ) ) & - + (p5/ dyN(i,j)) * ( (dyU(i ,j )**2) * stressm (i ,j ) & - -(dyU(i-1,j )**2) * stressm (i-1,j ) ) & - + (c1/ dxN(i,j)) * ( (dxT(i ,j+1)**2) * stress12(i ,j+1) & - -(dxT(i ,j )**2) * stress12(i ,j ) ) ) - enddo - - end subroutine div_stress_Nx - -!======================================================================= - subroutine div_stress_Ny(nx_block, ny_block, & - icell , & - indxi , indxj , & - dxN , dyN , & - dxT , dyU , & - arear , & - stressp , stressm , & - stress12, & - strinty ) - - integer (kind=int_kind), intent(in) :: & - nx_block, ny_block, & ! block dimensions - icell ! no. of cells where epm (or npm) = 1 - - integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxi , & ! compressed index in i-direction - indxj ! compressed index in j-direction - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - dxN , & ! width of E or N-cell through the middle (m) - dyN , & ! height of E or N-cell through the middle (m) - dxT , & ! width of T or U-cell through the middle (m) - dyU , & ! height of T or U-cell through the middle (m) - arear ! earear or narear - - real (kind=dbl_kind), optional, dimension (nx_block,ny_block), intent(in) :: & - stressp , & ! stressp (U or T) used for strinty calculation - stressm , & ! stressm (U or T) used for strinty calculation - stress12 ! stress12 (U or T) used for strinty calculation - - real (kind=dbl_kind), optional, dimension (nx_block,ny_block), intent(out) :: & - strinty ! div of stress tensor for v component - - ! 
local variables - - integer (kind=int_kind) :: & - i, j, ij - - character(len=*), parameter :: subname = '(div_stress_Ny)' - - do ij = 1, icell - i = indxi(ij) - j = indxj(ij) - strinty(i,j) = arear(i,j) * & - ( p5 * dxN(i,j) * ( stressp(i ,j+1) - stressp (i ,j ) ) & - - (p5/ dxN(i,j)) * ( (dxT(i ,j+1)**2) * stressm (i ,j+1) & - -(dxT(i ,j )**2) * stressm (i ,j ) ) & - + (c1/ dyN(i,j)) * ( (dyU(i ,j )**2) * stress12(i ,j ) & - -(dyU(i-1,j )**2) * stress12(i-1,j ) ) ) - enddo - - end subroutine div_stress_Ny - -!======================================================================= - - end module ice_dyn_evp - -!======================================================================= diff --git a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 b/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 deleted file mode 100644 index 187ec55cc..000000000 --- a/cicecore/cicedynB/dynamics/ice_dyn_shared.F90 +++ /dev/null @@ -1,2937 +0,0 @@ -!======================================================================= - -! Elastic-viscous-plastic sea ice dynamics model code shared with other -! approaches -! -! author: Elizabeth C. Hunke, LANL -! -! 
2013: Split from ice_dyn_evp.F90 by Elizabeth Hunke - - module ice_dyn_shared - - use ice_kinds_mod - use ice_communicate, only: my_task, master_task, get_num_procs - use ice_constants, only: c0, c1, c2, c3, c4, c6 - use ice_constants, only: omega, spval_dbl, p01, p001, p5 - use ice_blocks, only: nx_block, ny_block - use ice_domain_size, only: max_blocks - use ice_fileunits, only: nu_diag - use ice_exit, only: abort_ice - use ice_grid, only: grid_ice - use icepack_intfc, only: icepack_warnings_flush, icepack_warnings_aborted - use icepack_intfc, only: icepack_query_parameters - - implicit none - private - public :: set_evp_parameters, stepu, stepuv_CD, stepu_C, stepv_C, & - principal_stress, init_dyn, dyn_prep1, dyn_prep2, dyn_finish, & - seabed_stress_factor_LKD, seabed_stress_factor_prob, & - alloc_dyn_shared, & - deformations, deformationsC_T, deformationsCD_T, & - strain_rates, strain_rates_T, strain_rates_U, & - visc_replpress, & - dyn_haloUpdate, & - stack_fields, unstack_fields - - ! namelist parameters - - integer (kind=int_kind), public :: & - kdyn , & ! type of dynamics ( -1, 0 = off, 1 = evp, 2 = eap ) - kridge , & ! set to "-1" to turn off ridging - ndte ! number of subcycles - - character (len=char_len), public :: & - coriolis , & ! 'constant', 'zero', or 'latitude' - ssh_stress ! 'geostrophic' or 'coupled' - - logical (kind=log_kind), public :: & - revised_evp ! if true, use revised evp procedure - - character (len=char_len), public :: & - evp_algorithm ! standard_2d = 2D org version (standard) - ! shared_mem_1d = 1d without mpi call and refactorization to 1d - - real (kind=dbl_kind), public :: & - elasticDamp ! coefficient for calculating the parameter E, elastic damping parameter - - ! other EVP parameters - - character (len=char_len), public :: & - yield_curve , & ! 'ellipse' ('teardrop' needs further testing) - visc_method , & ! method for viscosity calc at U points (C, CD grids) - seabed_stress_method ! method for seabed stress calculation - ! 
LKD: Lemieux et al. 2015, probabilistic: Dupont et al. in prep. - - real (kind=dbl_kind), parameter, public :: & - u0 = 5e-5_dbl_kind, & ! residual velocity for seabed stress (m/s) - cosw = c1 , & ! cos(ocean turning angle) ! turning angle = 0 - sinw = c0 , & ! sin(ocean turning angle) ! turning angle = 0 - a_min = p001 , & ! minimum ice area - m_min = p01 ! minimum ice mass (kg/m^2) - - real (kind=dbl_kind), public :: & - revp , & ! 0 for classic EVP, 1 for revised EVP - e_yieldcurve, & ! VP aspect ratio of elliptical yield curve - e_plasticpot, & ! VP aspect ratio of elliptical plastic potential - epp2i , & ! 1/(e_plasticpot)^2 - e_factor , & ! (e_yieldcurve)^2/(e_plasticpot)^4 - ecci , & ! temporary for 1d evp - deltaminEVP , & ! minimum delta for viscosities (EVP) - deltaminVP , & ! minimum delta for viscosities (VP) - capping , & ! capping of viscosities (1=Hibler79, 0=Kreyscher2000) - dtei , & ! ndte/dt, where dt/ndte is subcycling timestep (1/s) - denom1 ! constants for stress equation - - real (kind=dbl_kind), public :: & ! Bouillon et al relaxation constants - arlx , & ! alpha for stressp - arlx1i , & ! (inverse of alpha) for stressp - brlx ! beta for momentum - - real (kind=dbl_kind), allocatable, public :: & - fcor_blk(:,:,:) ! Coriolis parameter (1/s) - - real (kind=dbl_kind), allocatable, public :: & - fcorE_blk(:,:,:), & ! Coriolis parameter at E points (1/s) - fcorN_blk(:,:,:) ! Coriolis parameter at N points (1/s) - - real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & - uvel_init , & ! x-component of velocity (m/s), beginning of timestep - vvel_init ! y-component of velocity (m/s), beginning of timestep - - real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & - uvelN_init , & ! x-component of velocity (m/s), beginning of timestep - vvelN_init ! y-component of velocity (m/s), beginning of timestep - - real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & - uvelE_init , & ! 
x-component of velocity (m/s), beginning of timestep - vvelE_init ! y-component of velocity (m/s), beginning of timestep - - logical (kind=log_kind), dimension (:,:,:), allocatable, public :: & - iceTmask, & ! ice extent mask (T-cell) - iceUmask, & ! ice extent mask (U-cell) - iceNmask, & ! ice extent mask (N-cell) - iceEmask ! ice extent mask (E-cell) - - real (kind=dbl_kind), allocatable, public :: & - DminTarea(:,:,:) ! deltamin * tarea (m^2/s) - - ! ice isotropic tensile strength parameter - real (kind=dbl_kind), public :: & - Ktens ! T=Ktens*P (tensile strength: see Konig and Holland, 2010) - - ! seabed (basal) stress parameters and settings - logical (kind=log_kind), public :: & - seabed_stress ! if true, seabed stress for landfast on - - real (kind=dbl_kind), public :: & - k1 , & ! 1st free parameter for seabed1 grounding parameterization - k2 , & ! second free parameter (N/m^3) for seabed1 grounding parametrization - alphab , & ! alphab=Cb factor in Lemieux et al 2015 - threshold_hw ! max water depth for grounding - ! see keel data from Amundrud et al. 2004 (JGR) - - interface strain_rates_T - module procedure strain_rates_Tdt - module procedure strain_rates_Tdtsd - end interface - - interface dyn_haloUpdate - module procedure dyn_haloUpdate1 - module procedure dyn_haloUpdate2 - module procedure dyn_haloUpdate3 - module procedure dyn_haloUpdate4 - module procedure dyn_haloUpdate5 - end interface - - interface stack_fields - module procedure stack_fields2 - module procedure stack_fields3 - module procedure stack_fields4 - module procedure stack_fields5 - end interface - - interface unstack_fields - module procedure unstack_fields2 - module procedure unstack_fields3 - module procedure unstack_fields4 - module procedure unstack_fields5 - end interface - -!======================================================================= - - contains - -!======================================================================= -! -! Allocate space for all variables -! 
- subroutine alloc_dyn_shared - - integer (int_kind) :: ierr - - character(len=*), parameter :: subname = '(alloc_dyn_shared)' - - allocate( & - uvel_init (nx_block,ny_block,max_blocks), & ! x-component of velocity (m/s), beginning of timestep - vvel_init (nx_block,ny_block,max_blocks), & ! y-component of velocity (m/s), beginning of timestep - iceTmask (nx_block,ny_block,max_blocks), & ! T mask for dynamics - iceUmask (nx_block,ny_block,max_blocks), & ! U mask for dynamics - stat=ierr) - if (ierr/=0) call abort_ice(subname//': Out of memory') - - if (grid_ice == 'CD' .or. grid_ice == 'C') then - allocate( & - uvelE_init (nx_block,ny_block,max_blocks), & ! x-component of velocity (m/s), beginning of timestep - vvelE_init (nx_block,ny_block,max_blocks), & ! y-component of velocity (m/s), beginning of timestep - uvelN_init (nx_block,ny_block,max_blocks), & ! x-component of velocity (m/s), beginning of timestep - vvelN_init (nx_block,ny_block,max_blocks), & ! y-component of velocity (m/s), beginning of timestep - iceEmask (nx_block,ny_block,max_blocks), & ! T mask for dynamics - iceNmask (nx_block,ny_block,max_blocks), & ! U mask for dynamics - stat=ierr) - if (ierr/=0) call abort_ice(subname//': Out of memory') - endif - - end subroutine alloc_dyn_shared - -!======================================================================= -! Initialize parameters and variables needed for the dynamics -! author: Elizabeth C. 
Hunke, LANL - - subroutine init_dyn (dt) - - use ice_blocks, only: nx_block, ny_block - use ice_domain, only: nblocks, halo_dynbundle - use ice_domain_size, only: max_blocks - use ice_flux, only: rdg_conv, rdg_shear, & - stressp_1, stressp_2, stressp_3, stressp_4, & - stressm_1, stressm_2, stressm_3, stressm_4, & - stress12_1, stress12_2, stress12_3, stress12_4, & - stresspT, stressmT, stress12T, & - stresspU, stressmU, stress12U - use ice_state, only: uvel, vvel, uvelE, vvelE, uvelN, vvelN, divu, shear - use ice_grid, only: ULAT, NLAT, ELAT, tarea - - real (kind=dbl_kind), intent(in) :: & - dt ! time step - - ! local variables - - integer (kind=int_kind) :: & - i, j , & ! indices - nprocs, & ! number of processors - iblk ! block index - - character(len=*), parameter :: subname = '(init_dyn)' - - call set_evp_parameters (dt) - - ! Set halo_dynbundle, this is empirical at this point, could become namelist - halo_dynbundle = .true. - nprocs = get_num_procs() - if (nx_block*ny_block/nprocs > 100) halo_dynbundle = .false. - - if (my_task == master_task) then - write(nu_diag,*) 'dt = ',dt - write(nu_diag,*) 'dt_subcyle = ',dt/real(ndte,kind=dbl_kind) - write(nu_diag,*) 'tdamp =', elasticDamp * dt - write(nu_diag,*) 'halo_dynbundle =', halo_dynbundle - endif - - allocate(fcor_blk(nx_block,ny_block,max_blocks)) - allocate(DminTarea(nx_block,ny_block,max_blocks)) - - if (grid_ice == 'CD' .or. grid_ice == 'C') then - allocate(fcorE_blk(nx_block,ny_block,max_blocks)) - allocate(fcorN_blk(nx_block,ny_block,max_blocks)) - endif - - !$OMP PARALLEL DO PRIVATE(iblk,i,j) SCHEDULE(runtime) - do iblk = 1, nblocks - do j = 1, ny_block - do i = 1, nx_block - - ! velocity - uvel(i,j,iblk) = c0 ! m/s - vvel(i,j,iblk) = c0 ! m/s - if (grid_ice == 'CD' .or. grid_ice == 'C') then ! extra velocity variables - uvelE(i,j,iblk) = c0 - vvelE(i,j,iblk) = c0 - uvelN(i,j,iblk) = c0 - vvelN(i,j,iblk) = c0 - endif - - ! 
strain rates - divu (i,j,iblk) = c0 - shear(i,j,iblk) = c0 - rdg_conv (i,j,iblk) = c0 - rdg_shear(i,j,iblk) = c0 - - ! Coriolis parameter - if (trim(coriolis) == 'constant') then - fcor_blk(i,j,iblk) = 1.46e-4_dbl_kind ! Hibler 1979, N. Hem; 1/s - else if (trim(coriolis) == 'zero') then - fcor_blk(i,j,iblk) = c0 - else - fcor_blk(i,j,iblk) = c2*omega*sin(ULAT(i,j,iblk)) ! 1/s - endif - - if (grid_ice == 'CD' .or. grid_ice == 'C') then - - if (trim(coriolis) == 'constant') then - fcorE_blk(i,j,iblk) = 1.46e-4_dbl_kind ! Hibler 1979, N. Hem; 1/s - fcorN_blk(i,j,iblk) = 1.46e-4_dbl_kind ! Hibler 1979, N. Hem; 1/s - else if (trim(coriolis) == 'zero') then - fcorE_blk(i,j,iblk) = c0 - fcorN_blk(i,j,iblk) = c0 - else - fcorE_blk(i,j,iblk) = c2*omega*sin(ELAT(i,j,iblk)) ! 1/s - fcorN_blk(i,j,iblk) = c2*omega*sin(NLAT(i,j,iblk)) ! 1/s - endif - - endif - - ! stress tensor, kg/s^2 - stressp_1 (i,j,iblk) = c0 - stressp_2 (i,j,iblk) = c0 - stressp_3 (i,j,iblk) = c0 - stressp_4 (i,j,iblk) = c0 - stressm_1 (i,j,iblk) = c0 - stressm_2 (i,j,iblk) = c0 - stressm_3 (i,j,iblk) = c0 - stressm_4 (i,j,iblk) = c0 - stress12_1(i,j,iblk) = c0 - stress12_2(i,j,iblk) = c0 - stress12_3(i,j,iblk) = c0 - stress12_4(i,j,iblk) = c0 - - if (grid_ice == 'CD' .or. grid_ice == 'C') then - stresspT (i,j,iblk) = c0 - stressmT (i,j,iblk) = c0 - stress12T (i,j,iblk) = c0 - stresspU (i,j,iblk) = c0 - stressmU (i,j,iblk) = c0 - stress12U (i,j,iblk) = c0 - endif - - if (kdyn == 1) then - DminTarea(i,j,iblk) = deltaminEVP*tarea(i,j,iblk) - elseif (kdyn == 3) then - DminTarea(i,j,iblk) = deltaminVP*tarea(i,j,iblk) - endif - - ! ice extent mask on velocity points - iceUmask(i,j,iblk) = .false. - if (grid_ice == 'CD' .or. grid_ice == 'C') then - iceEmask(i,j,iblk) = .false. - iceNmask(i,j,iblk) = .false. - end if - enddo ! i - enddo ! j - enddo ! iblk - !$OMP END PARALLEL DO - - end subroutine init_dyn - -!======================================================================= -! 
Set parameters needed for the evp dynamics. -! Note: This subroutine is currently called only during initialization. -! If the dynamics time step can vary during runtime, it should -! be called whenever the time step changes. -! -! author: Elizabeth C. Hunke, LANL - - subroutine set_evp_parameters (dt) - - real (kind=dbl_kind), intent(in) :: & - dt ! time step - - ! local variables - - character(len=*), parameter :: subname = '(set_evp_parameters)' - - ! elastic time step - dtei = real(ndte,kind=dbl_kind)/dt - - ! variables for elliptical yield curve and plastic potential - epp2i = c1/e_plasticpot**2 - e_factor = e_yieldcurve**2 / e_plasticpot**4 - ecci = c1/e_yieldcurve**2 ! temporary for 1d evp - - if (revised_evp) then ! Bouillon et al, Ocean Mod 2013 - revp = c1 - denom1 = c1 - arlx1i = c1/arlx - else ! Hunke, JCP 2013 with modified stress eq - revp = c0 - arlx = c2 * elasticDamp * real(ndte,kind=dbl_kind) - arlx1i = c1/arlx - brlx = real(ndte,kind=dbl_kind) - denom1 = c1/(c1+arlx1i) - endif - if (my_task == master_task) then - write (nu_diag,*) 'arlx, arlxi, brlx, denom1', & - arlx, arlx1i, brlx, denom1 - endif - - end subroutine set_evp_parameters - -!======================================================================= -! Computes quantities needed in the stress tensor (sigma) -! and momentum (u) equations, but which do not change during -! the thermodynamics/transport time step: -! ice mass and ice extent masks -! -! author: Elizabeth C. Hunke, LANL - - subroutine dyn_prep1 (nx_block, ny_block, & - ilo, ihi, jlo, jhi, & - aice, vice, & - vsno, Tmask, & - Tmass, iceTmask) - - integer (kind=int_kind), intent(in) :: & - nx_block, ny_block, & ! block dimensions - ilo,ihi,jlo,jhi ! beginning and end of physical domain - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - aice , & ! concentration of ice - vice , & ! volume per unit area of ice (m) - vsno ! 
volume per unit area of snow (m) - - logical (kind=log_kind), dimension (nx_block,ny_block), intent(in) :: & - Tmask ! land/boundary mask, thickness (T-cell) - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out) :: & - Tmass ! total mass of ice and snow (kg/m^2) - - logical (kind=log_kind), dimension (nx_block,ny_block), intent(out) :: & - iceTmask ! ice extent mask (T-cell) - - ! local variables - - integer (kind=int_kind) :: & - i, j - - real (kind=dbl_kind) :: & - rhoi, rhos - - logical (kind=log_kind), dimension(nx_block,ny_block) :: & - tmphm ! temporary mask - - character(len=*), parameter :: subname = '(dyn_prep1)' - - call icepack_query_parameters(rhos_out=rhos, rhoi_out=rhoi) - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) - - do j = 1, ny_block - do i = 1, nx_block - - !----------------------------------------------------------------- - ! total mass of ice and snow, centered in T-cell - ! NOTE: vice and vsno must be up to date in all grid cells, - ! including ghost cells - !----------------------------------------------------------------- - if (Tmask(i,j)) then - Tmass(i,j) = (rhoi*vice(i,j) + rhos*vsno(i,j)) ! kg/m^2 - else - Tmass(i,j) = c0 - endif - - !----------------------------------------------------------------- - ! ice extent mask (T-cells) - !----------------------------------------------------------------- - tmphm(i,j) = Tmask(i,j) .and. (aice (i,j) > a_min) & - .and. (Tmass(i,j) > m_min) - - !----------------------------------------------------------------- - ! augmented mask (land + open ocean) - !----------------------------------------------------------------- - iceTmask (i,j) = .false. - - enddo - enddo - - do j = jlo, jhi - do i = ilo, ihi - - ! extend ice extent mask (T-cells) to points around pack - if (tmphm(i-1,j+1) .or. tmphm(i,j+1) .or. tmphm(i+1,j+1) .or. & - tmphm(i-1,j) .or. tmphm(i,j) .or. tmphm(i+1,j) .or. 
& - tmphm(i-1,j-1) .or. tmphm(i,j-1) .or. tmphm(i+1,j-1) ) then - iceTmask(i,j) = .true. - endif - - if (.not.Tmask(i,j)) iceTmask(i,j) = .false. - - enddo - enddo - - end subroutine dyn_prep1 - -!======================================================================= -! Computes quantities needed in the stress tensor (sigma) -! and momentum (u) equations, but which do not change during -! the thermodynamics/transport time step: -! --wind stress shift to U grid, -! --ice mass and ice extent masks, -! initializes ice velocity for new points to ocean sfc current -! -! author: Elizabeth C. Hunke, LANL - - subroutine dyn_prep2 (nx_block, ny_block, & - ilo, ihi, jlo, jhi, & - icellT, icellX, & - indxTi, indxTj, & - indxXi, indxXj, & - aiX, Xmass, & - Xmassdti, fcor, & - Xmask, & - uocn, vocn, & - strairx, strairy, & - ss_tltx, ss_tlty, & - iceTmask, iceXmask, & - fm, dt, & - strtltx, strtlty, & - strocnx, strocny, & - strintx, strinty, & - taubx, tauby, & - waterx, watery, & - forcex, forcey, & - stressp_1, stressp_2, & - stressp_3, stressp_4, & - stressm_1, stressm_2, & - stressm_3, stressm_4, & - stress12_1, stress12_2, & - stress12_3, stress12_4, & - uvel_init, vvel_init, & - uvel, vvel, & - TbU) - - integer (kind=int_kind), intent(in) :: & - nx_block, ny_block, & ! block dimensions - ilo,ihi,jlo,jhi ! beginning and end of physical domain - - integer (kind=int_kind), intent(out) :: & - icellT , & ! no. of cells where iceTmask = .true. - icellX ! no. of cells where iceXmask = .true. - - integer (kind=int_kind), dimension (nx_block*ny_block), intent(out) :: & - indxTi , & ! compressed index in i-direction on T grid - indxTj , & ! compressed index in j-direction - indxXi , & ! compressed index in i-direction on X grid, grid depends on call - indxXj ! compressed index in j-direction - - logical (kind=log_kind), dimension (nx_block,ny_block), intent(in) :: & - Xmask ! 
land/boundary mask, thickness (X-grid-cell) - - logical (kind=log_kind), dimension (nx_block,ny_block), intent(in) :: & - iceTmask ! ice extent mask (T-cell) - - logical (kind=log_kind), dimension (nx_block,ny_block), intent(inout) :: & - iceXmask ! ice extent mask (X-grid-cell) - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - aiX , & ! ice fraction on u-grid (X grid) - Xmass , & ! total mass of ice and snow (X grid) - fcor , & ! Coriolis parameter (1/s) - strairx , & ! stress on ice by air, x-direction - strairy , & ! stress on ice by air, y-direction - uocn , & ! ocean current, x-direction (m/s) - vocn , & ! ocean current, y-direction (m/s) - ss_tltx , & ! sea surface slope, x-direction (m/m) - ss_tlty ! sea surface slope, y-direction - - real (kind=dbl_kind), intent(in) :: & - dt ! time step - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out) :: & - TbU, & ! seabed stress factor (N/m^2) - uvel_init,& ! x-component of velocity (m/s), beginning of time step - vvel_init,& ! y-component of velocity (m/s), beginning of time step - Xmassdti, & ! mass of X-grid-cell/dt (kg/m^2 s) - waterx , & ! for ocean stress calculation, x (m/s) - watery , & ! for ocean stress calculation, y (m/s) - forcex , & ! work array: combined atm stress and ocn tilt, x - forcey ! work array: combined atm stress and ocn tilt, y - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & - fm , & ! Coriolis param. * mass in U-cell (kg/s) - stressp_1, stressp_2, stressp_3, stressp_4 , & ! sigma11+sigma22 - stressm_1, stressm_2, stressm_3, stressm_4 , & ! sigma11-sigma22 - stress12_1,stress12_2,stress12_3,stress12_4, & ! sigma12 - uvel , & ! x-component of velocity (m/s) - vvel , & ! y-component of velocity (m/s) - strtltx , & ! stress due to sea surface slope, x-direction - strtlty , & ! stress due to sea surface slope, y-direction - strocnx , & ! ice-ocean stress, x-direction - strocny , & ! ice-ocean stress, y-direction - strintx , & ! 
divergence of internal ice stress, x (N/m^2) - strinty , & ! divergence of internal ice stress, y (N/m^2) - taubx , & ! seabed stress, x-direction (N/m^2) - tauby ! seabed stress, y-direction (N/m^2) - - ! local variables - - integer (kind=int_kind) :: & - i, j, ij - - real (kind=dbl_kind) :: & - gravit - - logical (kind=log_kind), dimension(nx_block,ny_block) :: & - iceXmask_old ! old-time iceXmask - - character(len=*), parameter :: subname = '(dyn_prep2)' - - !----------------------------------------------------------------- - ! Initialize - !----------------------------------------------------------------- - - do j = 1, ny_block - do i = 1, nx_block - waterx (i,j) = c0 - watery (i,j) = c0 - forcex (i,j) = c0 - forcey (i,j) = c0 - Xmassdti (i,j) = c0 - TbU (i,j) = c0 - taubx (i,j) = c0 - tauby (i,j) = c0 - - if (.not.iceTmask(i,j)) then - stressp_1 (i,j) = c0 - stressp_2 (i,j) = c0 - stressp_3 (i,j) = c0 - stressp_4 (i,j) = c0 - stressm_1 (i,j) = c0 - stressm_2 (i,j) = c0 - stressm_3 (i,j) = c0 - stressm_4 (i,j) = c0 - stress12_1(i,j) = c0 - stress12_2(i,j) = c0 - stress12_3(i,j) = c0 - stress12_4(i,j) = c0 - endif - enddo ! i - enddo ! j - - !----------------------------------------------------------------- - ! Identify cells where iceTmask = .true. - ! Note: The icellT mask includes north and east ghost cells - ! where stresses are needed. - !----------------------------------------------------------------- - - icellT = 0 - do j = jlo, jhi+1 - do i = ilo, ihi+1 - if (iceTmask(i,j)) then - icellT = icellT + 1 - indxTi(icellT) = i - indxTj(icellT) = j - endif - enddo - enddo - - !----------------------------------------------------------------- - ! Define iceXmask - ! Identify cells where iceXmask is true - ! Initialize velocity where needed - !----------------------------------------------------------------- - - icellX = 0 - - do j = jlo, jhi - do i = ilo, ihi - iceXmask_old(i,j) = iceXmask(i,j) ! save - ! 
ice extent mask (U-cells) - iceXmask(i,j) = (Xmask(i,j)) .and. (aiX (i,j) > a_min) & - .and. (Xmass(i,j) > m_min) - - if (iceXmask(i,j)) then - icellX = icellX + 1 - indxXi(icellX) = i - indxXj(icellX) = j - - ! initialize velocity for new ice points to ocean sfc current - if (.not. iceXmask_old(i,j)) then - uvel(i,j) = uocn(i,j) - vvel(i,j) = vocn(i,j) - endif - else - ! set velocity and stresses to zero for masked-out points - uvel(i,j) = c0 - vvel(i,j) = c0 - strintx(i,j) = c0 - strinty(i,j) = c0 - strocnx(i,j) = c0 - strocny(i,j) = c0 - endif - - uvel_init(i,j) = uvel(i,j) - vvel_init(i,j) = vvel(i,j) - enddo - enddo - - !----------------------------------------------------------------- - ! Define variables for momentum equation - !----------------------------------------------------------------- - - if (trim(ssh_stress) == 'coupled') then - call icepack_query_parameters(gravit_out=gravit) - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) - endif - - do ij = 1, icellX - i = indxXi(ij) - j = indxXj(ij) - - Xmassdti(i,j) = Xmass(i,j)/dt ! kg/m^2 s - - fm(i,j) = fcor(i,j)*Xmass(i,j) ! Coriolis * mass - - ! for ocean stress - waterx(i,j) = uocn(i,j)*cosw - vocn(i,j)*sinw*sign(c1,fm(i,j)) - watery(i,j) = vocn(i,j)*cosw + uocn(i,j)*sinw*sign(c1,fm(i,j)) - - ! combine tilt with wind stress - if (trim(ssh_stress) == 'geostrophic') then - ! 
calculate tilt from geostrophic currents if needed - strtltx(i,j) = -fm(i,j)*vocn(i,j) - strtlty(i,j) = fm(i,j)*uocn(i,j) - elseif (trim(ssh_stress) == 'coupled') then - strtltx(i,j) = -gravit*Xmass(i,j)*ss_tltx(i,j) - strtlty(i,j) = -gravit*Xmass(i,j)*ss_tlty(i,j) - else - call abort_ice(subname//' ERROR: unknown ssh_stress='//trim(ssh_stress), & - file=__FILE__, line=__LINE__) - endif - - forcex(i,j) = strairx(i,j) + strtltx(i,j) - forcey(i,j) = strairy(i,j) + strtlty(i,j) - enddo - - end subroutine dyn_prep2 - -!======================================================================= -! Calculation of the surface stresses -! Integration of the momentum equation to find velocity (u,v) -! -! author: Elizabeth C. Hunke, LANL - - subroutine stepu (nx_block, ny_block, & - icellU, Cw, & - indxUi, indxUj, & - aiX, str, & - uocn, vocn, & - waterx, watery, & - forcex, forcey, & - Umassdti, fm, & - uarear, & - strintx, strinty, & - taubx, tauby, & - uvel_init, vvel_init,& - uvel, vvel, & - TbU) - - integer (kind=int_kind), intent(in) :: & - nx_block, ny_block, & ! block dimensions - icellU ! total count when iceUmask is true - - integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxUi , & ! compressed index in i-direction - indxUj ! compressed index in j-direction - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - TbU, & ! seabed stress factor (N/m^2) - uvel_init,& ! x-component of velocity (m/s), beginning of timestep - vvel_init,& ! y-component of velocity (m/s), beginning of timestep - aiX , & ! ice fraction on X-grid - waterx , & ! for ocean stress calculation, x (m/s) - watery , & ! for ocean stress calculation, y (m/s) - forcex , & ! work array: combined atm stress and ocn tilt, x - forcey , & ! work array: combined atm stress and ocn tilt, y - Umassdti, & ! mass of U-cell/dt (kg/m^2 s) - uocn , & ! ocean current, x-direction (m/s) - vocn , & ! ocean current, y-direction (m/s) - fm , & ! Coriolis param. 
* mass in U-cell (kg/s) - uarear ! 1/uarea - - real (kind=dbl_kind), dimension(nx_block,ny_block,8), intent(in) :: & - str ! temporary - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & - uvel , & ! x-component of velocity (m/s) - vvel ! y-component of velocity (m/s) - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & - strintx , & ! divergence of internal ice stress, x (N/m^2) - strinty , & ! divergence of internal ice stress, y (N/m^2) - taubx , & ! seabed stress, x-direction (N/m^2) - tauby ! seabed stress, y-direction (N/m^2) - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - Cw ! ocean-ice neutral drag coefficient - - ! local variables - - integer (kind=int_kind) :: & - i, j, ij - - real (kind=dbl_kind) :: & - uold, vold , & ! old-time uvel, vvel - vrel , & ! relative ice-ocean velocity - cca,ccb,ab2,cc1,cc2,& ! intermediate variables - taux, tauy , & ! part of ocean stress term - Cb , & ! complete seabed (basal) stress coeff - rhow ! - - character(len=*), parameter :: subname = '(stepu)' - - !----------------------------------------------------------------- - ! integrate the momentum equation - !----------------------------------------------------------------- - - call icepack_query_parameters(rhow_out=rhow) - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) - - do ij =1, icellU - i = indxUi(ij) - j = indxUj(ij) - - uold = uvel(i,j) - vold = vvel(i,j) - - ! (magnitude of relative ocean current)*rhow*drag*aice - vrel = aiX(i,j)*rhow*Cw(i,j)*sqrt((uocn(i,j) - uold)**2 + & - (vocn(i,j) - vold)**2) ! m/s - ! ice/ocean stress - taux = vrel*waterx(i,j) ! NOTE this is not the entire - tauy = vrel*watery(i,j) ! ocn stress term - - Cb = TbU(i,j) / (sqrt(uold**2 + vold**2) + u0) ! for seabed stress - ! revp = 0 for classic evp, 1 for revised evp - cca = (brlx + revp)*Umassdti(i,j) + vrel * cosw + Cb ! 
kg/m^2 s - - ccb = fm(i,j) + sign(c1,fm(i,j)) * vrel * sinw ! kg/m^2 s - - ab2 = cca**2 + ccb**2 - - ! divergence of the internal stress tensor - strintx(i,j) = uarear(i,j)* & - (str(i,j,1) + str(i+1,j,2) + str(i,j+1,3) + str(i+1,j+1,4)) - strinty(i,j) = uarear(i,j)* & - (str(i,j,5) + str(i,j+1,6) + str(i+1,j,7) + str(i+1,j+1,8)) - - ! finally, the velocity components - cc1 = strintx(i,j) + forcex(i,j) + taux & - + Umassdti(i,j)*(brlx*uold + revp*uvel_init(i,j)) - cc2 = strinty(i,j) + forcey(i,j) + tauy & - + Umassdti(i,j)*(brlx*vold + revp*vvel_init(i,j)) - - uvel(i,j) = (cca*cc1 + ccb*cc2) / ab2 ! m/s - vvel(i,j) = (cca*cc2 - ccb*cc1) / ab2 - - ! calculate seabed stress component for outputs - ! only needed on last iteration. - taubx(i,j) = -uvel(i,j)*Cb - tauby(i,j) = -vvel(i,j)*Cb - enddo ! ij - - end subroutine stepu - -!======================================================================= -! Integration of the momentum equation to find velocity (u,v) at E and N locations - - subroutine stepuv_CD (nx_block, ny_block, & - icell, Cw, & - indxi, indxj, & - aiX, & - uocn, vocn, & - waterx, watery, & - forcex, forcey, & - massdti, fm, & - strintx, strinty, & - taubx, tauby, & - uvel_init, vvel_init,& - uvel, vvel, & - Tb) - - integer (kind=int_kind), intent(in) :: & - nx_block, ny_block, & ! block dimensions - icell ! total count when ice[en]mask is true - - integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxi , & ! compressed index in i-direction - indxj ! compressed index in j-direction - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - Tb, & ! seabed stress factor (N/m^2) - uvel_init,& ! x-component of velocity (m/s), beginning of timestep - vvel_init,& ! y-component of velocity (m/s), beginning of timestep - aiX , & ! ice fraction on X-grid - waterx , & ! for ocean stress calculation, x (m/s) - watery , & ! for ocean stress calculation, y (m/s) - forcex , & ! 
work array: combined atm stress and ocn tilt, x - forcey , & ! work array: combined atm stress and ocn tilt, y - massdti , & ! mass of [EN]-cell/dt (kg/m^2 s) - uocn , & ! ocean current, x-direction (m/s) - vocn , & ! ocean current, y-direction (m/s) - fm , & ! Coriolis param. * mass in [EN]-cell (kg/s) - strintx , & ! divergence of internal ice stress, x (N/m^2) - strinty ! divergence of internal ice stress, y (N/m^2) - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & - uvel , & ! x-component of velocity (m/s) - vvel ! y-component of velocity (m/s) - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & - taubx , & ! seabed stress, x-direction (N/m^2) - tauby ! seabed stress, y-direction (N/m^2) - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - Cw ! ocean-ice neutral drag coefficient - - ! local variables - - integer (kind=int_kind) :: & - i, j, ij - - real (kind=dbl_kind) :: & - uold, vold , & ! old-time uvel, vvel - vrel , & ! relative ice-ocean velocity - cca,ccb,ccc,ab2 , & ! intermediate variables - cc1,cc2 , & ! " - taux, tauy , & ! part of ocean stress term - Cb , & ! complete seabed (basal) stress coeff - rhow ! - - character(len=*), parameter :: subname = '(stepuv_CD)' - - !----------------------------------------------------------------- - ! integrate the momentum equation - !----------------------------------------------------------------- - - call icepack_query_parameters(rhow_out=rhow) - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) - - do ij =1, icell - i = indxi(ij) - j = indxj(ij) - - uold = uvel(i,j) - vold = vvel(i,j) - - ! (magnitude of relative ocean current)*rhow*drag*aice - vrel = aiX(i,j)*rhow*Cw(i,j)*sqrt((uocn(i,j) - uold)**2 + & - (vocn(i,j) - vold)**2) ! m/s - ! ice/ocean stress - taux = vrel*waterx(i,j) ! NOTE this is not the entire - tauy = vrel*watery(i,j) ! 
ocn stress term - - ccc = sqrt(uold**2 + vold**2) + u0 - Cb = Tb(i,j) / ccc ! for seabed stress - ! revp = 0 for classic evp, 1 for revised evp - cca = (brlx + revp)*massdti(i,j) + vrel * cosw + Cb ! kg/m^2 s - - ccb = fm(i,j) + sign(c1,fm(i,j)) * vrel * sinw ! kg/m^2 s - - ab2 = cca**2 + ccb**2 - - ! compute the velocity components - cc1 = strintx(i,j) + forcex(i,j) + taux & - + massdti(i,j)*(brlx*uold + revp*uvel_init(i,j)) - cc2 = strinty(i,j) + forcey(i,j) + tauy & - + massdti(i,j)*(brlx*vold + revp*vvel_init(i,j)) - uvel(i,j) = (cca*cc1 + ccb*cc2) / ab2 ! m/s - vvel(i,j) = (cca*cc2 - ccb*cc1) / ab2 - - ! calculate seabed stress component for outputs - ! only needed on last iteration. - taubx(i,j) = -uvel(i,j)*Cb - tauby(i,j) = -vvel(i,j)*Cb - - enddo ! ij - - end subroutine stepuv_CD - -!======================================================================= -! Integration of the momentum equation to find velocity u at E location on C grid - - subroutine stepu_C (nx_block, ny_block, & - icell, Cw, & - indxi, indxj, & - aiX, & - uocn, vocn, & - waterx, forcex, & - massdti, fm, & - strintx, taubx, & - uvel_init, & - uvel, vvel, & - Tb) - - integer (kind=int_kind), intent(in) :: & - nx_block, ny_block, & ! block dimensions - icell ! total count when ice[en]mask is true - - integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxi , & ! compressed index in i-direction - indxj ! compressed index in j-direction - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - Tb, & ! seabed stress factor (N/m^2) - uvel_init,& ! x-component of velocity (m/s), beginning of timestep - aiX , & ! ice fraction on X-grid - waterx , & ! for ocean stress calculation, x (m/s) - forcex , & ! work array: combined atm stress and ocn tilt, x - massdti , & ! mass of e-cell/dt (kg/m^2 s) - uocn , & ! ocean current, x-direction (m/s) - vocn , & ! ocean current, y-direction (m/s) - fm , & ! Coriolis param. * mass in e-cell (kg/s) - strintx , & ! 
divergence of internal ice stress, x (N/m^2) - Cw , & ! ocean-ice neutral drag coefficient - vvel ! y-component of velocity (m/s) interpolated to E location - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & - uvel , & ! x-component of velocity (m/s) - taubx ! seabed stress, x-direction (N/m^2) - - ! local variables - - integer (kind=int_kind) :: & - i, j, ij - - real (kind=dbl_kind) :: & - uold, vold , & ! old-time uvel, vvel - vrel , & ! relative ice-ocean velocity - cca,ccb,ccc,cc1 , & ! intermediate variables - taux , & ! part of ocean stress term - Cb , & ! complete seabed (basal) stress coeff - rhow ! - - character(len=*), parameter :: subname = '(stepu_C)' - - !----------------------------------------------------------------- - ! integrate the momentum equation - !----------------------------------------------------------------- - - call icepack_query_parameters(rhow_out=rhow) - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) - - do ij =1, icell - i = indxi(ij) - j = indxj(ij) - - uold = uvel(i,j) - vold = vvel(i,j) - - ! (magnitude of relative ocean current)*rhow*drag*aice - vrel = aiX(i,j)*rhow*Cw(i,j)*sqrt((uocn(i,j) - uold)**2 + & - (vocn(i,j) - vold)**2) ! m/s - ! ice/ocean stress - taux = vrel*waterx(i,j) ! NOTE this is not the entire - - ccc = sqrt(uold**2 + vold**2) + u0 - Cb = Tb(i,j) / ccc ! for seabed stress - ! revp = 0 for classic evp, 1 for revised evp - cca = (brlx + revp)*massdti(i,j) + vrel * cosw + Cb ! kg/m^2 s - - ccb = fm(i,j) + sign(c1,fm(i,j)) * vrel * sinw ! kg/m^2 s - - ! compute the velocity components - cc1 = strintx(i,j) + forcex(i,j) + taux & - + massdti(i,j)*(brlx*uold + revp*uvel_init(i,j)) - - uvel(i,j) = (ccb*vold + cc1) / cca ! m/s - - ! calculate seabed stress component for outputs - ! only needed on last iteration. - taubx(i,j) = -uvel(i,j)*Cb - - enddo ! 
ij - - end subroutine stepu_C - -!======================================================================= -! Integration of the momentum equation to find velocity v at N location on C grid - - subroutine stepv_C (nx_block, ny_block, & - icell, Cw, & - indxi, indxj, & - aiX, & - uocn, vocn, & - watery, forcey, & - massdti, fm, & - strinty, tauby, & - vvel_init, & - uvel, vvel, & - Tb) - - integer (kind=int_kind), intent(in) :: & - nx_block, ny_block, & ! block dimensions - icell ! total count when ice[en]mask is true - - integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxi , & ! compressed index in i-direction - indxj ! compressed index in j-direction - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - Tb, & ! seabed stress factor (N/m^2) - vvel_init,& ! y-component of velocity (m/s), beginning of timestep - aiX , & ! ice fraction on X-grid - watery , & ! for ocean stress calculation, y (m/s) - forcey , & ! work array: combined atm stress and ocn tilt, y - massdti , & ! mass of n-cell/dt (kg/m^2 s) - uocn , & ! ocean current, x-direction (m/s) - vocn , & ! ocean current, y-direction (m/s) - fm , & ! Coriolis param. * mass in n-cell (kg/s) - strinty , & ! divergence of internal ice stress, y (N/m^2) - Cw , & ! ocean-ice neutral drag coefficient - uvel ! x-component of velocity (m/s) interpolated to N location - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & - vvel , & ! y-component of velocity (m/s) - tauby ! seabed stress, y-direction (N/m^2) - - ! local variables - - integer (kind=int_kind) :: & - i, j, ij - - real (kind=dbl_kind) :: & - uold, vold , & ! old-time uvel, vvel - vrel , & ! relative ice-ocean velocity - cca,ccb,ccc,cc2 , & ! intermediate variables - tauy , & ! part of ocean stress term - Cb , & ! complete seabed (basal) stress coeff - rhow ! - - character(len=*), parameter :: subname = '(stepv_C)' - - !----------------------------------------------------------------- - ! 
integrate the momentum equation - !----------------------------------------------------------------- - - call icepack_query_parameters(rhow_out=rhow) - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) - - do ij =1, icell - i = indxi(ij) - j = indxj(ij) - - uold = uvel(i,j) - vold = vvel(i,j) - - ! (magnitude of relative ocean current)*rhow*drag*aice - vrel = aiX(i,j)*rhow*Cw(i,j)*sqrt((uocn(i,j) - uold)**2 + & - (vocn(i,j) - vold)**2) ! m/s - ! ice/ocean stress - tauy = vrel*watery(i,j) ! NOTE this is not the entire ocn stress - - ccc = sqrt(uold**2 + vold**2) + u0 - Cb = Tb(i,j) / ccc ! for seabed stress - ! revp = 0 for classic evp, 1 for revised evp - cca = (brlx + revp)*massdti(i,j) + vrel * cosw + Cb ! kg/m^2 s - - ccb = fm(i,j) + sign(c1,fm(i,j)) * vrel * sinw ! kg/m^2 s - - ! compute the velocity components - cc2 = strinty(i,j) + forcey(i,j) + tauy & - + massdti(i,j)*(brlx*vold + revp*vvel_init(i,j)) - - vvel(i,j) = (-ccb*uold + cc2) / cca - - ! calculate seabed stress component for outputs - ! only needed on last iteration. - tauby(i,j) = -vvel(i,j)*Cb - - enddo ! ij - - end subroutine stepv_C - -!======================================================================= -! Calculation of the ice-ocean stress. -! ...the sign will be reversed later... -! -! author: Elizabeth C. Hunke, LANL - - subroutine dyn_finish (nx_block, ny_block, & - icellU, Cw, & - indxUi, indxUj, & - uvel, vvel, & - uocn, vocn, & - aiX, fm, & - strocnx, strocny) - - integer (kind=int_kind), intent(in) :: & - nx_block, ny_block, & ! block dimensions - icellU ! total count when iceUmask is true - - integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxUi , & ! compressed index in i-direction - indxUj ! compressed index in j-direction - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - uvel , & ! x-component of velocity (m/s) - vvel , & ! 
y-component of velocity (m/s) - uocn , & ! ocean current, x-direction (m/s) - vocn , & ! ocean current, y-direction (m/s) - aiX , & ! ice fraction on X-grid - fm ! Coriolis param. * mass in U-cell (kg/s) - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & - strocnx , & ! ice-ocean stress, x-direction - strocny ! ice-ocean stress, y-direction - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - Cw ! ocean-ice neutral drag coefficient - - ! local variables - - integer (kind=int_kind) :: & - i, j, ij - - real (kind=dbl_kind) :: & - vrel , & ! - rhow ! - - character(len=*), parameter :: subname = '(dyn_finish)' - - call icepack_query_parameters(rhow_out=rhow) - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) - - ! ocean-ice stress for coupling - do ij =1, icellU - i = indxUi(ij) - j = indxUj(ij) - - vrel = rhow*Cw(i,j)*sqrt((uocn(i,j) - uvel(i,j))**2 + & - (vocn(i,j) - vvel(i,j))**2) ! m/s - -! strocnx(i,j) = strocnx(i,j) & -! - vrel*(uvel(i,j)*cosw - vvel(i,j)*sinw) * aiX(i,j) -! strocny(i,j) = strocny(i,j) & -! - vrel*(vvel(i,j)*cosw + uvel(i,j)*sinw) * aiX(i,j) - - ! update strocnx to most recent iterate and complete the term - vrel = vrel * aiX(i,j) - strocnx(i,j) = vrel*((uocn(i,j) - uvel(i,j))*cosw & - - (vocn(i,j) - vvel(i,j))*sinw*sign(c1,fm(i,j))) - strocny(i,j) = vrel*((vocn(i,j) - vvel(i,j))*cosw & - + (uocn(i,j) - uvel(i,j))*sinw*sign(c1,fm(i,j))) - - ! Hibler/Bryan stress - ! the sign is reversed later, therefore negative here -! strocnx(i,j) = -(strairx(i,j) + strintx(i,j)) -! strocny(i,j) = -(strairy(i,j) + strinty(i,j)) - - enddo - - end subroutine dyn_finish - -!======================================================================= -! Computes seabed (basal) stress factor TbU (landfast ice) based on mean -! thickness and bathymetry data. LKD refers to linear keel draft. This -! 
parameterization assumes that the largest keel draft varies linearly -! with the mean thickness. -! -! Lemieux, J. F., B. Tremblay, F. Dupont, M. Plante, G.C. Smith, D. Dumont (2015). -! A basal stress parameterization form modeling landfast ice, J. Geophys. Res. -! Oceans, 120, 3157-3173. -! -! Lemieux, J. F., F. Dupont, P. Blain, F. Roy, G.C. Smith, G.M. Flato (2016). -! Improving the simulation of landfast ice by combining tensile strength and a -! parameterization for grounded ridges, J. Geophys. Res. Oceans, 121, 7354-7368. -! -! author: JF Lemieux, Philippe Blain (ECCC) -! -! note1: TbU is a part of the Cb as defined in Lemieux et al. 2015 and 2016. -! note2: Seabed stress (better name) was called basal stress in Lemieux et al. 2015 - - subroutine seabed_stress_factor_LKD (nx_block, ny_block, & - icellU, & - indxUi, indxUj, & - vice, aice, & - hwater, TbU, & - grid_location) - - use ice_grid, only: grid_neighbor_min, grid_neighbor_max - - integer (kind=int_kind), intent(in) :: & - nx_block, ny_block, & ! block dimensions - icellU ! no. of cells where ice[uen]mask = 1 - - integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxUi , & ! compressed index in i-direction - indxUj ! compressed index in j-direction - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - aice , & ! concentration of ice at tracer location - vice , & ! volume per unit area of ice at tracer location (m) - hwater ! water depth at tracer location (m) - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & - TbU ! seabed stress factor at 'grid_location' (N/m^2) - - character(len=*), optional, intent(inout) :: & - grid_location ! grid location (U, E, N), U assumed if not present - - real (kind=dbl_kind) :: & - au , & ! concentration of ice at u location - hu , & ! volume per unit area of ice at u location (mean thickness, m) - hwu , & ! water depth at u location (m) - docalc_tbu, & ! 
logical as real (C0,C1) decides whether c0 is 0 or - hcu ! critical thickness at u location (m) - - integer (kind=int_kind) :: & - i, j, ij - - character(len=char_len) :: & - l_grid_location ! local version of 'grid_location' - - character(len=*), parameter :: subname = '(seabed_stress_factor_LKD)' - - ! Assume U location (NE corner) if grid_location not present - if (.not. (present(grid_location))) then - l_grid_location = 'U' - else - l_grid_location = grid_location - endif - - do ij = 1, icellU - i = indxUi(ij) - j = indxUj(ij) - - ! convert quantities to grid_location - - hwu = grid_neighbor_min(hwater, i, j, l_grid_location) - - docalc_tbu = merge(c1,c0,hwu < threshold_hw) - - - au = grid_neighbor_max(aice, i, j, l_grid_location) - hu = grid_neighbor_max(vice, i, j, l_grid_location) - - ! 1- calculate critical thickness - hcu = au * hwu / k1 - - ! 2- calculate seabed stress factor - TbU(i,j) = docalc_tbu*k2 * max(c0,(hu - hcu)) * exp(-alphab * (c1 - au)) - - enddo ! ij - - end subroutine seabed_stress_factor_LKD - -!======================================================================= -! Computes seabed (basal) stress factor TbU (landfast ice) based on -! probability of contact between the ITD and the seabed. The water depth -! could take into account variations of the SSH. In the simplest -! formulation, hwater is simply the value of the bathymetry. To calculate -! the probability of contact, it is assumed that the bathymetry follows -! a normal distribution with sigma_b = 2.5d0. An improvement would -! be to provide the distribution based on high resolution data. -! -! Dupont, F. Dumont, D., Lemieux, J.F., Dumas-Lefebvre, E., Caya, A. -! in prep. -! -! authors: D. Dumont, J.F. Lemieux, E. Dumas-Lefebvre, F. Dupont -! - subroutine seabed_stress_factor_prob (nx_block, ny_block, & - icellT, indxTi, indxTj, & - icellU, indxUi, indxUj, & - aicen, vicen, & - hwater, TbU, & - TbE, TbN, & - icellE, indxEi, indxEj, & - icellN, indxNi, indxNj) -! 
use modules - - use ice_arrays_column, only: hin_max - use ice_domain_size, only: ncat - use ice_grid, only: grid_neighbor_min, grid_neighbor_max - - integer (kind=int_kind), intent(in) :: & - nx_block, ny_block, & ! block dimensions - icellT, icellU ! no. of cells where ice[tu]mask = 1 - - integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxTi , & ! compressed index in i-direction - indxTj , & ! compressed index in j-direction - indxUi , & ! compressed index in i-direction - indxUj ! compressed index in j-direction - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - hwater ! water depth at tracer location (m) - - real (kind=dbl_kind), dimension (nx_block,ny_block,ncat), intent(in) :: & - aicen, & ! partial concentration for last thickness category in ITD - vicen ! partial volume for last thickness category in ITD (m) - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & - TbU ! seabed stress factor at U location (N/m^2) - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout), optional :: & - TbE, & ! seabed stress factor at E location (N/m^2) - TbN ! seabed stress factor at N location (N/m^2) - - integer (kind=int_kind), intent(in), optional :: & - icellE, icellN ! no. of cells where ice[en]mask = 1 - - integer (kind=int_kind), dimension (nx_block*ny_block), intent(in), optional :: & - indxEi , & ! compressed index in i-direction - indxEj , & ! compressed index in j-direction - indxNi , & ! compressed index in i-direction - indxNj ! compressed index in j-direction - -! local variables - - integer (kind=int_kind) :: & - i, j, ij, ii, n - - integer, parameter :: & - ncat_b = 100, & ! number of bathymetry categories - ncat_i = 100 ! number of ice thickness categories (log-normal) - - real (kind=dbl_kind), parameter :: & - max_depth = 50.0_dbl_kind, & ! initial range of log-normal distribution - mu_s = 0.1_dbl_kind, & ! friction coefficient - sigma_b = 2.5d0 ! 
Standard deviation of bathymetry - - real (kind=dbl_kind), dimension(ncat_i) :: & ! log-normal for ice thickness - x_k, & ! center of thickness categories (m) - g_k, & ! probability density function (thickness, 1/m) - P_x ! probability for each thickness category - - real (kind=dbl_kind), dimension(ncat_b) :: & ! normal dist for bathymetry - y_n, & ! center of bathymetry categories (m) - b_n, & ! probability density function (bathymetry, 1/m) - P_y ! probability for each bathymetry category - - real (kind=dbl_kind), dimension(ncat) :: & - vcat, acat ! vice, aice temporary arrays - - integer, dimension(ncat_b) :: & - tmp ! Temporary vector tmp = merge(1,0,gt) - - logical, dimension (ncat_b) :: & - gt ! - - real (kind=dbl_kind) :: & - wid_i, wid_b , & ! parameters for PDFs - mu_i, sigma_i , & ! - mu_b, m_i, v_i, & ! - atot, x_kmax , & ! - cut , & ! - rhoi, rhow , & ! - gravit , & ! - pi, puny ! - - real (kind=dbl_kind), dimension(ncat_i) :: & - tb_tmp - - real (kind=dbl_kind), dimension (nx_block,ny_block) :: & - Tbt ! seabed stress factor at t point (N/m^2) - - character(len=*), parameter :: subname = '(seabed_stress_factor_prob)' - - call icepack_query_parameters(rhow_out=rhow, rhoi_out=rhoi) - call icepack_query_parameters(gravit_out=gravit) - call icepack_query_parameters(pi_out=pi) - call icepack_query_parameters(puny_out=puny) - - Tbt=c0 - - do ij = 1, icellT - i = indxTi(ij) - j = indxTj(ij) - - atot = sum(aicen(i,j,1:ncat)) - - if (atot > 0.05_dbl_kind .and. hwater(i,j) < max_depth) then - - mu_b = hwater(i,j) ! mean of PDF (normal dist) bathymetry - wid_i = max_depth/ncat_i ! width of ice categories - wid_b = c6*sigma_b/ncat_b ! 
width of bathymetry categories (6 sigma_b = 2x3 sigma_b) - - x_k = (/( wid_i*( real(i,kind=dbl_kind) - p5 ), i=1, ncat_i )/) - y_n = (/( ( mu_b-c3*sigma_b )+( real(i,kind=dbl_kind) - p5 )*( c6*sigma_b/ncat_b ), i=1, ncat_b )/) - - vcat(1:ncat) = vicen(i,j,1:ncat) - acat(1:ncat) = aicen(i,j,1:ncat) - - m_i = sum(vcat) - - v_i=c0 - do n =1, ncat - v_i = v_i + vcat(n)**2 / (max(acat(n), puny)) - enddo - v_i = v_i - m_i**2 - - mu_i = log(m_i/sqrt(c1 + v_i/m_i**2)) ! parameters for the log-normal - sigma_i = sqrt(log(c1 + v_i/m_i**2)) - - ! max thickness associated with percentile of log-normal PDF - ! x_kmax=x997 was obtained from an optimization procedure (Dupont et al.) - - x_kmax = exp(mu_i + sqrt(c2*sigma_i)*1.9430d0) - - ! Set x_kmax to hlev of the last category where there is ice - ! when there is no ice in the last category - cut = x_k(ncat_i) - do n = ncat,-1,1 - if (acat(n) < puny) then - cut = hin_max(n-1) - else - exit - endif - enddo - x_kmax = min(cut, x_kmax) - - g_k = exp(-(log(x_k) - mu_i) ** 2 / (c2 * sigma_i ** 2)) / (x_k * sigma_i * sqrt(c2 * pi)) - - b_n = exp(-(y_n - mu_b) ** 2 / (c2 * sigma_b ** 2)) / (sigma_b * sqrt(c2 * pi)) - - P_x = g_k*wid_i - P_y = b_n*wid_b - - do n =1, ncat_i - if (x_k(n) > x_kmax) P_x(n)=c0 - enddo - - ! calculate Tb factor at t-location - do n=1, ncat_i - gt = (y_n <= rhoi*x_k(n)/rhow) - tmp = merge(1,0,gt) - ii = sum(tmp) - if (ii == 0) then - tb_tmp(n) = c0 - else - tb_tmp(n) = max(mu_s*gravit*P_x(n)*sum(P_y(1:ii)*(rhoi*x_k(n) - rhow*y_n(1:ii))),c0) - endif - enddo - Tbt(i,j) = sum(tb_tmp)*exp(-alphab * (c1 - atot)) - endif - enddo - - if (grid_ice == "B") then - do ij = 1, icellU - i = indxUi(ij) - j = indxUj(ij) - ! convert quantities to U-location - TbU(i,j) = grid_neighbor_max(Tbt, i, j, 'U') - enddo ! ij - elseif (grid_ice == "C" .or. grid_ice == "CD") then - if (present(Tbe) .and. present(TbN) .and. & - present(icellE) .and. present(icellN) .and. & - present(indxEi) .and. present(indxEj) .and. 
& - present(indxNi) .and. present(indxNj)) then - - do ij = 1, icellE - i = indxEi(ij) - j = indxEj(ij) - ! convert quantities to E-location - TbE(i,j) = grid_neighbor_max(Tbt, i, j, 'E') - enddo - do ij = 1, icellN - i = indxNi(ij) - j = indxNj(ij) - ! convert quantities to N-location - TbN(i,j) = grid_neighbor_max(Tbt, i, j, 'N') - enddo - - else - call abort_ice(subname // ' insufficient number of arguments for grid_ice:' // grid_ice) - endif - endif - - end subroutine seabed_stress_factor_prob - -!======================================================================= -! Computes principal stresses for comparison with the theoretical -! yield curve -! -! author: Elizabeth C. Hunke, LANL - - subroutine principal_stress(nx_block, ny_block, & - stressp, stressm, & - stress12, strength, & - sig1, sig2, & - sigP) - - integer (kind=int_kind), intent(in) :: & - nx_block, ny_block ! block dimensions - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - stressp , & ! sigma11 + sigma22 - stressm , & ! sigma11 - sigma22 - stress12 , & ! sigma12 - strength ! for normalization of sig1 and sig2 - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out):: & - sig1 , & ! normalized principal stress component - sig2 , & ! normalized principal stress component - sigP ! internal ice pressure (N/m) - - ! local variables - - integer (kind=int_kind) :: & - i, j - - real (kind=dbl_kind) :: & - puny - - character(len=*), parameter :: subname = '(principal_stress)' - - call icepack_query_parameters(puny_out=puny) - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) - - do j = 1, ny_block - do i = 1, nx_block - if (strength(i,j) > puny) then - ! ice internal pressure - sigP(i,j) = -p5*stressp(i,j) - - ! 
normalized principal stresses - sig1(i,j) = (p5*(stressp(i,j) & - + sqrt(stressm(i,j)**2+c4*stress12(i,j)**2))) & - / strength(i,j) - sig2(i,j) = (p5*(stressp(i,j) & - - sqrt(stressm(i,j)**2+c4*stress12(i,j)**2))) & - / strength(i,j) - else - sig1(i,j) = spval_dbl - sig2(i,j) = spval_dbl - sigP(i,j) = spval_dbl - endif - enddo - enddo - - end subroutine principal_stress - -!======================================================================= -! Compute deformations for mechanical redistribution -! -! author: Elizabeth C. Hunke, LANL -! -! 2019: subroutine created by Philippe Blain, ECCC - - subroutine deformations (nx_block, ny_block, & - icellT, & - indxTi, indxTj, & - uvel, vvel, & - dxT, dyT, & - cxp, cyp, & - cxm, cym, & - tarear, & - shear, divu, & - rdg_conv, rdg_shear ) - - use ice_constants, only: p25, p5 - - integer (kind=int_kind), intent(in) :: & - nx_block, ny_block, & ! block dimensions - icellT ! no. of cells where iceTmask = .true. - - integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxTi , & ! compressed index in i-direction - indxTj ! compressed index in j-direction - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - uvel , & ! x-component of velocity (m/s) - vvel , & ! y-component of velocity (m/s) - dxT , & ! width of T-cell through the middle (m) - dyT , & ! height of T-cell through the middle (m) - cyp , & ! 1.5*HTE - 0.5*HTW - cxp , & ! 1.5*HTN - 0.5*HTS - cym , & ! 0.5*HTE - 1.5*HTW - cxm , & ! 0.5*HTN - 1.5*HTS - tarear ! 1/tarea - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & - shear , & ! strain rate II component (1/s) - divu , & ! strain rate I component, velocity divergence (1/s) - rdg_conv , & ! convergence term for ridging (1/s) - rdg_shear ! shear term for ridging (1/s) - - ! local variables - - integer (kind=int_kind) :: & - i, j, ij - - real (kind=dbl_kind) :: & ! at each corner : - divune, divunw, divuse, divusw , & ! 
divergence - tensionne, tensionnw, tensionse, tensionsw, & ! tension - shearne, shearnw, shearse, shearsw , & ! shearing - Deltane, Deltanw, Deltase, Deltasw , & ! Delta - tmp ! useful combination - - character(len=*), parameter :: subname = '(deformations)' - - do ij = 1, icellT - i = indxTi(ij) - j = indxTj(ij) - - !----------------------------------------------------------------- - ! strain rates - ! NOTE these are actually strain rates * area (m^2/s) - !----------------------------------------------------------------- - call strain_rates (nx_block, ny_block, & - i, j, & - uvel, vvel, & - dxT, dyT, & - cxp, cyp, & - cxm, cym, & - divune, divunw, & - divuse, divusw, & - tensionne, tensionnw, & - tensionse, tensionsw, & - shearne, shearnw, & - shearse, shearsw, & - Deltane, Deltanw, & - Deltase, Deltasw ) - !----------------------------------------------------------------- - ! deformations for mechanical redistribution - !----------------------------------------------------------------- - divu(i,j) = p25*(divune + divunw + divuse + divusw) * tarear(i,j) - tmp = p25*(Deltane + Deltanw + Deltase + Deltasw) * tarear(i,j) - rdg_conv(i,j) = -min(divu(i,j),c0) - rdg_shear(i,j) = p5*(tmp-abs(divu(i,j))) - - ! diagnostic only - ! shear = sqrt(tension**2 + shearing**2) - shear(i,j) = p25*tarear(i,j)*sqrt( & - (tensionne + tensionnw + tensionse + tensionsw)**2 + & - (shearne + shearnw + shearse + shearsw )**2) - - enddo ! ij - - end subroutine deformations - -!======================================================================= -! Compute deformations for mechanical redistribution at T point -! -! author: JF Lemieux, ECCC -! Nov 2021 - - subroutine deformationsCD_T (nx_block, ny_block, & - icellT, & - indxTi, indxTj, & - uvelE, vvelE, & - uvelN, vvelN, & - dxN, dyE, & - dxT, dyT, & - tarear, & - shear, divu, & - rdg_conv, rdg_shear ) - - use ice_constants, only: p5 - - integer (kind=int_kind), intent(in) :: & - nx_block, ny_block, & ! block dimensions - icellT ! no. 
of cells where iceTmask = .true. - - integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxTi , & ! compressed index in i-direction - indxTj ! compressed index in j-direction - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - uvelE , & ! x-component of velocity (m/s) at the E point - vvelE , & ! y-component of velocity (m/s) at the N point - uvelN , & ! x-component of velocity (m/s) at the E point - vvelN , & ! y-component of velocity (m/s) at the N point - dxN , & ! width of N-cell through the middle (m) - dyE , & ! height of E-cell through the middle (m) - dxT , & ! width of T-cell through the middle (m) - dyT , & ! height of T-cell through the middle (m) - tarear ! 1/tarea - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & - shear , & ! strain rate II component (1/s) - divu , & ! strain rate I component, velocity divergence (1/s) - rdg_conv , & ! convergence term for ridging (1/s) - rdg_shear ! shear term for ridging (1/s) - - ! local variables - - integer (kind=int_kind) :: & - i, j, ij - - real (kind=dbl_kind), dimension (nx_block,ny_block) :: & - divT , & ! divergence at T point - tensionT , & ! tension at T point - shearT , & ! shear at T point - DeltaT ! delt at T point - - real (kind=dbl_kind) :: & - tmp ! useful combination - - character(len=*), parameter :: subname = '(deformations_T)' - - !----------------------------------------------------------------- - ! strain rates - ! NOTE these are actually strain rates * area (m^2/s) - !----------------------------------------------------------------- - - call strain_rates_T (nx_block , ny_block , & - icellT , & - indxTi(:) , indxTj (:) , & - uvelE (:,:), vvelE (:,:), & - uvelN (:,:), vvelN (:,:), & - dxN (:,:), dyE (:,:), & - dxT (:,:), dyT (:,:), & - divT (:,:), tensionT(:,:), & - shearT(:,:), DeltaT (:,:) ) - - do ij = 1, icellT - i = indxTi(ij) - j = indxTj(ij) - - !----------------------------------------------------------------- - ! 
deformations for mechanical redistribution - !----------------------------------------------------------------- - divu(i,j) = divT(i,j) * tarear(i,j) - tmp = Deltat(i,j) * tarear(i,j) - rdg_conv(i,j) = -min(divu(i,j),c0) - rdg_shear(i,j) = p5*(tmp-abs(divu(i,j))) - - ! diagnostic only - ! shear = sqrt(tension**2 + shearing**2) - shear(i,j) = tarear(i,j)*sqrt( tensionT(i,j)**2 + shearT(i,j)**2 ) - - enddo ! ij - - end subroutine deformationsCD_T - - -!======================================================================= -! Compute deformations for mechanical redistribution at T point -! -! author: JF Lemieux, ECCC -! Nov 2021 - - subroutine deformationsC_T (nx_block, ny_block, & - icellT, & - indxTi, indxTj, & - uvelE, vvelE, & - uvelN, vvelN, & - dxN, dyE, & - dxT, dyT, & - tarear, uarea, & - shearU, & - shear, divu, & - rdg_conv, rdg_shear ) - - use ice_constants, only: p5 - - integer (kind=int_kind), intent(in) :: & - nx_block, ny_block, & ! block dimensions - icellT ! no. of cells where iceTmask = .true. - - integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxTi , & ! compressed index in i-direction - indxTj ! compressed index in j-direction - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - uvelE , & ! x-component of velocity (m/s) at the E point - vvelE , & ! y-component of velocity (m/s) at the N point - uvelN , & ! x-component of velocity (m/s) at the E point - vvelN , & ! y-component of velocity (m/s) at the N point - dxN , & ! width of N-cell through the middle (m) - dyE , & ! height of E-cell through the middle (m) - dxT , & ! width of T-cell through the middle (m) - dyT , & ! height of T-cell through the middle (m) - tarear , & ! 1/tarea - uarea , & ! area of u cell - shearU ! shearU - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & - shear , & ! strain rate II component (1/s) - divu , & ! strain rate I component, velocity divergence (1/s) - rdg_conv , & ! 
convergence term for ridging (1/s) - rdg_shear ! shear term for ridging (1/s) - - ! local variables - - integer (kind=int_kind) :: & - i, j, ij - - real (kind=dbl_kind), dimension (nx_block,ny_block) :: & - divT , & ! divergence at T point - tensionT , & ! tension at T point - shearT , & ! shear at T point - DeltaT ! delt at T point - - real (kind=dbl_kind) :: & - tmp , & ! useful combination - shearTsqr ! strain rates squared at T point - - character(len=*), parameter :: subname = '(deformations_T2)' - - !----------------------------------------------------------------- - ! strain rates - ! NOTE these are actually strain rates * area (m^2/s) - !----------------------------------------------------------------- - - call strain_rates_T (nx_block , ny_block , & - icellT , & - indxTi(:) , indxTj (:) , & - uvelE (:,:), vvelE (:,:), & - uvelN (:,:), vvelN (:,:), & - dxN (:,:), dyE (:,:), & - dxT (:,:), dyT (:,:), & - divT (:,:), tensionT(:,:), & - shearT(:,:), DeltaT (:,:) ) - - ! DeltaT is calc by strain_rates_T but replaced by calculation below. - - do ij = 1, icellT - i = indxTi(ij) - j = indxTj(ij) - - !----------------------------------------------------------------- - ! deformations for mechanical redistribution - !----------------------------------------------------------------- - - shearTsqr = (shearU(i ,j )**2 * uarea(i ,j ) & - + shearU(i ,j-1)**2 * uarea(i ,j-1) & - + shearU(i-1,j-1)**2 * uarea(i-1,j-1) & - + shearU(i-1,j )**2 * uarea(i-1,j )) & - / (uarea(i,j)+uarea(i,j-1)+uarea(i-1,j-1)+uarea(i-1,j)) - - DeltaT(i,j) = sqrt(divT(i,j)**2 + e_factor*(tensionT(i,j)**2 + shearTsqr)) - - divu(i,j) = divT(i,j) * tarear(i,j) - tmp = DeltaT(i,j) * tarear(i,j) - rdg_conv(i,j) = -min(divu(i,j),c0) - rdg_shear(i,j) = p5*(tmp-abs(divu(i,j))) - - ! diagnostic only...maybe we dont want to use shearTsqr here???? - ! shear = sqrt(tension**2 + shearing**2) - shear(i,j) = tarear(i,j)*sqrt( tensionT(i,j)**2 + shearT(i,j)**2 ) - - enddo ! 
ij - - end subroutine deformationsC_T - -!======================================================================= -! Compute strain rates -! -! author: Elizabeth C. Hunke, LANL -! -! 2019: subroutine created by Philippe Blain, ECCC - - subroutine strain_rates (nx_block, ny_block, & - i, j, & - uvel, vvel, & - dxT, dyT, & - cxp, cyp, & - cxm, cym, & - divune, divunw, & - divuse, divusw, & - tensionne, tensionnw, & - tensionse, tensionsw, & - shearne, shearnw, & - shearse, shearsw, & - Deltane, Deltanw, & - Deltase, Deltasw ) - - integer (kind=int_kind), intent(in) :: & - nx_block, ny_block ! block dimensions - - integer (kind=int_kind), intent(in) :: & - i, j ! indices - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - uvel , & ! x-component of velocity (m/s) - vvel , & ! y-component of velocity (m/s) - dxT , & ! width of T-cell through the middle (m) - dyT , & ! height of T-cell through the middle (m) - cyp , & ! 1.5*HTE - 0.5*HTW - cxp , & ! 1.5*HTN - 0.5*HTS - cym , & ! 0.5*HTE - 1.5*HTW - cxm ! 0.5*HTN - 1.5*HTS - - real (kind=dbl_kind), intent(out):: & ! at each corner : - divune, divunw, divuse, divusw , & ! divergence - tensionne, tensionnw, tensionse, tensionsw, & ! tension - shearne, shearnw, shearse, shearsw , & ! shearing - Deltane, Deltanw, Deltase, Deltasw ! Delta - - character(len=*), parameter :: subname = '(strain_rates)' - - !----------------------------------------------------------------- - ! strain rates - ! NOTE these are actually strain rates * area (m^2/s) - !----------------------------------------------------------------- - - ! 
divergence = e_11 + e_22 - divune = cyp(i,j)*uvel(i ,j ) - dyT(i,j)*uvel(i-1,j ) & - + cxp(i,j)*vvel(i ,j ) - dxT(i,j)*vvel(i ,j-1) - divunw = cym(i,j)*uvel(i-1,j ) + dyT(i,j)*uvel(i ,j ) & - + cxp(i,j)*vvel(i-1,j ) - dxT(i,j)*vvel(i-1,j-1) - divusw = cym(i,j)*uvel(i-1,j-1) + dyT(i,j)*uvel(i ,j-1) & - + cxm(i,j)*vvel(i-1,j-1) + dxT(i,j)*vvel(i-1,j ) - divuse = cyp(i,j)*uvel(i ,j-1) - dyT(i,j)*uvel(i-1,j-1) & - + cxm(i,j)*vvel(i ,j-1) + dxT(i,j)*vvel(i ,j ) - - ! tension strain rate = e_11 - e_22 - tensionne = -cym(i,j)*uvel(i ,j ) - dyT(i,j)*uvel(i-1,j ) & - + cxm(i,j)*vvel(i ,j ) + dxT(i,j)*vvel(i ,j-1) - tensionnw = -cyp(i,j)*uvel(i-1,j ) + dyT(i,j)*uvel(i ,j ) & - + cxm(i,j)*vvel(i-1,j ) + dxT(i,j)*vvel(i-1,j-1) - tensionsw = -cyp(i,j)*uvel(i-1,j-1) + dyT(i,j)*uvel(i ,j-1) & - + cxp(i,j)*vvel(i-1,j-1) - dxT(i,j)*vvel(i-1,j ) - tensionse = -cym(i,j)*uvel(i ,j-1) - dyT(i,j)*uvel(i-1,j-1) & - + cxp(i,j)*vvel(i ,j-1) - dxT(i,j)*vvel(i ,j ) - - ! shearing strain rate = 2*e_12 - shearne = -cym(i,j)*vvel(i ,j ) - dyT(i,j)*vvel(i-1,j ) & - - cxm(i,j)*uvel(i ,j ) - dxT(i,j)*uvel(i ,j-1) - shearnw = -cyp(i,j)*vvel(i-1,j ) + dyT(i,j)*vvel(i ,j ) & - - cxm(i,j)*uvel(i-1,j ) - dxT(i,j)*uvel(i-1,j-1) - shearsw = -cyp(i,j)*vvel(i-1,j-1) + dyT(i,j)*vvel(i ,j-1) & - - cxp(i,j)*uvel(i-1,j-1) + dxT(i,j)*uvel(i-1,j ) - shearse = -cym(i,j)*vvel(i ,j-1) - dyT(i,j)*vvel(i-1,j-1) & - - cxp(i,j)*uvel(i ,j-1) + dxT(i,j)*uvel(i ,j ) - - ! Delta (in the denominator of zeta, eta) - Deltane = sqrt(divune**2 + e_factor*(tensionne**2 + shearne**2)) - Deltanw = sqrt(divunw**2 + e_factor*(tensionnw**2 + shearnw**2)) - Deltasw = sqrt(divusw**2 + e_factor*(tensionsw**2 + shearsw**2)) - Deltase = sqrt(divuse**2 + e_factor*(tensionse**2 + shearse**2)) - - end subroutine strain_rates - -!======================================================================= -! Compute dtsd (div, tension, shear, delta) strain rates at the T point -! -! author: JF Lemieux, ECCC -! 
Nov 2021 - - subroutine strain_rates_Tdtsd (nx_block, ny_block, & - icellT, & - indxTi, indxTj, & - uvelE, vvelE, & - uvelN, vvelN, & - dxN, dyE, & - dxT, dyT, & - divT, tensionT, & - shearT, DeltaT ) - - integer (kind=int_kind), intent(in) :: & - nx_block, ny_block, & ! block dimensions - icellT - - integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxTi , & ! compressed index in i-direction - indxTj ! compressed index in j-direction - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - uvelE , & ! x-component of velocity (m/s) at the E point - vvelE , & ! y-component of velocity (m/s) at the N point - uvelN , & ! x-component of velocity (m/s) at the E point - vvelN , & ! y-component of velocity (m/s) at the N point - dxN , & ! width of N-cell through the middle (m) - dyE , & ! height of E-cell through the middle (m) - dxT , & ! width of T-cell through the middle (m) - dyT ! height of T-cell through the middle (m) - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out):: & - divT , & ! divergence at T point - tensionT , & ! tension at T point - shearT , & ! shear at T point - DeltaT ! strain rates at the T point - - ! local variables - - integer (kind=int_kind) :: & - ij, i, j ! indices - - character(len=*), parameter :: subname = '(strain_rates_Tdtsd)' - - !----------------------------------------------------------------- - ! strain rates - ! NOTE these are actually strain rates * area (m^2/s) - !----------------------------------------------------------------- - - ! compute divT, tensionT - call strain_rates_Tdt (nx_block, ny_block, & - icellT, & - indxTi, indxTj, & - uvelE, vvelE, & - uvelN, vvelN, & - dxN, dyE, & - dxT, dyT, & - divT, tensionT ) - - shearT (:,:) = c0 - deltaT (:,:) = c0 - - do ij = 1, icellT - i = indxTi(ij) - j = indxTj(ij) - - ! 
shearing strain rate = 2*e_12 - shearT(i,j) = (dxT(i,j)**2)*(uvelN(i,j)/dxN(i,j) - uvelN(i,j-1)/dxN(i,j-1)) & - + (dyT(i,j)**2)*(vvelE(i,j)/dyE(i,j) - vvelE(i-1,j)/dyE(i-1,j)) - - ! Delta (in the denominator of zeta, eta) - DeltaT(i,j) = sqrt(divT(i,j)**2 + e_factor*(tensionT(i,j)**2 + shearT(i,j)**2)) - - enddo - - end subroutine strain_rates_Tdtsd - -!======================================================================= -! Compute the dt (div, tension) strain rates at the T point -! -! author: JF Lemieux, ECCC -! Nov 2021 - - subroutine strain_rates_Tdt (nx_block, ny_block, & - icellT, & - indxTi, indxTj, & - uvelE, vvelE, & - uvelN, vvelN, & - dxN, dyE, & - dxT, dyT, & - divT, tensionT ) - - integer (kind=int_kind), intent(in) :: & - nx_block, ny_block, & ! block dimensions - icellT - - integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxTi , & ! compressed index in i-direction - indxTj ! compressed index in j-direction - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - uvelE , & ! x-component of velocity (m/s) at the E point - vvelE , & ! y-component of velocity (m/s) at the N point - uvelN , & ! x-component of velocity (m/s) at the E point - vvelN , & ! y-component of velocity (m/s) at the N point - dxN , & ! width of N-cell through the middle (m) - dyE , & ! height of E-cell through the middle (m) - dxT , & ! width of T-cell through the middle (m) - dyT ! height of T-cell through the middle (m) - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out):: & - divT , & ! divergence at T point - tensionT ! tension at T point - - ! local variables - - integer (kind=int_kind) :: & - ij, i, j ! indices - - character(len=*), parameter :: subname = '(strain_rates_Tdt)' - - !----------------------------------------------------------------- - ! strain rates - ! 
NOTE these are actually strain rates * area (m^2/s) - !----------------------------------------------------------------- - - divT (:,:) = c0 - tensionT(:,:) = c0 - - do ij = 1, icellT - i = indxTi(ij) - j = indxTj(ij) - - ! divergence = e_11 + e_22 - divT (i,j)= dyE(i,j)*uvelE(i ,j ) - dyE(i-1,j)*uvelE(i-1,j ) & - + dxN(i,j)*vvelN(i ,j ) - dxN(i,j-1)*vvelN(i ,j-1) - - ! tension strain rate = e_11 - e_22 - tensionT(i,j) = (dyT(i,j)**2)*(uvelE(i,j)/dyE(i,j) - uvelE(i-1,j)/dyE(i-1,j)) & - - (dxT(i,j)**2)*(vvelN(i,j)/dxN(i,j) - vvelN(i,j-1)/dxN(i,j-1)) - - enddo - - end subroutine strain_rates_Tdt - -!======================================================================= -! Compute strain rates at the U point including boundary conditions -! -! author: JF Lemieux, ECCC -! Nov 2021 - - subroutine strain_rates_U (nx_block, ny_block, & - icellU, & - indxUi, indxUj, & - uvelE, vvelE, & - uvelN, vvelN, & - uvelU, vvelU, & - dxE, dyN, & - dxU, dyU, & - ratiodxN, ratiodxNr, & - ratiodyE, ratiodyEr, & - epm, npm, & - divergU, tensionU, & - shearU, DeltaU ) - - integer (kind=int_kind), intent(in) :: & - nx_block, ny_block, & ! block dimensions - icellU - - integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxUi , & ! compressed index in i-direction - indxUj ! compressed index in j-direction - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - uvelE , & ! x-component of velocity (m/s) at the E point - vvelE , & ! y-component of velocity (m/s) at the N point - uvelN , & ! x-component of velocity (m/s) at the E point - vvelN , & ! y-component of velocity (m/s) at the N point - uvelU , & ! x-component of velocity (m/s) interp. at U point - vvelU , & ! y-component of velocity (m/s) interp. at U point - dxE , & ! width of E-cell through the middle (m) - dyN , & ! height of N-cell through the middle (m) - dxU , & ! width of U-cell through the middle (m) - dyU , & ! height of U-cell through the middle (m) - ratiodxN , & ! 
-dxN(i+1,j)/dxN(i,j) for BCs - ratiodxNr, & ! -dxN(i,j)/dxN(i+1,j) for BCs - ratiodyE , & ! -dyE(i,j+1)/dyE(i,j) for BCs - ratiodyEr, & ! -dyE(i,j)/dyE(i,j+1) for BCs - epm , & ! E-cell mask - npm ! N-cell mask - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out):: & - divergU , & ! divergence at U point - tensionU , & ! tension at U point - shearU , & ! shear at U point - DeltaU ! delt at the U point - - ! local variables - - integer (kind=int_kind) :: & - ij, i, j ! indices - - real (kind=dbl_kind) :: & - uNip1j, uNij, vEijp1, vEij, uEijp1, uEij, vNip1j, vNij - - character(len=*), parameter :: subname = '(strain_rates_U)' - - !----------------------------------------------------------------- - ! strain rates - ! NOTE these are actually strain rates * area (m^2/s) - !----------------------------------------------------------------- - - divergU (:,:) = c0 - tensionU(:,:) = c0 - shearU (:,:) = c0 - deltaU (:,:) = c0 - - do ij = 1, icellU - i = indxUi(ij) - j = indxUj(ij) - - uNip1j = uvelN(i+1,j) * npm(i+1,j) & - +(npm(i,j)-npm(i+1,j)) * npm(i,j) * ratiodxN(i,j) * uvelN(i,j) - uNij = uvelN(i,j) * npm(i,j) & - +(npm(i+1,j)-npm(i,j)) * npm(i+1,j) * ratiodxNr(i,j) * uvelN(i+1,j) - vEijp1 = vvelE(i,j+1) * epm(i,j+1) & - +(epm(i,j)-epm(i,j+1)) * epm(i,j) * ratiodyE(i,j) * vvelE(i,j) - vEij = vvelE(i,j) * epm(i,j) & - +(epm(i,j+1)-epm(i,j)) * epm(i,j+1) * ratiodyEr(i,j) * vvelE(i,j+1) - - ! divergence = e_11 + e_22 - divergU (i,j) = dyU(i,j) * ( uNip1j - uNij ) & - + uvelU(i,j) * ( dyN(i+1,j) - dyN(i,j) ) & - + dxU(i,j) * ( vEijp1 - vEij ) & - + vvelU(i,j) * ( dxE(i,j+1) - dxE(i,j) ) - - ! 
tension strain rate = e_11 - e_22 - tensionU(i,j) = dyU(i,j) * ( uNip1j - uNij ) & - - uvelU(i,j) * ( dyN(i+1,j) - dyN(i,j) ) & - - dxU(i,j) * ( vEijp1 - vEij ) & - + vvelU(i,j) * ( dxE(i,j+1) - dxE(i,j) ) - - uEijp1 = uvelE(i,j+1) * epm(i,j+1) & - +(epm(i,j)-epm(i,j+1)) * epm(i,j) * ratiodyE(i,j) * uvelE(i,j) - uEij = uvelE(i,j) * epm(i,j) & - +(epm(i,j+1)-epm(i,j)) * epm(i,j+1) * ratiodyEr(i,j) * uvelE(i,j+1) - vNip1j = vvelN(i+1,j) * npm(i+1,j) & - +(npm(i,j)-npm(i+1,j)) * npm(i,j) * ratiodxN(i,j) * vvelN(i,j) - vNij = vvelN(i,j) * npm(i,j) & - +(npm(i+1,j)-npm(i,j)) * npm(i+1,j) * ratiodxNr(i,j) * vvelN(i+1,j) - - ! shearing strain rate = 2*e_12 - shearU(i,j) = dxU(i,j) * ( uEijp1 - uEij ) & - - uvelU(i,j) * ( dxE(i,j+1) - dxE(i,j) ) & - + dyU(i,j) * ( vNip1j - vNij ) & - - vvelU(i,j) * ( dyN(i+1,j) - dyN(i,j) ) - - ! Delta (in the denominator of zeta, eta) - DeltaU(i,j) = sqrt(divergU(i,j)**2 + e_factor*(tensionU(i,j)**2 + shearU(i,j)**2)) - - enddo - - end subroutine strain_rates_U - -!======================================================================= -! Computes viscosities and replacement pressure for stress -! calculations. Note that tensile strength is included here. -! -! Hibler, W. D. (1979). A dynamic thermodynamic sea ice model. J. Phys. -! Oceanogr., 9, 817-846. -! -! Konig Beatty, C. and Holland, D. M. (2010). Modeling landfast ice by -! adding tensile strength. J. Phys. Oceanogr. 40, 185-198. -! -! Lemieux, J. F. et al. (2016). Improving the simulation of landfast ice -! by combining tensile strength and a parameterization for grounded ridges. -! J. Geophys. Res. Oceans, 121, 7354-7368. - - subroutine visc_replpress(strength, DminArea, Delta, & - zetax2, etax2, rep_prs, capping) - - real (kind=dbl_kind), intent(in):: & - strength, & ! - DminArea ! - - real (kind=dbl_kind), intent(in):: & - Delta , & ! - capping ! - - real (kind=dbl_kind), intent(out):: & - zetax2 , & ! bulk viscosity - etax2 , & ! shear viscosity - rep_prs ! 
replacement pressure - - ! local variables - real (kind=dbl_kind) :: & - tmpcalc ! temporary - - character(len=*), parameter :: subname = '(visc_replpress)' - - ! NOTE: for comp. efficiency 2 x zeta and 2 x eta are used in the code - - tmpcalc = capping *(strength/max(Delta,DminArea))+ & - (c1-capping)*(strength/(Delta + DminArea)) - zetax2 = (c1+Ktens)*tmpcalc - rep_prs = (c1-Ktens)*tmpcalc*Delta - etax2 = epp2i*zetax2 - - end subroutine visc_replpress - -!======================================================================= -! Do a halo update on 1 field - - subroutine dyn_haloUpdate1(halo_info, halo_info_mask, field_loc, field_type, fld1) - - use ice_boundary, only: ice_halo, ice_HaloUpdate - use ice_domain, only: maskhalo_dyn, halo_dynbundle - use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound - - type (ice_halo), intent(in) :: & - halo_info , & ! standard unmasked halo - halo_info_mask ! masked halo - - integer (kind=int_kind), intent(in) :: & - field_loc , & ! field loc - field_type ! field_type - - real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), intent(inout) :: & - fld1 ! fields to halo - - ! local variables - - character(len=*), parameter :: subname = '(dyn_haloUpdate1)' - - call ice_timer_start(timer_bound) - - if (maskhalo_dyn) then - call ice_HaloUpdate (fld1 , halo_info_mask, & - field_loc, field_type) - else - call ice_HaloUpdate (fld1 , halo_info , & - field_loc, field_type) - endif - - call ice_timer_stop(timer_bound) - - end subroutine dyn_haloUpdate1 - -!======================================================================= -! Do a halo update on 2 fields - - subroutine dyn_haloUpdate2(halo_info, halo_info_mask, field_loc, field_type, fld1, fld2) - - use ice_boundary, only: ice_halo, ice_HaloUpdate - use ice_domain, only: maskhalo_dyn, halo_dynbundle - use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound - - type (ice_halo), intent(in) :: & - halo_info , & ! standard unmasked halo - halo_info_mask ! 
masked halo - - integer (kind=int_kind), intent(in) :: & - field_loc , & ! field loc - field_type ! field_type - - real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), intent(inout) :: & - fld1 , & ! fields to halo - fld2 ! - - ! local variables - - real (kind=dbl_kind), dimension (nx_block,ny_block,2,max_blocks) :: & - fldbundle ! work array for boundary updates - - character(len=*), parameter :: subname = '(dyn_haloUpdate2)' - - call ice_timer_start(timer_bound) - ! single process performs better without bundling fields - if (halo_dynbundle) then - - call stack_fields(fld1, fld2, fldbundle) - if (maskhalo_dyn) then - call ice_HaloUpdate (fldbundle, halo_info_mask, & - field_loc, field_type) - else - call ice_HaloUpdate (fldbundle, halo_info , & - field_loc, field_type) - endif - call unstack_fields(fldbundle, fld1, fld2) - - else - - if (maskhalo_dyn) then - call ice_HaloUpdate (fld1 , halo_info_mask, & - field_loc, field_type) - call ice_HaloUpdate (fld2 , halo_info_mask, & - field_loc, field_type) - else - call ice_HaloUpdate (fld1 , halo_info , & - field_loc, field_type) - call ice_HaloUpdate (fld2 , halo_info , & - field_loc, field_type) - endif - - endif - call ice_timer_stop(timer_bound) - - end subroutine dyn_haloUpdate2 - -!======================================================================= -! Do a halo update on 3 fields - - subroutine dyn_haloUpdate3(halo_info, halo_info_mask, field_loc, field_type, fld1, fld2, fld3) - - use ice_boundary, only: ice_halo, ice_HaloUpdate - use ice_domain, only: maskhalo_dyn, halo_dynbundle - use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound - - type (ice_halo), intent(in) :: & - halo_info , & ! standard unmasked halo - halo_info_mask ! masked halo - - integer (kind=int_kind), intent(in) :: & - field_loc , & ! field loc - field_type ! field_type - - real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), intent(inout) :: & - fld1 , & ! fields to halo - fld2 , & ! - fld3 ! - - ! 
local variables - - real (kind=dbl_kind), dimension (nx_block,ny_block,3,max_blocks) :: & - fldbundle ! work array for boundary updates - - character(len=*), parameter :: subname = '(dyn_haloUpdate3)' - - call ice_timer_start(timer_bound) - ! single process performs better without bundling fields - if (halo_dynbundle) then - - call stack_fields(fld1, fld2, fld3, fldbundle) - if (maskhalo_dyn) then - call ice_HaloUpdate (fldbundle, halo_info_mask, & - field_loc, field_type) - else - call ice_HaloUpdate (fldbundle, halo_info , & - field_loc, field_type) - endif - call unstack_fields(fldbundle, fld1, fld2, fld3) - - else - - if (maskhalo_dyn) then - call ice_HaloUpdate (fld1 , halo_info_mask, & - field_loc, field_type) - call ice_HaloUpdate (fld2 , halo_info_mask, & - field_loc, field_type) - call ice_HaloUpdate (fld3 , halo_info_mask, & - field_loc, field_type) - else - call ice_HaloUpdate (fld1 , halo_info , & - field_loc, field_type) - call ice_HaloUpdate (fld2 , halo_info , & - field_loc, field_type) - call ice_HaloUpdate (fld3 , halo_info , & - field_loc, field_type) - endif - - endif - call ice_timer_stop(timer_bound) - - end subroutine dyn_haloUpdate3 - -!======================================================================= -! Do a halo update on 4 fields - - subroutine dyn_haloUpdate4(halo_info, halo_info_mask, field_loc, field_type, fld1, fld2, fld3, fld4) - - use ice_boundary, only: ice_halo, ice_HaloUpdate - use ice_domain, only: maskhalo_dyn, halo_dynbundle - use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound - - type (ice_halo), intent(in) :: & - halo_info , & ! standard unmasked halo - halo_info_mask ! masked halo - - integer (kind=int_kind), intent(in) :: & - field_loc, & ! field loc - field_type ! field_type - - real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), intent(inout) :: & - fld1 , & ! fields to halo - fld2 , & ! - fld3 , & ! - fld4 ! - - ! 
local variables - - real (kind=dbl_kind), dimension (nx_block,ny_block,4,max_blocks) :: & - fldbundle ! work array for boundary updates - - character(len=*), parameter :: subname = '(dyn_haloUpdate4)' - - call ice_timer_start(timer_bound) - ! single process performs better without bundling fields - if (halo_dynbundle) then - - call stack_fields(fld1, fld2, fld3, fld4, fldbundle) - if (maskhalo_dyn) then - call ice_HaloUpdate (fldbundle, halo_info_mask, & - field_loc, field_type) - else - call ice_HaloUpdate (fldbundle, halo_info , & - field_loc, field_type) - endif - call unstack_fields(fldbundle, fld1, fld2, fld3, fld4) - - else - - if (maskhalo_dyn) then - call ice_HaloUpdate (fld1 , halo_info_mask, & - field_loc, field_type) - call ice_HaloUpdate (fld2 , halo_info_mask, & - field_loc, field_type) - call ice_HaloUpdate (fld3 , halo_info_mask, & - field_loc, field_type) - call ice_HaloUpdate (fld4 , halo_info_mask, & - field_loc, field_type) - else - call ice_HaloUpdate (fld1 , halo_info , & - field_loc, field_type) - call ice_HaloUpdate (fld2 , halo_info , & - field_loc, field_type) - call ice_HaloUpdate (fld3 , halo_info , & - field_loc, field_type) - call ice_HaloUpdate (fld4 , halo_info , & - field_loc, field_type) - endif - - endif - call ice_timer_stop(timer_bound) - - end subroutine dyn_haloUpdate4 - -!======================================================================= -! Do a halo update on 5 fields - - subroutine dyn_haloUpdate5(halo_info, halo_info_mask, field_loc, field_type, fld1, fld2, fld3, fld4, fld5) - - use ice_boundary, only: ice_halo, ice_HaloUpdate - use ice_domain, only: maskhalo_dyn, halo_dynbundle - use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound - - type (ice_halo), intent(in) :: & - halo_info , & ! standard unmasked halo - halo_info_mask ! masked halo - - integer (kind=int_kind), intent(in) :: & - field_loc , & ! field loc - field_type ! 
field_type - - real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), intent(inout) :: & - fld1 , & ! fields to halo - fld2 , & ! - fld3 , & ! - fld4 , & ! - fld5 ! - - ! local variables - - real (kind=dbl_kind), dimension (nx_block,ny_block,5,max_blocks) :: & - fldbundle ! work array for boundary updates - - character(len=*), parameter :: subname = '(dyn_haloUpdate5)' - - call ice_timer_start(timer_bound) - ! single process performs better without bundling fields - if (halo_dynbundle) then - - call stack_fields(fld1, fld2, fld3, fld4, fld5, fldbundle) - if (maskhalo_dyn) then - call ice_HaloUpdate (fldbundle, halo_info_mask, & - field_loc, field_type) - else - call ice_HaloUpdate (fldbundle, halo_info , & - field_loc, field_type) - endif - call unstack_fields(fldbundle, fld1, fld2, fld3, fld4, fld5) - - else - - if (maskhalo_dyn) then - call ice_HaloUpdate (fld1 , halo_info_mask, & - field_loc, field_type) - call ice_HaloUpdate (fld2 , halo_info_mask, & - field_loc, field_type) - call ice_HaloUpdate (fld3 , halo_info_mask, & - field_loc, field_type) - call ice_HaloUpdate (fld4 , halo_info_mask, & - field_loc, field_type) - call ice_HaloUpdate (fld5 , halo_info_mask, & - field_loc, field_type) - else - call ice_HaloUpdate (fld1 , halo_info , & - field_loc, field_type) - call ice_HaloUpdate (fld2 , halo_info , & - field_loc, field_type) - call ice_HaloUpdate (fld3 , halo_info , & - field_loc, field_type) - call ice_HaloUpdate (fld4 , halo_info , & - field_loc, field_type) - call ice_HaloUpdate (fld5 , halo_info , & - field_loc, field_type) - endif - - endif - call ice_timer_stop(timer_bound) - - end subroutine dyn_haloUpdate5 - -!======================================================================= -! 
Load fields into array for boundary updates - - subroutine stack_fields2(fld1, fld2, fldbundle) - - use ice_domain, only: nblocks - use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bundbound - - real (kind=dbl_kind), dimension (:,:,:), intent(in) :: & - fld1 , & ! fields to stack - fld2 ! - - real (kind=dbl_kind), dimension (:,:,:,:), intent(out) :: & - fldbundle ! work array for boundary updates (i,j,n,iblk) - - ! local variables - - integer (kind=int_kind) :: & - iblk ! block index - - character(len=*), parameter :: subname = '(stack_fields2)' - - call ice_timer_start(timer_bundbound) - !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) - do iblk = 1, nblocks - fldbundle(:,:,1,iblk) = fld1(:,:,iblk) - fldbundle(:,:,2,iblk) = fld2(:,:,iblk) - enddo - !$OMP END PARALLEL DO - call ice_timer_stop(timer_bundbound) - - end subroutine stack_fields2 - -!======================================================================= -! Load fields into array for boundary updates - - subroutine stack_fields3(fld1, fld2, fld3, fldbundle) - - use ice_domain, only: nblocks - use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bundbound - - real (kind=dbl_kind), dimension (:,:,:), intent(in) :: & - fld1 , & ! fields to stack - fld2 , & ! - fld3 ! - - real (kind=dbl_kind), dimension (:,:,:,:), intent(out) :: & - fldbundle ! work array for boundary updates (i,j,n,iblk) - - ! local variables - - integer (kind=int_kind) :: & - iblk ! block index - - character(len=*), parameter :: subname = '(stack_fields3)' - - call ice_timer_start(timer_bundbound) - !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) - do iblk = 1, nblocks - fldbundle(:,:,1,iblk) = fld1(:,:,iblk) - fldbundle(:,:,2,iblk) = fld2(:,:,iblk) - fldbundle(:,:,3,iblk) = fld3(:,:,iblk) - enddo - !$OMP END PARALLEL DO - call ice_timer_stop(timer_bundbound) - - end subroutine stack_fields3 - -!======================================================================= -! 
Load fields into array for boundary updates - - subroutine stack_fields4(fld1, fld2, fld3, fld4, fldbundle) - - use ice_domain, only: nblocks - use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bundbound - - real (kind=dbl_kind), dimension (:,:,:), intent(in) :: & - fld1 , & ! fields to stack - fld2 , & ! - fld3 , & ! - fld4 ! - - real (kind=dbl_kind), dimension (:,:,:,:), intent(out) :: & - fldbundle ! work array for boundary updates (i,j,n,iblk) - - ! local variables - - integer (kind=int_kind) :: & - iblk ! block index - - character(len=*), parameter :: subname = '(stack_fields4)' - - call ice_timer_start(timer_bundbound) - !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) - do iblk = 1, nblocks - fldbundle(:,:,1,iblk) = fld1(:,:,iblk) - fldbundle(:,:,2,iblk) = fld2(:,:,iblk) - fldbundle(:,:,3,iblk) = fld3(:,:,iblk) - fldbundle(:,:,4,iblk) = fld4(:,:,iblk) - enddo - !$OMP END PARALLEL DO - call ice_timer_stop(timer_bundbound) - - end subroutine stack_fields4 - -!======================================================================= -! Load fields into array for boundary updates - - subroutine stack_fields5(fld1, fld2, fld3, fld4, fld5, fldbundle) - - use ice_domain, only: nblocks - use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bundbound - - real (kind=dbl_kind), dimension (:,:,:), intent(in) :: & - fld1 , & ! fields to stack - fld2 , & ! - fld3 , & ! - fld4 , & ! - fld5 ! - - real (kind=dbl_kind), dimension (:,:,:,:), intent(out) :: & - fldbundle ! work array for boundary updates (i,j,n,iblk) - - ! local variables - - integer (kind=int_kind) :: & - iblk ! 
block index - - character(len=*), parameter :: subname = '(stack_fields5)' - - call ice_timer_start(timer_bundbound) - !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) - do iblk = 1, nblocks - fldbundle(:,:,1,iblk) = fld1(:,:,iblk) - fldbundle(:,:,2,iblk) = fld2(:,:,iblk) - fldbundle(:,:,3,iblk) = fld3(:,:,iblk) - fldbundle(:,:,4,iblk) = fld4(:,:,iblk) - fldbundle(:,:,5,iblk) = fld5(:,:,iblk) - enddo - !$OMP END PARALLEL DO - call ice_timer_stop(timer_bundbound) - - end subroutine stack_fields5 - -!======================================================================= -! Unload fields from array after boundary updates - - subroutine unstack_fields2(fldbundle, fld1, fld2) - - use ice_domain, only: nblocks - use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bundbound - - real (kind=dbl_kind), dimension (:,:,:,:), intent(in) :: & - fldbundle ! work array for boundary updates (i,j,n,iblk) - - real (kind=dbl_kind), dimension (:,:,:), intent(out) :: & - fld1 , & ! fields to unstack - fld2 ! - - ! local variables - - integer (kind=int_kind) :: & - iblk ! block index - - character(len=*), parameter :: subname = '(unstack_fields2)' - - call ice_timer_start(timer_bundbound) - !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) - do iblk = 1, nblocks - fld1(:,:,iblk) = fldbundle(:,:,1,iblk) - fld2(:,:,iblk) = fldbundle(:,:,2,iblk) - enddo - !$OMP END PARALLEL DO - call ice_timer_stop(timer_bundbound) - - end subroutine unstack_fields2 - -!======================================================================= -! Unload fields from array after boundary updates - - subroutine unstack_fields3(fldbundle, fld1, fld2, fld3) - - use ice_domain, only: nblocks - use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bundbound - - real (kind=dbl_kind), dimension (:,:,:,:), intent(in) :: & - fldbundle ! work array for boundary updates (i,j,n,iblk) - - real (kind=dbl_kind), dimension (:,:,:), intent(out) :: & - fld1 , & ! fields to unstack - fld2 , & ! - fld3 ! - - ! 
local variables - - integer (kind=int_kind) :: & - iblk ! block index - - character(len=*), parameter :: subname = '(unstack_fields3)' - - call ice_timer_start(timer_bundbound) - !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) - do iblk = 1, nblocks - fld1(:,:,iblk) = fldbundle(:,:,1,iblk) - fld2(:,:,iblk) = fldbundle(:,:,2,iblk) - fld3(:,:,iblk) = fldbundle(:,:,3,iblk) - enddo - !$OMP END PARALLEL DO - call ice_timer_stop(timer_bundbound) - - end subroutine unstack_fields3 - -!======================================================================= -! Unload fields from array after boundary updates - - subroutine unstack_fields4(fldbundle, fld1, fld2, fld3, fld4) - - use ice_domain, only: nblocks - use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bundbound - - real (kind=dbl_kind), dimension (:,:,:,:), intent(in) :: & - fldbundle ! work array for boundary updates (i,j,n,iblk) - - real (kind=dbl_kind), dimension (:,:,:), intent(out) :: & - fld1 , & ! fields to unstack - fld2 , & ! - fld3 , & ! - fld4 ! - - ! local variables - - integer (kind=int_kind) :: & - iblk ! block index - - character(len=*), parameter :: subname = '(unstack_fields4)' - - call ice_timer_start(timer_bundbound) - !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) - do iblk = 1, nblocks - fld1(:,:,iblk) = fldbundle(:,:,1,iblk) - fld2(:,:,iblk) = fldbundle(:,:,2,iblk) - fld3(:,:,iblk) = fldbundle(:,:,3,iblk) - fld4(:,:,iblk) = fldbundle(:,:,4,iblk) - enddo - !$OMP END PARALLEL DO - call ice_timer_stop(timer_bundbound) - - end subroutine unstack_fields4 - -!======================================================================= -! Unload fields from array after boundary updates - - subroutine unstack_fields5(fldbundle, fld1, fld2, fld3, fld4, fld5) - - use ice_domain, only: nblocks - use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bundbound - - real (kind=dbl_kind), dimension (:,:,:,:), intent(in) :: & - fldbundle ! 
work array for boundary updates (i,j,n,iblk) - - real (kind=dbl_kind), dimension (:,:,:), intent(out) :: & - fld1 , & ! fields to unstack - fld2 , & ! - fld3 , & ! - fld4 , & ! - fld5 ! - - ! local variables - - integer (kind=int_kind) :: & - iblk ! block index - - character(len=*), parameter :: subname = '(unstack_fields5)' - - call ice_timer_start(timer_bundbound) - !$OMP PARALLEL DO PRIVATE(iblk) SCHEDULE(runtime) - do iblk = 1, nblocks - fld1(:,:,iblk) = fldbundle(:,:,1,iblk) - fld2(:,:,iblk) = fldbundle(:,:,2,iblk) - fld3(:,:,iblk) = fldbundle(:,:,3,iblk) - fld4(:,:,iblk) = fldbundle(:,:,4,iblk) - fld5(:,:,iblk) = fldbundle(:,:,5,iblk) - enddo - !$OMP END PARALLEL DO - call ice_timer_stop(timer_bundbound) - - end subroutine unstack_fields5 - -!======================================================================= - - end module ice_dyn_shared - -!======================================================================= diff --git a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 b/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 deleted file mode 100644 index 6534e7568..000000000 --- a/cicecore/cicedynB/dynamics/ice_dyn_vp.F90 +++ /dev/null @@ -1,3779 +0,0 @@ -!======================================================================= -! -! Viscous-plastic sea ice dynamics model -! Computes ice velocity and deformation -! -! See: -! -! Lemieux, J.‐F., Tremblay, B., Thomas, S., Sedláček, J., and Mysak, L. A. (2008), -! Using the preconditioned Generalized Minimum RESidual (GMRES) method to solve -! the sea‐ice momentum equation, J. Geophys. Res., 113, C10004, doi:10.1029/2007JC004680. -! -! Hibler, W. D., and Ackley, S. F. (1983), Numerical simulation of the Weddell Sea pack ice, -! J. Geophys. Res., 88( C5), 2873– 2887, doi:10.1029/JC088iC05p02873. -! -! Y. Saad. A Flexible Inner-Outer Preconditioned GMRES Algorithm. SIAM J. Sci. Comput., -! 14(2):461–469, 1993. URL: https://doi.org/10.1137/0914028, doi:10.1137/0914028. -! -! C. T. 
Kelley, Iterative Methods for Linear and Nonlinear Equations, SIAM, 1995. -! (https://www.siam.org/books/textbooks/fr16_book.pdf) -! -! Y. Saad, Iterative Methods for Sparse Linear Systems. SIAM, 2003. -! (http://www-users.cs.umn.edu/~saad/IterMethBook_2ndEd.pdf) -! -! Walker, H. F., & Ni, P. (2011). Anderson Acceleration for Fixed-Point Iterations. -! SIAM Journal on Numerical Analysis, 49(4), 1715–1735. https://doi.org/10.1137/10078356X -! -! Fang, H., & Saad, Y. (2009). Two classes of multisecant methods for nonlinear acceleration. -! Numerical Linear Algebra with Applications, 16(3), 197–221. https://doi.org/10.1002/nla.617 -! -! Birken, P. (2015) Termination criteria for inexact fixed‐point schemes. -! Numer. Linear Algebra Appl., 22: 702– 716. doi: 10.1002/nla.1982. -! -! authors: JF Lemieux, ECCC, Philppe Blain, ECCC -! - - module ice_dyn_vp - - use ice_kinds_mod - use ice_blocks, only: nx_block, ny_block - use ice_boundary, only: ice_halo - use ice_communicate, only: my_task, master_task, get_num_procs - use ice_constants, only: field_loc_center, field_loc_NEcorner, & - field_type_scalar, field_type_vector - use ice_constants, only: c0, p027, p055, p111, p166, & - p222, p25, p333, p5, c1 - use ice_domain, only: nblocks, distrb_info - use ice_domain_size, only: max_blocks - use ice_dyn_shared, only: dyn_prep1, dyn_prep2, dyn_finish, & - cosw, sinw, fcor_blk, uvel_init, vvel_init, & - seabed_stress_factor_LKD, seabed_stress_factor_prob, seabed_stress_method, & - seabed_stress, Ktens, stack_fields, unstack_fields - use ice_fileunits, only: nu_diag - use ice_flux, only: fmU - use ice_global_reductions, only: global_sum - use ice_grid, only: dxT, dyT, dxhy, dyhx, cxp, cyp, cxm, cym, uarear - use ice_exit, only: abort_ice - use icepack_intfc, only: icepack_warnings_flush, icepack_warnings_aborted - use icepack_intfc, only: icepack_ice_strength, icepack_query_parameters - - implicit none - private - public :: implicit_solver, init_vp - - ! 
namelist parameters - - integer (kind=int_kind), public :: & - maxits_nonlin , & ! max nb of iteration for nonlinear solver - dim_fgmres , & ! size of fgmres Krylov subspace - dim_pgmres , & ! size of pgmres Krylov subspace - maxits_fgmres , & ! max nb of iteration for fgmres - maxits_pgmres , & ! max nb of iteration for pgmres - fpfunc_andacc , & ! fixed point function for Anderson acceleration: - ! 1: g(x) = FMGRES(A(x),b(x)), 2: g(x) = x - A(x)x + b(x) - dim_andacc , & ! size of Anderson minimization matrix (number of saved previous residuals) - start_andacc ! acceleration delay factor (acceleration starts at this iteration) - - logical (kind=log_kind), public :: & - monitor_nonlin , & ! print nonlinear residual norm - monitor_fgmres , & ! print fgmres residual norm - monitor_pgmres , & ! print pgmres residual norm - use_mean_vrel ! use mean of previous 2 iterates to compute vrel (see Hibler and Ackley 1983) - - real (kind=dbl_kind), public :: & - reltol_nonlin , & ! nonlinear stopping criterion: reltol_nonlin*res(k=0) - reltol_fgmres , & ! fgmres stopping criterion: reltol_fgmres*res(k) - reltol_pgmres , & ! pgmres stopping criterion: reltol_pgmres*res(k) - damping_andacc , & ! damping factor for Anderson acceleration - reltol_andacc ! relative tolerance for Anderson acceleration - - character (len=char_len), public :: & - precond , & ! preconditioner for fgmres: 'ident' (identity), 'diag' (diagonal), - ! 'pgmres' (Jacobi-preconditioned GMRES) - algo_nonlin , & ! nonlinear algorithm: 'picard' (Picard iteration), 'anderson' (Anderson acceleration) - ortho_type ! type of orthogonalization for FGMRES ('cgs' or 'mgs') - - ! module variables - - integer (kind=int_kind), allocatable :: & - icellT(:) , & ! no. of cells where iceTmask = .true. - icellU(:) ! no. of cells where iceUmask = .true. - - integer (kind=int_kind), allocatable :: & - indxTi(:,:) , & ! compressed index in i-direction - indxTj(:,:) , & ! compressed index in j-direction - indxUi(:,:) , & ! 
compressed index in i-direction - indxUj(:,:) ! compressed index in j-direction - - real (kind=dbl_kind), allocatable :: & - fld2(:,:,:,:), & ! work array for boundary updates - fld3(:,:,:,:), & ! work array for boundary updates - fld4(:,:,:,:) ! work array for boundary updates - -!======================================================================= - - contains - -!======================================================================= - -! Initialize parameters and variables needed for the vp dynamics -! author: Philippe Blain, ECCC - - subroutine init_vp - - use ice_blocks, only: get_block, block - use ice_boundary, only: ice_HaloUpdate - use ice_constants, only: c1, & - field_loc_center, field_type_scalar - use ice_domain, only: blocks_ice, halo_info -! use ice_grid, only: tarea - - ! local variables - - integer (kind=int_kind) :: & - i, j, iblk, & - ilo,ihi,jlo,jhi ! beginning and end of physical domain - - type (block) :: & - this_block ! block information for current block - - ! Initialize module variables - allocate(icellT(max_blocks), icellU(max_blocks)) - allocate(indxTi(nx_block*ny_block, max_blocks), & - indxTj(nx_block*ny_block, max_blocks), & - indxUi(nx_block*ny_block, max_blocks), & - indxUj(nx_block*ny_block, max_blocks)) - allocate(fld2(nx_block,ny_block,2,max_blocks)) - allocate(fld3(nx_block,ny_block,3,max_blocks)) - allocate(fld4(nx_block,ny_block,4,max_blocks)) - - end subroutine init_vp - -!======================================================================= - -! Viscous-plastic dynamics driver -! -#ifdef CICE_IN_NEMO -! Wind stress is set during this routine from the values supplied -! via NEMO (unless calc_strair is true). These values are supplied -! rotated on u grid and multiplied by aice. strairxT = 0 in this -! case so operations in dyn_prep1 are pointless but carried out to -! minimise code changes. -#endif -! -! author: JF Lemieux, A. Qaddouri and F. 
Dupont ECCC - - subroutine implicit_solver (dt) - - use ice_arrays_column, only: Cdn_ocn - use ice_boundary, only: ice_HaloMask, ice_HaloUpdate, & - ice_HaloDestroy, ice_HaloUpdate_stress - use ice_blocks, only: block, get_block, nx_block, ny_block - use ice_domain, only: blocks_ice, halo_info, maskhalo_dyn - use ice_domain_size, only: max_blocks, ncat - use ice_dyn_shared, only: deformations, iceTmask, iceUmask - use ice_flux, only: rdg_conv, rdg_shear, strairxT, strairyT, & - strairxU, strairyU, uocn, vocn, ss_tltx, ss_tlty, fmU, & - strtltxU, strtltyU, strocnxU, strocnyU, strintxU, strintyU, taubxU, taubyU, & - strax, stray, & - TbU, hwater, & - stressp_1, stressp_2, stressp_3, stressp_4, & - stressm_1, stressm_2, stressm_3, stressm_4, & - stress12_1, stress12_2, stress12_3, stress12_4 - use ice_grid, only: tmask, umask, dxT, dyT, cxp, cyp, cxm, cym, & - tarear, grid_type, grid_average_X2Y, & - grid_atm_dynu, grid_atm_dynv, grid_ocn_dynu, grid_ocn_dynv - use ice_state, only: aice, aiU, vice, vsno, uvel, vvel, divu, shear, & - aice_init, aice0, aicen, vicen, strength - use ice_timers, only: timer_dynamics, timer_bound, & - ice_timer_start, ice_timer_stop - - real (kind=dbl_kind), intent(in) :: & - dt ! time step - - ! local variables - - integer (kind=int_kind) :: & - ntot , & ! size of problem for Anderson - iblk , & ! block index - ilo,ihi,jlo,jhi, & ! beginning and end of physical domain - i, j, ij - - real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & - uocnU , & ! i ocean current (m/s) - vocnU , & ! j ocean current (m/s) - ss_tltxU , & ! sea surface slope, x-direction (m/m) - ss_tltyU , & ! sea surface slope, y-direction (m/m) - cdn_ocnU , & ! ocn drag coefficient - tmass , & ! total mass of ice and snow (kg/m^2) - waterxU , & ! for ocean stress calculation, x (m/s) - wateryU , & ! for ocean stress calculation, y (m/s) - forcexU , & ! work array: combined atm stress and ocn tilt, x - forceyU , & ! 
work array: combined atm stress and ocn tilt, y - bxfix , & ! part of bx that is constant during Picard - byfix , & ! part of by that is constant during Picard - Cb , & ! seabed stress coefficient - fpresx , & ! fixed point residual vector, x components: fx = uvel - uprev_k - fpresy , & ! fixed point residual vector, y components: fy = vvel - vprev_k - umass , & ! total mass of ice and snow (u grid) - umassdti ! mass of U-cell/dte (kg/m^2 s) - - real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks,4):: & - zetax2 , & ! zetax2 = 2*zeta (bulk viscosity) - etax2 , & ! etax2 = 2*eta (shear viscosity) - rep_prs ! replacement pressure - - logical (kind=log_kind) :: calc_strair - - integer (kind=int_kind), dimension (nx_block,ny_block,max_blocks) :: & - halomask ! generic halo mask - - type (ice_halo) :: & - halo_info_mask ! ghost cell update info for masked halo - - type (block) :: & - this_block ! block information for current block - - real (kind=dbl_kind), allocatable :: & - sol(:) ! solution vector - - character(len=*), parameter :: subname = '(implicit_solver)' - - call ice_timer_start(timer_dynamics) ! dynamics - - !----------------------------------------------------------------- - ! Initialize - !----------------------------------------------------------------- - - ! This call is needed only if dt changes during runtime. -! call set_evp_parameters (dt) - - !----------------------------------------------------------------- - ! boundary updates - ! commented out because the ghost cells are freshly - ! updated after cleanup_itd - !----------------------------------------------------------------- - -! call ice_timer_start(timer_bound) -! call ice_HaloUpdate (aice, halo_info, & -! field_loc_center, field_type_scalar) -! call ice_HaloUpdate (vice, halo_info, & -! field_loc_center, field_type_scalar) -! call ice_HaloUpdate (vsno, halo_info, & -! field_loc_center, field_type_scalar) -! 
call ice_timer_stop(timer_bound) - - !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) - do iblk = 1, nblocks - - do j = 1, ny_block - do i = 1, nx_block - rdg_conv (i,j,iblk) = c0 - rdg_shear(i,j,iblk) = c0 - divu (i,j,iblk) = c0 - shear(i,j,iblk) = c0 - enddo - enddo - - !----------------------------------------------------------------- - ! preparation for dynamics - !----------------------------------------------------------------- - - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi - - call dyn_prep1 (nx_block, ny_block, & - ilo, ihi, jlo, jhi, & - aice (:,:,iblk), vice (:,:,iblk), & - vsno (:,:,iblk), tmask (:,:,iblk), & - tmass (:,:,iblk), iceTmask(:,:,iblk)) - - enddo ! iblk - !$OMP END PARALLEL DO - - call ice_timer_start(timer_bound) - call ice_HaloUpdate (iceTmask, halo_info, & - field_loc_center, field_type_scalar) - call ice_timer_stop(timer_bound) - - !----------------------------------------------------------------- - ! 
convert fields from T to U grid - !----------------------------------------------------------------- - - call stack_fields(tmass, aice_init, cdn_ocn, fld3) - call ice_HaloUpdate (fld3, halo_info, & - field_loc_center, field_type_scalar) - call stack_fields(uocn, vocn, ss_tltx, ss_tlty, fld4) - call ice_HaloUpdate (fld4, halo_info, & - field_loc_center, field_type_vector) - call unstack_fields(fld3, tmass, aice_init, cdn_ocn) - call unstack_fields(fld4, uocn, vocn, ss_tltx, ss_tlty) - - call grid_average_X2Y('S',tmass , 'T' , umass , 'U') - call grid_average_X2Y('S',aice_init, 'T' , aiU , 'U') - call grid_average_X2Y('S',cdn_ocn , 'T' , cdn_ocnU, 'U') - call grid_average_X2Y('S',uocn , grid_ocn_dynu, uocnU , 'U') - call grid_average_X2Y('S',vocn , grid_ocn_dynv, vocnU , 'U') - call grid_average_X2Y('S',ss_tltx , grid_ocn_dynu, ss_tltxU, 'U') - call grid_average_X2Y('S',ss_tlty , grid_ocn_dynv, ss_tltyU, 'U') - - !---------------------------------------------------------------- - ! Set wind stress to values supplied via NEMO or other forcing - ! This wind stress is rotated on u grid and multiplied by aice - !---------------------------------------------------------------- - call icepack_query_parameters(calc_strair_out=calc_strair) - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) - - if (.not. 
calc_strair) then - call grid_average_X2Y('F', strax, grid_atm_dynu, strairxU, 'U') - call grid_average_X2Y('F', stray, grid_atm_dynv, strairyU, 'U') - else - call ice_HaloUpdate (strairxT, halo_info, & - field_loc_center, field_type_vector) - call ice_HaloUpdate (strairyT, halo_info, & - field_loc_center, field_type_vector) - call grid_average_X2Y('F',strairxT,'T',strairxU,'U') - call grid_average_X2Y('F',strairyT,'T',strairyU,'U') - endif - - !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block,ij,i,j) - do iblk = 1, nblocks - - !----------------------------------------------------------------- - ! more preparation for dynamics - !----------------------------------------------------------------- - - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi - - call dyn_prep2 (nx_block, ny_block, & - ilo, ihi, jlo, jhi, & - icellT(iblk), icellU(iblk), & - indxTi (:,iblk), indxTj (:,iblk), & - indxUi (:,iblk), indxUj (:,iblk), & - aiU (:,:,iblk), umass (:,:,iblk), & - umassdti (:,:,iblk), fcor_blk (:,:,iblk), & - umask (:,:,iblk), & - uocnU (:,:,iblk), vocnU (:,:,iblk), & - strairxU (:,:,iblk), strairyU (:,:,iblk), & - ss_tltxU (:,:,iblk), ss_tltyU (:,:,iblk), & - iceTmask (:,:,iblk), iceUmask (:,:,iblk), & - fmU (:,:,iblk), dt, & - strtltxU (:,:,iblk), strtltyU (:,:,iblk), & - strocnxU (:,:,iblk), strocnyU (:,:,iblk), & - strintxU (:,:,iblk), strintyU (:,:,iblk), & - taubxU (:,:,iblk), taubyU (:,:,iblk), & - waterxU (:,:,iblk), wateryU (:,:,iblk), & - forcexU (:,:,iblk), forceyU (:,:,iblk), & - stressp_1 (:,:,iblk), stressp_2 (:,:,iblk), & - stressp_3 (:,:,iblk), stressp_4 (:,:,iblk), & - stressm_1 (:,:,iblk), stressm_2 (:,:,iblk), & - stressm_3 (:,:,iblk), stressm_4 (:,:,iblk), & - stress12_1(:,:,iblk), stress12_2(:,:,iblk), & - stress12_3(:,:,iblk), stress12_4(:,:,iblk), & - uvel_init (:,:,iblk), vvel_init (:,:,iblk), & - uvel (:,:,iblk), vvel (:,:,iblk), & - TbU (:,:,iblk)) - - 
call calc_bfix (nx_block , ny_block , & - icellU(iblk) , & - indxUi (:,iblk), indxUj (:,iblk), & - umassdti (:,:,iblk), & - forcexU (:,:,iblk), forceyU (:,:,iblk), & - uvel_init (:,:,iblk), vvel_init (:,:,iblk), & - bxfix (:,:,iblk), byfix (:,:,iblk)) - - !----------------------------------------------------------------- - ! ice strength - !----------------------------------------------------------------- - - strength(:,:,iblk) = c0 ! initialize - do ij = 1, icellT(iblk) - i = indxTi(ij, iblk) - j = indxTj(ij, iblk) - call icepack_ice_strength (ncat, & - aice (i,j, iblk), & - vice (i,j, iblk), & - aice0 (i,j, iblk), & - aicen (i,j,:,iblk), & - vicen (i,j,:,iblk), & - strength(i,j, iblk)) - enddo ! ij - - enddo ! iblk - !$OMP END PARALLEL DO - - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) - - call ice_timer_start(timer_bound) - call ice_HaloUpdate (strength, halo_info, & - field_loc_center, field_type_scalar) - ! velocities may have changed in dyn_prep2 - call stack_fields(uvel, vvel, fld2) - call ice_HaloUpdate (fld2, halo_info, & - field_loc_NEcorner, field_type_vector) - call unstack_fields(fld2, uvel, vvel) - call ice_timer_stop(timer_bound) - - if (maskhalo_dyn) then - call ice_timer_start(timer_bound) - halomask = 0 - where (iceUmask) halomask = 1 - call ice_HaloUpdate (halomask, halo_info, & - field_loc_center, field_type_scalar) - call ice_timer_stop(timer_bound) - call ice_HaloMask(halo_info_mask, halo_info, halomask) - endif - - !----------------------------------------------------------------- - ! 
seabed stress factor TbU (TbU is part of Cb coefficient) - !----------------------------------------------------------------- - if (seabed_stress) then - if ( seabed_stress_method == 'LKD' ) then - !$OMP PARALLEL DO PRIVATE(iblk) - do iblk = 1, nblocks - call seabed_stress_factor_LKD (nx_block, ny_block, & - icellU (iblk), & - indxUi(:,iblk), indxUj(:,iblk), & - vice(:,:,iblk), aice(:,:,iblk), & - hwater(:,:,iblk), TbU(:,:,iblk)) - enddo - !$OMP END PARALLEL DO - - elseif ( seabed_stress_method == 'probabilistic' ) then - !$OMP PARALLEL DO PRIVATE(iblk) - do iblk = 1, nblocks - - call seabed_stress_factor_prob (nx_block, ny_block, & - icellT(iblk), indxTi(:,iblk), indxTj(:,iblk), & - icellU(iblk), indxUi(:,iblk), indxUj(:,iblk), & - aicen(:,:,:,iblk), vicen(:,:,:,iblk), & - hwater(:,:,iblk), TbU(:,:,iblk)) - enddo - !$OMP END PARALLEL DO - - endif - endif - - - !----------------------------------------------------------------- - ! calc size of problem (ntot) and allocate solution vector - !----------------------------------------------------------------- - - ntot = 0 - do iblk = 1, nblocks - ntot = ntot + icellU(iblk) - enddo - ntot = 2 * ntot ! times 2 because of u and v - - allocate(sol(ntot)) - - !----------------------------------------------------------------- - ! Start of nonlinear iteration - !----------------------------------------------------------------- - call anderson_solver (icellT , icellU , & - indxTi , indxTj , & - indxUi , indxUj , & - aiU , ntot , & - uocnU , vocnU , & - waterxU , wateryU, & - bxfix , byfix , & - umassdti, sol , & - fpresx , fpresy , & - zetax2 , etax2 , & - rep_prs , cdn_ocnU,& - Cb, halo_info_mask) - !----------------------------------------------------------------- - ! End of nonlinear iteration - !----------------------------------------------------------------- - - deallocate(sol) - - if (maskhalo_dyn) call ice_HaloDestroy(halo_info_mask) - - !----------------------------------------------------------------- - ! 
Compute stresses - !----------------------------------------------------------------- - !$OMP PARALLEL DO PRIVATE(iblk) - do iblk = 1, nblocks - call stress_vp (nx_block , ny_block , & - icellT(iblk) , & - indxTi (:,iblk), indxTj (:,iblk), & - uvel (:,:,iblk), vvel (:,:,iblk), & - dxT (:,:,iblk), dyT (:,:,iblk), & - cxp (:,:,iblk), cyp (:,:,iblk), & - cxm (:,:,iblk), cym (:,:,iblk), & - zetax2 (:,:,iblk,:), etax2 (:,:,iblk,:), & - rep_prs (:,:,iblk,:), & - stressp_1 (:,:,iblk), stressp_2 (:,:,iblk), & - stressp_3 (:,:,iblk), stressp_4 (:,:,iblk), & - stressm_1 (:,:,iblk), stressm_2 (:,:,iblk), & - stressm_3 (:,:,iblk), stressm_4 (:,:,iblk), & - stress12_1(:,:,iblk), stress12_2(:,:,iblk), & - stress12_3(:,:,iblk), stress12_4(:,:,iblk)) - enddo ! iblk - !$OMP END PARALLEL DO - - !----------------------------------------------------------------- - ! Compute deformations - !----------------------------------------------------------------- - !$OMP PARALLEL DO PRIVATE(iblk) - do iblk = 1, nblocks - call deformations (nx_block , ny_block , & - icellT(iblk) , & - indxTi (:,iblk), indxTj (:,iblk), & - uvel (:,:,iblk), vvel (:,:,iblk), & - dxT (:,:,iblk), dyT (:,:,iblk), & - cxp (:,:,iblk), cyp (:,:,iblk), & - cxm (:,:,iblk), cym (:,:,iblk), & - tarear (:,:,iblk), & - shear (:,:,iblk), divu (:,:,iblk), & - rdg_conv (:,:,iblk), rdg_shear (:,:,iblk)) - enddo - !$OMP END PARALLEL DO - - !----------------------------------------------------------------- - ! Compute seabed stress (diagnostic) - !----------------------------------------------------------------- - if (seabed_stress) then - !$OMP PARALLEL DO PRIVATE(iblk) - do iblk = 1, nblocks - call calc_seabed_stress (nx_block , ny_block , & - icellU(iblk) , & - indxUi (:,iblk), indxUj (:,iblk), & - uvel (:,:,iblk), vvel (:,:,iblk), & - Cb (:,:,iblk), & - taubxU (:,:,iblk), taubyU (:,:,iblk)) - enddo - !$OMP END PARALLEL DO - endif - - ! 
Force symmetry across the tripole seam - if (trim(grid_type) == 'tripole') then - if (maskhalo_dyn) then - !------------------------------------------------------- - ! set halomask to zero because ice_HaloMask always keeps - ! local copies AND tripole zipper communication - !------------------------------------------------------- - halomask = 0 - call ice_HaloMask(halo_info_mask, halo_info, halomask) - - call ice_HaloUpdate_stress(stressp_1, stressp_3, halo_info_mask, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stressp_3, stressp_1, halo_info_mask, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stressp_2, stressp_4, halo_info_mask, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stressp_4, stressp_2, halo_info_mask, & - field_loc_center, field_type_scalar) - - call ice_HaloUpdate_stress(stressm_1, stressm_3, halo_info_mask, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stressm_3, stressm_1, halo_info_mask, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stressm_2, stressm_4, halo_info_mask, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stressm_4, stressm_2, halo_info_mask, & - field_loc_center, field_type_scalar) - - call ice_HaloUpdate_stress(stress12_1, stress12_3, halo_info_mask, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stress12_3, stress12_1, halo_info_mask, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stress12_2, stress12_4, halo_info_mask, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stress12_4, stress12_2, halo_info_mask, & - field_loc_center, field_type_scalar) - - call ice_HaloDestroy(halo_info_mask) - else - call ice_HaloUpdate_stress(stressp_1, stressp_3, halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stressp_3, stressp_1, halo_info, & - field_loc_center, field_type_scalar) - call 
ice_HaloUpdate_stress(stressp_2, stressp_4, halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stressp_4, stressp_2, halo_info, & - field_loc_center, field_type_scalar) - - call ice_HaloUpdate_stress(stressm_1, stressm_3, halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stressm_3, stressm_1, halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stressm_2, stressm_4, halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stressm_4, stressm_2, halo_info, & - field_loc_center, field_type_scalar) - - call ice_HaloUpdate_stress(stress12_1, stress12_3, halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stress12_3, stress12_1, halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stress12_2, stress12_4, halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate_stress(stress12_4, stress12_2, halo_info, & - field_loc_center, field_type_scalar) - endif ! maskhalo - endif ! tripole - - !----------------------------------------------------------------- - ! ice-ocean stress - !----------------------------------------------------------------- - - !$OMP PARALLEL DO PRIVATE(iblk) - do iblk = 1, nblocks - - call dyn_finish & - (nx_block, ny_block, & - icellU (iblk), Cdn_ocnU(:,:,iblk), & - indxUi (:,iblk), indxUj (:,iblk), & - uvel (:,:,iblk), vvel (:,:,iblk), & - uocnU (:,:,iblk), vocnU (:,:,iblk), & - aiU (:,:,iblk), fmU (:,:,iblk), & -! strintxU(:,:,iblk), strintyU(:,:,iblk), & -! strairxU(:,:,iblk), strairyU(:,:,iblk), & - strocnxU(:,:,iblk), strocnyU(:,:,iblk)) - - enddo - !$OMP END PARALLEL DO - -! shift velocity components from CD grid locations (N, E) to B grid location (U) for transport -! commented out in order to focus on EVP for now within the cdgrid -! should be used when routine is ready -! if (grid_ice == 'CD' .or. grid_ice == 'C') then -! call grid_average_X2Y('E2US',uvelE,uvel) -! 
call grid_average_X2Y('N2US',vvelN,vvel) -! endif -!end comment out - call ice_timer_stop(timer_dynamics) ! dynamics - - end subroutine implicit_solver - -!======================================================================= - -! Solve the nonlinear equation F(u,v) = 0, where -! F(u,v) := A(u,v) * (u,v) - b(u,v) -! using Anderson acceleration (accelerated fixed point (Picard) iteration) -! -! author: JF Lemieux, A. Qaddouri, F. Dupont and P. Blain ECCC -! -! Anderson algorithm adadpted from: -! H. F. Walker, “Anderson Acceleration: Algorithms and Implementations” -! [Online]. Available: https://users.wpi.edu/~walker/Papers/anderson_accn_algs_imps.pdf - - subroutine anderson_solver (icellT , icellU , & - indxTi , indxTj , & - indxUi , indxUj , & - aiU , ntot , & - uocn , vocn , & - waterxU , wateryU, & - bxfix , byfix , & - umassdti, sol , & - fpresx , fpresy , & - zetax2 , etax2 , & - rep_prs , cdn_ocn, & - Cb, halo_info_mask) - - use ice_blocks, only: nx_block, ny_block - use ice_boundary, only: ice_HaloUpdate - use ice_constants, only: c1 - use ice_domain, only: maskhalo_dyn, halo_info - use ice_domain_size, only: max_blocks - use ice_flux, only: fmU, TbU - use ice_grid, only: dxT, dyT, dxhy, dyhx, cxp, cyp, cxm, cym, & - uarear - use ice_dyn_shared, only: DminTarea - use ice_state, only: uvel, vvel, strength - use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound - - integer (kind=int_kind), intent(in) :: & - ntot ! size of problem for Anderson - - integer (kind=int_kind), dimension(max_blocks), intent(in) :: & - icellT , & ! no. of cells where iceTmask = .true. - icellU ! no. of cells where iceUmask = .true. - - integer (kind=int_kind), dimension (nx_block*ny_block, max_blocks), intent(in) :: & - indxTi , & ! compressed index in i-direction - indxTj , & ! compressed index in j-direction - indxUi , & ! compressed index in i-direction - indxUj ! 
compressed index in j-direction - - real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), intent(in) :: & - aiU , & ! ice fraction on u-grid - uocn , & ! i ocean current (m/s) - vocn , & ! j ocean current (m/s) - cdn_ocn , & ! ocn drag coefficient - waterxU , & ! for ocean stress calculation, x (m/s) - wateryU , & ! for ocean stress calculation, y (m/s) - bxfix , & ! part of bx that is constant during Picard - byfix , & ! part of by that is constant during Picard - umassdti ! mass of U-cell/dte (kg/m^2 s) - - real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks,4), intent(out) :: & - zetax2 , & ! zetax2 = 2*zeta (bulk viscosity) - etax2 , & ! etax2 = 2*eta (shear viscosity) - rep_prs ! replacement pressure - - type (ice_halo), intent(in) :: & - halo_info_mask ! ghost cell update info for masked halo - - real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), intent(inout) :: & - fpresx , & ! fixed point residual vector, x components: fx = uvel - uprev_k - fpresy , & ! fixed point residual vector, y components: fy = vvel - vprev_k - Cb ! seabed stress coefficient - - real (kind=dbl_kind), dimension (ntot), intent(inout) :: & - sol ! current approximate solution - - ! local variables - - integer (kind=int_kind) :: & - it_nl , & ! nonlinear loop iteration index - res_num , & ! current number of stored residuals - j , & ! iteration index for QR update - iblk , & ! block index - nbiter ! number of FGMRES iterations performed - - integer (kind=int_kind), parameter :: & - inc = 1 ! increment value for BLAS calls - - real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & - uprev_k , & ! uvel at previous Picard iteration - vprev_k , & ! vvel at previous Picard iteration - ulin , & ! uvel to linearize vrel - vlin , & ! vvel to linearize vrel - vrel , & ! coeff for tauw - bx , & ! b vector - by , & ! b vector - diagx , & ! Diagonal (x component) of the matrix A - diagy , & ! Diagonal (y component) of the matrix A - Au , & ! 
matvec, Fx = bx - Au - Av , & ! matvec, Fy = by - Av - Fx , & ! x residual vector, Fx = bx - Au - Fy , & ! y residual vector, Fy = by - Av - solx , & ! solution of FGMRES (x components) - soly ! solution of FGMRES (y components) - - real (kind=dbl_kind), dimension(nx_block,ny_block,8):: & - stress_Pr, & ! x,y-derivatives of the replacement pressure - diag_rheo ! contributions of the rhelogy term to the diagonal - - real (kind=dbl_kind), dimension (ntot) :: & - res , & ! current residual - res_old , & ! previous residual - res_diff , & ! difference between current and previous residuals - fpfunc , & ! current value of fixed point function - fpfunc_old , & ! previous value of fixed point function - tmp ! temporary vector for BLAS calls - - real (kind=dbl_kind), dimension(ntot,dim_andacc) :: & - Q , & ! Q factor for QR factorization of F (residuals) matrix - G_diff ! Matrix containing the differences of g(x) (fixed point function) evaluations - - real (kind=dbl_kind), dimension(dim_andacc,dim_andacc) :: & - R ! R factor for QR factorization of F (residuals) matrix - - real (kind=dbl_kind), dimension(dim_andacc) :: & - rhs_tri , & ! right hand side vector for matrix-vector product - coeffs ! coeffs used to combine previous solutions - - real (kind=dbl_kind) :: & - ! tol , & ! tolerance for fixed point convergence: reltol_andacc * (initial fixed point residual norm) [unused for now] - tol_nl , & ! tolerance for nonlinear convergence: reltol_nonlin * (initial nonlinear residual norm) - fpres_norm , & ! norm of current fixed point residual : f(x) = g(x) - x - prog_norm , & ! norm of difference between current and previous solution - nlres_norm ! norm of current nonlinear residual : F(x) = A(x)x -b(x) - -#ifdef USE_LAPACK - real (kind=dbl_kind) :: & - ddot, dnrm2 ! external BLAS functions -#endif - - character(len=*), parameter :: subname = '(anderson_solver)' - - ! 
Initialization - res_num = 0 - - !$OMP PARALLEL DO PRIVATE(iblk) - do iblk = 1, nblocks - uprev_k(:,:,iblk) = uvel(:,:,iblk) - vprev_k(:,:,iblk) = vvel(:,:,iblk) - enddo - !$OMP END PARALLEL DO - - ! Start iterations - do it_nl = 0, maxits_nonlin ! nonlinear iteration loop - ! Compute quantities needed for computing PDE residual and g(x) (fixed point map) - !----------------------------------------------------------------- - ! Calc zetax2, etax2, dPr/dx, dPr/dy, Cb and vrel = f(uprev_k, vprev_k) - !----------------------------------------------------------------- - !$OMP PARALLEL DO PRIVATE(iblk,stress_Pr) - do iblk = 1, nblocks - - if (use_mean_vrel) then - ulin(:,:,iblk) = p5*uprev_k(:,:,iblk) + p5*uvel(:,:,iblk) - vlin(:,:,iblk) = p5*vprev_k(:,:,iblk) + p5*vvel(:,:,iblk) - else - ulin(:,:,iblk) = uvel(:,:,iblk) - vlin(:,:,iblk) = vvel(:,:,iblk) - endif - uprev_k(:,:,iblk) = uvel(:,:,iblk) - vprev_k(:,:,iblk) = vvel(:,:,iblk) - - call calc_zeta_dPr (nx_block , ny_block , & - icellT (iblk), & - indxTi (:,iblk), indxTj (:,iblk), & - uprev_k (:,:,iblk), vprev_k (:,:,iblk), & - dxT (:,:,iblk), dyT (:,:,iblk), & - dxhy (:,:,iblk), dyhx (:,:,iblk), & - cxp (:,:,iblk), cyp (:,:,iblk), & - cxm (:,:,iblk), cym (:,:,iblk), & - DminTarea (:,:,iblk),strength (:,:,iblk),& - zetax2 (:,:,iblk,:), etax2 (:,:,iblk,:),& - rep_prs(:,:,iblk,:), stress_Pr (:,:,:)) - - call calc_vrel_Cb (nx_block , ny_block , & - icellU (iblk), Cdn_ocn (:,:,iblk), & - indxUi (:,iblk), indxUj (:,iblk), & - aiU (:,:,iblk), TbU (:,:,iblk), & - uocn (:,:,iblk), vocn (:,:,iblk), & - ulin (:,:,iblk), vlin (:,:,iblk), & - vrel (:,:,iblk), Cb (:,:,iblk)) - - ! prepare b vector (RHS) - call calc_bvec (nx_block , ny_block , & - icellU (iblk), & - indxUi (:,iblk), indxUj (:,iblk), & - stress_Pr (:,:,:), uarear (:,:,iblk), & - waterxU (:,:,iblk), wateryU (:,:,iblk), & - bxfix (:,:,iblk), byfix (:,:,iblk), & - bx (:,:,iblk), by (:,:,iblk), & - vrel (:,:,iblk)) - - ! 
Compute nonlinear residual norm (PDE residual) - call matvec (nx_block , ny_block , & - icellU (iblk) , icellT (iblk), & - indxUi (:,iblk) , indxUj (:,iblk), & - indxTi (:,iblk) , indxTj (:,iblk), & - dxT (:,:,iblk) , dyT (:,:,iblk), & - dxhy (:,:,iblk) , dyhx (:,:,iblk), & - cxp (:,:,iblk) , cyp (:,:,iblk), & - cxm (:,:,iblk) , cym (:,:,iblk), & - uprev_k (:,:,iblk) , vprev_k (:,:,iblk), & - vrel (:,:,iblk) , Cb (:,:,iblk), & - zetax2 (:,:,iblk,:), etax2 (:,:,iblk,:), & - umassdti (:,:,iblk) , fmU (:,:,iblk), & - uarear (:,:,iblk) , & - Au (:,:,iblk) , Av (:,:,iblk)) - call residual_vec (nx_block , ny_block , & - icellU (iblk), & - indxUi (:,iblk), indxUj (:,iblk), & - bx (:,:,iblk), by (:,:,iblk), & - Au (:,:,iblk), Av (:,:,iblk), & - Fx (:,:,iblk), Fy (:,:,iblk)) - enddo - !$OMP END PARALLEL DO - nlres_norm = global_norm(nx_block, ny_block, & - icellU , & - indxUi , indxUj , & - Fx , Fy ) - if (my_task == master_task .and. monitor_nonlin) then - write(nu_diag, '(a,i4,a,d26.16)') "monitor_nonlin: iter_nonlin= ", it_nl, & - " nonlin_res_L2norm= ", nlres_norm - endif - ! Compute relative tolerance at first iteration - if (it_nl == 0) then - tol_nl = reltol_nonlin*nlres_norm - endif - - ! Check for nonlinear convergence - if (nlres_norm < tol_nl) then - exit - endif - - ! Put initial guess for FGMRES in solx,soly and sol (needed for anderson) - solx = uprev_k - soly = vprev_k - call arrays_to_vec (nx_block , ny_block , & - nblocks , max_blocks , & - icellU (:), ntot , & - indxUi (:,:), indxUj (:,:), & - uprev_k (:,:,:), vprev_k (:,:,:), & - sol (:)) - - ! Compute fixed point map g(x) - if (fpfunc_andacc == 1) then - ! g_1(x) = FGMRES(A(x), b(x)) - - ! Prepare diagonal for preconditioner - if (precond == 'diag' .or. precond == 'pgmres') then - !$OMP PARALLEL DO PRIVATE(iblk,diag_rheo) - do iblk = 1, nblocks - ! 
first compute diagonal contributions due to rheology term - call formDiag_step1 (nx_block , ny_block , & - icellU (iblk) , & - indxUi (:,iblk) , indxUj(:,iblk), & - dxT (:,:,iblk) , dyT (:,:,iblk), & - dxhy (:,:,iblk) , dyhx(:,:,iblk), & - cxp (:,:,iblk) , cyp (:,:,iblk), & - cxm (:,:,iblk) , cym (:,:,iblk), & - zetax2 (:,:,iblk,:), etax2(:,:,iblk,:), & - diag_rheo(:,:,:)) - ! second compute the full diagonal - call formDiag_step2 (nx_block , ny_block , & - icellU (iblk), & - indxUi (:,iblk), indxUj (:,iblk), & - diag_rheo (:,:,:), vrel (:,:,iblk), & - umassdti (:,:,iblk), & - uarear (:,:,iblk), Cb (:,:,iblk), & - diagx (:,:,iblk), diagy (:,:,iblk)) - enddo - !$OMP END PARALLEL DO - endif - - ! FGMRES linear solver - call fgmres (zetax2 , etax2 , & - Cb , vrel , & - umassdti , & - halo_info_mask, & - bx , by , & - diagx , diagy , & - reltol_fgmres , dim_fgmres, & - maxits_fgmres , & - solx , soly , & - nbiter) - ! Put FGMRES solution solx,soly in fpfunc vector (needed for Anderson) - call arrays_to_vec (nx_block , ny_block , & - nblocks , max_blocks , & - icellU (:), ntot , & - indxUi (:,:), indxUj (:,:), & - solx (:,:,:), soly (:,:,:), & - fpfunc (:)) - elseif (fpfunc_andacc == 2) then - ! g_2(x) = x - A(x)x + b(x) = x - F(x) - call abort_ice(error_message=subname // " Fixed point function g_2(x) not yet implemented (fpfunc_andacc = 2)" , & - file=__FILE__, line=__LINE__) - endif - - ! Compute fixed point residual f(x) = g(x) - x - res = fpfunc - sol -#ifdef USE_LAPACK - fpres_norm = global_sum(dnrm2(size(res), res, inc)**2, distrb_info) -#else - call vec_to_arrays (nx_block , ny_block , & - nblocks , max_blocks , & - icellU (:), ntot , & - indxUi (:,:), indxUj(:,:) , & - res (:), & - fpresx (:,:,:), fpresy (:,:,:)) - fpres_norm = global_norm(nx_block, ny_block, & - icellU , & - indxUi , indxUj , & - fpresx , fpresy ) -#endif - if (my_task == master_task .and. 
monitor_nonlin) then - write(nu_diag, '(a,i4,a,d26.16)') "monitor_nonlin: iter_nonlin= ", it_nl, & - " fixed_point_res_L2norm= ", fpres_norm - endif - - ! Not used for now (only nonlinear residual is checked) - ! ! Store initial residual norm - ! if (it_nl == 0) then - ! tol = reltol_andacc*fpres_norm - ! endif - ! - ! ! Check residual - ! if (fpres_norm < tol) then - ! exit - ! endif - - if (dim_andacc == 0 .or. it_nl < start_andacc) then - ! Simple fixed point (Picard) iteration in this case - sol = fpfunc - else -#ifdef USE_LAPACK - ! Begin Anderson acceleration - if (get_num_procs() > 1) then - ! Anderson solver is not yet parallelized; abort - if (my_task == master_task) then - call abort_ice(error_message=subname // " Anderson solver (algo_nonlin = 'anderson') is not yet parallelized, and nprocs > 1 " , & - file=__FILE__, line=__LINE__) - endif - endif - if (it_nl > start_andacc) then - ! Update residual difference vector - res_diff = res - res_old - ! Update fixed point function difference matrix - if (res_num < dim_andacc) then - ! Add column - G_diff(:,res_num+1) = fpfunc - fpfunc_old - else - ! Delete first column and add column - G_diff(:,1:res_num-1) = G_diff(:,2:res_num) - G_diff(:,res_num) = fpfunc - fpfunc_old - endif - res_num = res_num + 1 - endif - res_old = res - fpfunc_old = fpfunc - if (res_num == 0) then - sol = fpfunc - else - if (res_num == 1) then - ! Initialize QR factorization - R(1,1) = dnrm2(size(res_diff), res_diff, inc) - Q(:,1) = res_diff/R(1,1) - else - if (res_num > dim_andacc) then - ! Update factorization since 1st column was deleted - call qr_delete(Q,R) - res_num = res_num - 1 - endif - ! Update QR factorization for new column - do j = 1, res_num - 1 - R(j,res_num) = ddot(ntot, Q(:,j), inc, res_diff, inc) - res_diff = res_diff - R(j,res_num) * Q(:,j) - enddo - R(res_num, res_num) = dnrm2(size(res_diff) ,res_diff, inc) - Q(:,res_num) = res_diff / R(res_num, res_num) - endif - ! 
TODO: here, drop more columns to improve conditioning - ! if (droptol) then - - ! endif - ! Solve least square problem for coefficients - ! 1. Compute rhs_tri = Q^T * res - call dgemv ('t', size(Q,1), res_num, c1, Q(:,1:res_num), size(Q,1), res, inc, c0, rhs_tri, inc) - ! 2. Solve R*coeffs = rhs_tri, put result in rhs_tri - call dtrsv ('u', 'n', 'n', res_num, R(1:res_num,1:res_num), res_num, rhs_tri, inc) - coeffs = rhs_tri - ! Update approximate solution: x = fpfunc - G_diff*coeffs, put result in fpfunc - call dgemv ('n', size(G_diff,1), res_num, -c1, G_diff(:,1:res_num), size(G_diff,1), coeffs, inc, c1, fpfunc, inc) - sol = fpfunc - ! Apply damping - if (damping_andacc > 0 .and. damping_andacc /= 1) then - ! x = x - (1-beta) (res - Q*R*coeffs) - - ! tmp = R*coeffs - call dgemv ('n', res_num, res_num, c1, R(1:res_num,1:res_num), res_num, coeffs, inc, c0, tmp, inc) - ! res = res - Q*tmp - call dgemv ('n', size(Q,1), res_num, -c1, Q(:,1:res_num), size(Q,1), tmp, inc, c1, res, inc) - ! x = x - (1-beta)*res - sol = sol - (1-damping_andacc)*res - endif - endif -#else - ! Anderson solver is not usable without LAPACK; abort - call abort_ice(error_message=subname // " CICE was not compiled with LAPACK, "// & - "and Anderson solver was chosen (algo_nonlin = 'anderson')" , & - file=__FILE__, line=__LINE__) -#endif - endif - - !----------------------------------------------------------------------- - ! Put vector sol in uvel and vvel arrays - !----------------------------------------------------------------------- - call vec_to_arrays (nx_block , ny_block , & - nblocks , max_blocks , & - icellU (:), ntot , & - indxUi (:,:), indxUj (:,:), & - sol (:), & - uvel (:,:,:), vvel (:,:,:)) - - ! 
Do halo update so that halo cells contain up to date info for advection - call stack_fields(uvel, vvel, fld2) - call ice_timer_start(timer_bound) - if (maskhalo_dyn) then - call ice_HaloUpdate (fld2, halo_info_mask, & - field_loc_NEcorner, field_type_vector) - else - call ice_HaloUpdate (fld2, halo_info, & - field_loc_NEcorner, field_type_vector) - endif - call ice_timer_stop(timer_bound) - call unstack_fields(fld2, uvel, vvel) - - ! Compute "progress" residual norm - !$OMP PARALLEL DO PRIVATE(iblk) - do iblk = 1, nblocks - fpresx(:,:,iblk) = uvel(:,:,iblk) - uprev_k(:,:,iblk) - fpresy(:,:,iblk) = vvel(:,:,iblk) - vprev_k(:,:,iblk) - enddo - !$OMP END PARALLEL DO - prog_norm = global_norm(nx_block, ny_block, & - icellU , & - indxUi , indxUj , & - fpresx , fpresy ) - if (my_task == master_task .and. monitor_nonlin) then - write(nu_diag, '(a,i4,a,d26.16)') "monitor_nonlin: iter_nonlin= ", it_nl, & - " progress_res_L2norm= ", prog_norm - endif - - enddo ! nonlinear iteration loop - - end subroutine anderson_solver - -!======================================================================= - -! Computes the viscosities and dPr/dx, dPr/dy - - subroutine calc_zeta_dPr (nx_block, ny_block, & - icellT , & - indxTi , indxTj , & - uvel , vvel , & - dxT , dyT , & - dxhy , dyhx , & - cxp , cyp , & - cxm , cym , & - DminTarea,strength, & - zetax2 , etax2 , & - rep_prs , stPr) - - use ice_dyn_shared, only: strain_rates, visc_replpress, & - capping - - integer (kind=int_kind), intent(in) :: & - nx_block, ny_block, & ! block dimensions - icellT ! no. of cells where iceTmask = .true. - - integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxTi , & ! compressed index in i-direction - indxTj ! compressed index in j-direction - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - strength , & ! ice strength (N/m) - uvel , & ! x-component of velocity (m/s) - vvel , & ! y-component of velocity (m/s) - dxT , & ! 
width of T-cell through the middle (m) - dyT , & ! height of T-cell through the middle (m) - dxhy , & ! 0.5*(HTE - HTW) - dyhx , & ! 0.5*(HTN - HTS) - cyp , & ! 1.5*HTE - 0.5*HTW - cxp , & ! 1.5*HTN - 0.5*HTS - cym , & ! 0.5*HTE - 1.5*HTW - cxm , & ! 0.5*HTN - 1.5*HTS - DminTarea ! deltaminVP*tarea - - real (kind=dbl_kind), dimension(nx_block,ny_block,4), intent(out) :: & - zetax2 , & ! zetax2 = 2*zeta (bulk viscosity) - etax2 , & ! etax2 = 2*eta (shear viscosity) - rep_prs ! replacement pressure - - real (kind=dbl_kind), dimension(nx_block,ny_block,8), intent(out) :: & - stPr ! stress combinations from replacement pressure - - ! local variables - - integer (kind=int_kind) :: & - i, j, ij - - real (kind=dbl_kind) :: & - divune, divunw, divuse, divusw , & ! divergence - tensionne, tensionnw, tensionse, tensionsw , & ! tension - shearne, shearnw, shearse, shearsw , & ! shearing - Deltane, Deltanw, Deltase, Deltasw , & ! Delt - ssigpn, ssigps, ssigpe, ssigpw, ssigp1, ssigp2, & - csigpne, csigpnw, csigpsw, csigpse , & - stressp_1, stressp_2, stressp_3, stressp_4 , & - strp_tmp - - character(len=*), parameter :: subname = '(calc_zeta_dPr)' - - ! Initialize stPr, zetax2 and etax2 to zero - ! (for cells where iceTmask is false) - stPr = c0 - zetax2 = c0 - etax2 = c0 - - do ij = 1, icellT - i = indxTi(ij) - j = indxTj(ij) - - !----------------------------------------------------------------- - ! strain rates - ! NOTE these are actually strain rates * area (m^2/s) - !----------------------------------------------------------------- - call strain_rates (nx_block , ny_block , & - i , j , & - uvel , vvel , & - dxT , dyT , & - cxp , cyp , & - cxm , cym , & - divune , divunw , & - divuse , divusw , & - tensionne, tensionnw, & - tensionse, tensionsw, & - shearne , shearnw , & - shearse , shearsw , & - Deltane , Deltanw , & - Deltase , Deltasw) - - !----------------------------------------------------------------- - ! 
viscosities and replacement pressure - !----------------------------------------------------------------- - - call visc_replpress (strength(i,j) , DminTarea(i,j) , & - Deltane , zetax2 (i,j,1), & - etax2 (i,j,1), rep_prs (i,j,1), & - capping) - call visc_replpress (strength(i,j) , DminTarea(i,j) , & - Deltanw , zetax2 (i,j,2), & - etax2 (i,j,2), rep_prs (i,j,2), & - capping) - call visc_replpress (strength(i,j) , DminTarea(i,j) , & - Deltasw , zetax2 (i,j,3), & - etax2 (i,j,3), rep_prs (i,j,3), & - capping) - call visc_replpress (strength(i,j) , DminTarea(i,j) , & - Deltase , zetax2 (i,j,4), & - etax2 (i,j,4), rep_prs (i,j,4), & - capping) - - !----------------------------------------------------------------- - ! the stresses ! kg/s^2 - ! (1) northeast, (2) northwest, (3) southwest, (4) southeast - !----------------------------------------------------------------- - - stressp_1 = -rep_prs(i,j,1) - stressp_2 = -rep_prs(i,j,2) - stressp_3 = -rep_prs(i,j,3) - stressp_4 = -rep_prs(i,j,4) - - !----------------------------------------------------------------- - ! combinations of the Pr related stresses for the momentum equation ! kg/s^2 - !----------------------------------------------------------------- - - ssigpn = stressp_1 + stressp_2 - ssigps = stressp_3 + stressp_4 - ssigpe = stressp_1 + stressp_4 - ssigpw = stressp_2 + stressp_3 - ssigp1 =(stressp_1 + stressp_3)*p055 - ssigp2 =(stressp_2 + stressp_4)*p055 - - csigpne = p111*stressp_1 + ssigp2 + p027*stressp_3 - csigpnw = p111*stressp_2 + ssigp1 + p027*stressp_4 - csigpsw = p111*stressp_3 + ssigp2 + p027*stressp_1 - csigpse = p111*stressp_4 + ssigp1 + p027*stressp_2 - - !----------------------------------------------------------------- - ! for dF/dx (u momentum) - !----------------------------------------------------------------- - strp_tmp = p25*dyT(i,j)*(p333*ssigpn + p166*ssigps) - - ! northeast (i,j) - stPr(i,j,1) = -strp_tmp & - + dxhy(i,j)*(-csigpne) - - ! 
northwest (i+1,j) - stPr(i,j,2) = strp_tmp & - + dxhy(i,j)*(-csigpnw) - - strp_tmp = p25*dyT(i,j)*(p333*ssigps + p166*ssigpn) - - ! southeast (i,j+1) - stPr(i,j,3) = -strp_tmp & - + dxhy(i,j)*(-csigpse) - - ! southwest (i+1,j+1) - stPr(i,j,4) = strp_tmp & - + dxhy(i,j)*(-csigpsw) - - !----------------------------------------------------------------- - ! for dF/dy (v momentum) - !----------------------------------------------------------------- - strp_tmp = p25*dxT(i,j)*(p333*ssigpe + p166*ssigpw) - - ! northeast (i,j) - stPr(i,j,5) = -strp_tmp & - - dyhx(i,j)*(csigpne) - - ! southeast (i,j+1) - stPr(i,j,6) = strp_tmp & - - dyhx(i,j)*(csigpse) - - strp_tmp = p25*dxT(i,j)*(p333*ssigpw + p166*ssigpe) - - ! northwest (i+1,j) - stPr(i,j,7) = -strp_tmp & - - dyhx(i,j)*(csigpnw) - - ! southwest (i+1,j+1) - stPr(i,j,8) = strp_tmp & - - dyhx(i,j)*(csigpsw) - - enddo ! ij - - end subroutine calc_zeta_dPr - -!======================================================================= - -! Computes the VP stresses (as diagnostic) - -! Lemieux, J.-F., and Dupont, F. (2020), On the calculation of normalized -! viscous-plastic sea ice stresses, Geosci. Model Dev., 13, 1763–1769, - - subroutine stress_vp (nx_block , ny_block , & - icellT , & - indxTi , indxTj , & - uvel , vvel , & - dxT , dyT , & - cxp , cyp , & - cxm , cym , & - zetax2 , etax2 , & - rep_prs , & - stressp_1 , stressp_2 , & - stressp_3 , stressp_4 , & - stressm_1 , stressm_2 , & - stressm_3 , stressm_4 , & - stress12_1, stress12_2, & - stress12_3, stress12_4) - - use ice_dyn_shared, only: strain_rates - - integer (kind=int_kind), intent(in) :: & - nx_block, ny_block, & ! block dimensions - icellT ! no. of cells where iceTmask = .true. - - integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxTi , & ! compressed index in i-direction - indxTj ! compressed index in j-direction - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - uvel , & ! 
x-component of velocity (m/s) - vvel , & ! y-component of velocity (m/s) - dxT , & ! width of T-cell through the middle (m) - dyT , & ! height of T-cell through the middle (m) - cyp , & ! 1.5*HTE - 0.5*HTW - cxp , & ! 1.5*HTN - 0.5*HTS - cym , & ! 0.5*HTE - 1.5*HTW - cxm ! 0.5*HTN - 1.5*HTS - - real (kind=dbl_kind), dimension(nx_block,ny_block,4), intent(in) :: & - zetax2 , & ! zetax2 = 2*zeta (bulk viscosity) - etax2 , & ! etax2 = 2*eta (shear viscosity) - rep_prs - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & - stressp_1, stressp_2, stressp_3, stressp_4 , & ! sigma11+sigma22 - stressm_1, stressm_2, stressm_3, stressm_4 , & ! sigma11-sigma22 - stress12_1,stress12_2,stress12_3,stress12_4 ! sigma12 - - ! local variables - - integer (kind=int_kind) :: & - i, j, ij - - real (kind=dbl_kind) :: & - divune, divunw, divuse, divusw , & ! divergence - tensionne, tensionnw, tensionse, tensionsw, & ! tension - shearne, shearnw, shearse, shearsw , & ! shearing - Deltane, Deltanw, Deltase, Deltasw ! Delt - - character(len=*), parameter :: subname = '(stress_vp)' - - do ij = 1, icellT - i = indxTi(ij) - j = indxTj(ij) - - !----------------------------------------------------------------- - ! strain rates - ! NOTE these are actually strain rates * area (m^2/s) - !----------------------------------------------------------------- - call strain_rates (nx_block , ny_block , & - i , j , & - uvel , vvel , & - dxT , dyT , & - cxp , cyp , & - cxm , cym , & - divune , divunw , & - divuse , divusw , & - tensionne, tensionnw, & - tensionse, tensionsw, & - shearne , shearnw , & - shearse , shearsw , & - Deltane , Deltanw , & - Deltase , Deltasw) - - !----------------------------------------------------------------- - ! the stresses ! kg/s^2 - ! 
(1) northeast, (2) northwest, (3) southwest, (4) southeast - !----------------------------------------------------------------- - - stressp_1(i,j) = zetax2(i,j,1)*divune - rep_prs(i,j,1) - stressp_2(i,j) = zetax2(i,j,2)*divunw - rep_prs(i,j,2) - stressp_3(i,j) = zetax2(i,j,3)*divusw - rep_prs(i,j,3) - stressp_4(i,j) = zetax2(i,j,4)*divuse - rep_prs(i,j,4) - - stressm_1(i,j) = etax2(i,j,1)*tensionne - stressm_2(i,j) = etax2(i,j,2)*tensionnw - stressm_3(i,j) = etax2(i,j,3)*tensionsw - stressm_4(i,j) = etax2(i,j,4)*tensionse - - stress12_1(i,j) = etax2(i,j,1)*shearne*p5 - stress12_2(i,j) = etax2(i,j,2)*shearnw*p5 - stress12_3(i,j) = etax2(i,j,3)*shearsw*p5 - stress12_4(i,j) = etax2(i,j,4)*shearse*p5 - - enddo ! ij - - end subroutine stress_vp - -!======================================================================= - -! Compute vrel and seabed stress coefficients - - subroutine calc_vrel_Cb (nx_block, ny_block, & - icellU , Cw , & - indxUi , indxUj , & - aiU , TbU , & - uocn , vocn , & - uvel , vvel , & - vrel , Cb) - - use ice_dyn_shared, only: u0 ! residual velocity for seabed stress (m/s) - - integer (kind=int_kind), intent(in) :: & - nx_block, ny_block, & ! block dimensions - icellU ! total count when iceUmask = .true. - - integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxUi , & ! compressed index in i-direction - indxUj ! compressed index in j-direction - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - TbU, & ! seabed stress factor (N/m^2) - aiU , & ! ice fraction on u-grid - uocn , & ! ocean current, x-direction (m/s) - vocn , & ! ocean current, y-direction (m/s) - Cw ! ocean-ice neutral drag coefficient - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - uvel , & ! x-component of velocity (m/s) - vvel ! y-component of velocity (m/s) - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & - vrel , & ! coeff for tauw - Cb ! seabed stress coeff - - ! 
local variables - - integer (kind=int_kind) :: & - i, j, ij - - real (kind=dbl_kind) :: & - rhow ! - - character(len=*), parameter :: subname = '(calc_vrel_Cb)' - - call icepack_query_parameters(rhow_out=rhow) - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) - - do ij = 1, icellU - i = indxUi(ij) - j = indxUj(ij) - - ! (magnitude of relative ocean current)*rhow*drag*aice - vrel(i,j) = aiU(i,j)*rhow*Cw(i,j)*sqrt((uocn(i,j) - uvel(i,j))**2 + & - (vocn(i,j) - vvel(i,j))**2) ! m/s - - Cb(i,j) = TbU(i,j) / (sqrt(uvel(i,j)**2 + vvel(i,j)**2) + u0) ! for seabed stress - enddo ! ij - - end subroutine calc_vrel_Cb - -!======================================================================= - -! Compute seabed stress (diagnostic) - - subroutine calc_seabed_stress (nx_block, ny_block, & - icellU , & - indxUi , indxUj , & - uvel , vvel , & - Cb , & - taubxU , taubyU) - - integer (kind=int_kind), intent(in) :: & - nx_block, ny_block, & ! block dimensions - icellU ! total count when iceUmask = .true. - - integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxUi , & ! compressed index in i-direction - indxUj ! compressed index in j-direction - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - uvel , & ! x-component of velocity (m/s) - vvel , & ! y-component of velocity (m/s) - Cb ! seabed stress coefficient - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out) :: & - taubxU , & ! seabed stress, x-direction (N/m^2) - taubyU ! seabed stress, y-direction (N/m^2) - - ! local variables - - integer (kind=int_kind) :: & - i, j, ij - - character(len=*), parameter :: subname = '(calc_seabed_stress)' - - do ij = 1, icellU - i = indxUi(ij) - j = indxUj(ij) - - taubxU(i,j) = -uvel(i,j)*Cb(i,j) - taubyU(i,j) = -vvel(i,j)*Cb(i,j) - enddo ! 
ij - - end subroutine calc_seabed_stress - -!======================================================================= - -! Computes the matrix vector product A(u,v) * (u,v) -! Au = A(u,v)_[x] * uvel (x components of A(u,v) * (u,v)) -! Av = A(u,v)_[y] * vvel (y components of A(u,v) * (u,v)) - - subroutine matvec (nx_block, ny_block, & - icellU , icellT , & - indxUi , indxUj , & - indxTi , indxTj , & - dxT , dyT , & - dxhy , dyhx , & - cxp , cyp , & - cxm , cym , & - uvel , vvel , & - vrel , Cb , & - zetax2 , etax2 , & - umassdti, fmU , & - uarear , & - Au , Av) - - use ice_dyn_shared, only: strain_rates - - integer (kind=int_kind), intent(in) :: & - nx_block, ny_block, & ! block dimensions - icellU, & ! total count when iceUmask = .true. - icellT ! total count when iceTmask = .true. - - integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxUi , & ! compressed index in i-direction - indxUj , & ! compressed index in j-direction - indxTi , & ! compressed index in i-direction - indxTj ! compressed index in j-direction - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - dxT , & ! width of T-cell through the middle (m) - dyT , & ! height of T-cell through the middle (m) - dxhy , & ! 0.5*(HTE - HTW) - dyhx , & ! 0.5*(HTN - HTS) - cyp , & ! 1.5*HTE - 0.5*HTW - cxp , & ! 1.5*HTN - 0.5*HTS - cym , & ! 0.5*HTE - 1.5*HTW - cxm ! 0.5*HTN - 1.5*HTS - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - uvel , & ! x-component of velocity (m/s) - vvel , & ! y-component of velocity (m/s) - vrel , & ! coefficient for tauw - Cb , & ! coefficient for seabed stress - umassdti, & ! mass of U-cell/dt (kg/m^2 s) - fmU , & ! Coriolis param. * mass in U-cell (kg/s) - uarear ! 1/uarea - - real (kind=dbl_kind), dimension(nx_block,ny_block,4), intent(in) :: & - zetax2 , & ! zetax2 = 2*zeta (bulk viscosity) - etax2 ! 
etax2 = 2*eta (shear viscosity) - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & - Au , & ! matvec, Fx = bx - Au (N/m^2) - Av ! matvec, Fy = by - Av (N/m^2) - - ! local variables - - integer (kind=int_kind) :: & - i, j, ij - - real (kind=dbl_kind), dimension(nx_block,ny_block,8):: & - str - - real (kind=dbl_kind) :: & - ccaimp,ccb , & ! intermediate variables - strintx, strinty ! divergence of the internal stress tensor - - real (kind=dbl_kind) :: & - divune, divunw, divuse, divusw , & ! divergence - tensionne, tensionnw, tensionse, tensionsw, & ! tension - shearne, shearnw, shearse, shearsw , & ! shearing - Deltane, Deltanw, Deltase, Deltasw , & ! Delt - ssigpn, ssigps, ssigpe, ssigpw , & - ssigmn, ssigms, ssigme, ssigmw , & - ssig12n, ssig12s, ssig12e, ssig12w , & - ssigp1, ssigp2, ssigm1, ssigm2, ssig121, ssig122, & - csigpne, csigpnw, csigpse, csigpsw , & - csigmne, csigmnw, csigmse, csigmsw , & - csig12ne, csig12nw, csig12se, csig12sw , & - str12ew, str12we, str12ns, str12sn , & - strp_tmp, strm_tmp - - real (kind=dbl_kind) :: & - stressp_1, stressp_2, stressp_3, stressp_4 , & ! sigma11+sigma22 (without Pr) - stressm_1, stressm_2, stressm_3, stressm_4 , & ! sigma11-sigma22 - stress12_1,stress12_2,stress12_3,stress12_4 ! sigma12 - - character(len=*), parameter :: subname = '(matvec)' - - !----------------------------------------------------------------- - ! Initialize - !----------------------------------------------------------------- - - str(:,:,:) = c0 - - do ij = 1, icellT - i = indxTi(ij) - j = indxTj(ij) - - !----------------------------------------------------------------- - ! strain rates - ! 
NOTE these are actually strain rates * area (m^2/s) - !----------------------------------------------------------------- - call strain_rates (nx_block , ny_block , & - i , j , & - uvel , vvel , & - dxT , dyT , & - cxp , cyp , & - cxm , cym , & - divune , divunw , & - divuse , divusw , & - tensionne, tensionnw, & - tensionse, tensionsw, & - shearne , shearnw , & - shearse , shearsw , & - Deltane , Deltanw , & - Deltase , Deltasw) - - !----------------------------------------------------------------- - ! the stresses ! kg/s^2 - ! (1) northeast, (2) northwest, (3) southwest, (4) southeast - ! NOTE: commented part of stressp is from the replacement pressure Pr - !----------------------------------------------------------------- - - stressp_1 = zetax2(i,j,1)*divune! - Deltane*(c1-Ktens)) - stressp_2 = zetax2(i,j,2)*divunw! - Deltanw*(c1-Ktens)) - stressp_3 = zetax2(i,j,3)*divusw! - Deltasw*(c1-Ktens)) - stressp_4 = zetax2(i,j,4)*divuse! - Deltase*(c1-Ktens)) - - stressm_1 = etax2(i,j,1)*tensionne - stressm_2 = etax2(i,j,2)*tensionnw - stressm_3 = etax2(i,j,3)*tensionsw - stressm_4 = etax2(i,j,4)*tensionse - - stress12_1 = etax2(i,j,1)*shearne*p5 - stress12_2 = etax2(i,j,2)*shearnw*p5 - stress12_3 = etax2(i,j,3)*shearsw*p5 - stress12_4 = etax2(i,j,4)*shearse*p5 - - !----------------------------------------------------------------- - ! combinations of the stresses for the momentum equation ! 
kg/s^2 - !----------------------------------------------------------------- - - ssigpn = stressp_1 + stressp_2 - ssigps = stressp_3 + stressp_4 - ssigpe = stressp_1 + stressp_4 - ssigpw = stressp_2 + stressp_3 - ssigp1 =(stressp_1 + stressp_3)*p055 - ssigp2 =(stressp_2 + stressp_4)*p055 - - ssigmn = stressm_1 + stressm_2 - ssigms = stressm_3 + stressm_4 - ssigme = stressm_1 + stressm_4 - ssigmw = stressm_2 + stressm_3 - ssigm1 =(stressm_1 + stressm_3)*p055 - ssigm2 =(stressm_2 + stressm_4)*p055 - - ssig12n = stress12_1 + stress12_2 - ssig12s = stress12_3 + stress12_4 - ssig12e = stress12_1 + stress12_4 - ssig12w = stress12_2 + stress12_3 - ssig121 =(stress12_1 + stress12_3)*p111 - ssig122 =(stress12_2 + stress12_4)*p111 - - csigpne = p111*stressp_1 + ssigp2 + p027*stressp_3 - csigpnw = p111*stressp_2 + ssigp1 + p027*stressp_4 - csigpsw = p111*stressp_3 + ssigp2 + p027*stressp_1 - csigpse = p111*stressp_4 + ssigp1 + p027*stressp_2 - - csigmne = p111*stressm_1 + ssigm2 + p027*stressm_3 - csigmnw = p111*stressm_2 + ssigm1 + p027*stressm_4 - csigmsw = p111*stressm_3 + ssigm2 + p027*stressm_1 - csigmse = p111*stressm_4 + ssigm1 + p027*stressm_2 - - csig12ne = p222*stress12_1 + ssig122 & - + p055*stress12_3 - csig12nw = p222*stress12_2 + ssig121 & - + p055*stress12_4 - csig12sw = p222*stress12_3 + ssig122 & - + p055*stress12_1 - csig12se = p222*stress12_4 + ssig121 & - + p055*stress12_2 - - str12ew = p5*dxT(i,j)*(p333*ssig12e + p166*ssig12w) - str12we = p5*dxT(i,j)*(p333*ssig12w + p166*ssig12e) - str12ns = p5*dyT(i,j)*(p333*ssig12n + p166*ssig12s) - str12sn = p5*dyT(i,j)*(p333*ssig12s + p166*ssig12n) - - !----------------------------------------------------------------- - ! for dF/dx (u momentum) - !----------------------------------------------------------------- - strp_tmp = p25*dyT(i,j)*(p333*ssigpn + p166*ssigps) - strm_tmp = p25*dyT(i,j)*(p333*ssigmn + p166*ssigms) - - ! 
northeast (i,j) - str(i,j,1) = -strp_tmp - strm_tmp - str12ew & - + dxhy(i,j)*(-csigpne + csigmne) + dyhx(i,j)*csig12ne - - ! northwest (i+1,j) - str(i,j,2) = strp_tmp + strm_tmp - str12we & - + dxhy(i,j)*(-csigpnw + csigmnw) + dyhx(i,j)*csig12nw - - strp_tmp = p25*dyT(i,j)*(p333*ssigps + p166*ssigpn) - strm_tmp = p25*dyT(i,j)*(p333*ssigms + p166*ssigmn) - - ! southeast (i,j+1) - str(i,j,3) = -strp_tmp - strm_tmp + str12ew & - + dxhy(i,j)*(-csigpse + csigmse) + dyhx(i,j)*csig12se - - ! southwest (i+1,j+1) - str(i,j,4) = strp_tmp + strm_tmp + str12we & - + dxhy(i,j)*(-csigpsw + csigmsw) + dyhx(i,j)*csig12sw - - !----------------------------------------------------------------- - ! for dF/dy (v momentum) - !----------------------------------------------------------------- - strp_tmp = p25*dxT(i,j)*(p333*ssigpe + p166*ssigpw) - strm_tmp = p25*dxT(i,j)*(p333*ssigme + p166*ssigmw) - - ! northeast (i,j) - str(i,j,5) = -strp_tmp + strm_tmp - str12ns & - - dyhx(i,j)*(csigpne + csigmne) + dxhy(i,j)*csig12ne - - ! southeast (i,j+1) - str(i,j,6) = strp_tmp - strm_tmp - str12sn & - - dyhx(i,j)*(csigpse + csigmse) + dxhy(i,j)*csig12se - - strp_tmp = p25*dxT(i,j)*(p333*ssigpw + p166*ssigpe) - strm_tmp = p25*dxT(i,j)*(p333*ssigmw + p166*ssigme) - - ! northwest (i+1,j) - str(i,j,7) = -strp_tmp + strm_tmp + str12ns & - - dyhx(i,j)*(csigpnw + csigmnw) + dxhy(i,j)*csig12nw - - ! southwest (i+1,j+1) - str(i,j,8) = strp_tmp - strm_tmp + str12sn & - - dyhx(i,j)*(csigpsw + csigmsw) + dxhy(i,j)*csig12sw - - enddo ! ij - icellT - - !----------------------------------------------------------------- - ! Form Au and Av - !----------------------------------------------------------------- - - do ij = 1, icellU - i = indxUi(ij) - j = indxUj(ij) - - ccaimp = umassdti(i,j) + vrel(i,j) * cosw + Cb(i,j) ! kg/m^2 s - - ccb = fmU(i,j) + sign(c1,fmU(i,j)) * vrel(i,j) * sinw ! kg/m^2 s - - ! 
divergence of the internal stress tensor - strintx = uarear(i,j)* & - (str(i,j,1) + str(i+1,j,2) + str(i,j+1,3) + str(i+1,j+1,4)) - strinty = uarear(i,j)* & - (str(i,j,5) + str(i,j+1,6) + str(i+1,j,7) + str(i+1,j+1,8)) - - Au(i,j) = ccaimp*uvel(i,j) - ccb*vvel(i,j) - strintx - Av(i,j) = ccaimp*vvel(i,j) + ccb*uvel(i,j) - strinty - enddo ! ij - icellU - - end subroutine matvec - -!======================================================================= - -! Compute the constant component of b(u,v) i.e. the part of b(u,v) that -! does not depend on (u,v) and thus do not change during the nonlinear iteration - - subroutine calc_bfix (nx_block , ny_block , & - icellU , & - indxUi , indxUj , & - umassdti , & - forcexU , forceyU , & - uvel_init, vvel_init, & - bxfix , byfix) - - integer (kind=int_kind), intent(in) :: & - nx_block, ny_block, & ! block dimensions - icellU ! no. of cells where iceUmask = .true. - - integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxUi , & ! compressed index in i-direction - indxUj ! compressed index in j-direction - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - uvel_init,& ! x-component of velocity (m/s), beginning of time step - vvel_init,& ! y-component of velocity (m/s), beginning of time step - umassdti, & ! mass of U-cell/dt (kg/m^2 s) - forcexU , & ! work array: combined atm stress and ocn tilt, x - forceyU ! work array: combined atm stress and ocn tilt, y - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out) :: & - bxfix , & ! bx = taux + bxfix - byfix ! by = tauy + byfix - - ! 
local variables - - integer (kind=int_kind) :: & - i, j, ij - - character(len=*), parameter :: subname = '(calc_bfix)' - - do ij = 1, icellU - i = indxUi(ij) - j = indxUj(ij) - - bxfix(i,j) = umassdti(i,j)*uvel_init(i,j) + forcexU(i,j) - byfix(i,j) = umassdti(i,j)*vvel_init(i,j) + forceyU(i,j) - enddo - - end subroutine calc_bfix - -!======================================================================= - -! Compute the vector b(u,v), i.e. the part of the nonlinear function F(u,v) -! that cannot be written as A(u,v)*(u,v), where A(u,v) is a matrix with entries -! depending on (u,v) - - subroutine calc_bvec (nx_block, ny_block, & - icellU , & - indxUi , indxUj , & - stPr , uarear , & - waterxU , wateryU , & - bxfix , byfix , & - bx , by , & - vrel) - - integer (kind=int_kind), intent(in) :: & - nx_block, ny_block, & ! block dimensions - icellU ! total count when iceUmask = .true. - - integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxUi , & ! compressed index in i-direction - indxUj ! compressed index in j-direction - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - uarear , & ! 1/uarea - waterxU , & ! for ocean stress calculation, x (m/s) - wateryU , & ! for ocean stress calculation, y (m/s) - bxfix , & ! bx = taux + bxfix - byfix , & ! by = tauy + byfix - vrel ! relative ice-ocean velocity - - real (kind=dbl_kind), dimension(nx_block,ny_block,8), intent(in) :: & - stPr - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out) :: & - bx , & ! b vector, bx = taux + bxfix (N/m^2) - by ! b vector, by = tauy + byfix (N/m^2) - - ! local variables - - integer (kind=int_kind) :: & - i, j, ij - - real (kind=dbl_kind) :: & - taux, tauy , & ! part of ocean stress term - strintx, strinty , & ! divergence of the internal stress tensor (only Pr contributions) - rhow ! - - character(len=*), parameter :: subname = '(calc_bvec)' - - !----------------------------------------------------------------- - ! 
calc b vector - !----------------------------------------------------------------- - - call icepack_query_parameters(rhow_out=rhow) - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) - - do ij = 1, icellU - i = indxUi(ij) - j = indxUj(ij) - - ! ice/ocean stress - taux = vrel(i,j)*waterxU(i,j) ! NOTE this is not the entire - tauy = vrel(i,j)*wateryU(i,j) ! ocn stress term - - ! divergence of the internal stress tensor (only Pr part, i.e. dPr/dx, dPr/dy) - strintx = uarear(i,j)* & - (stPr(i,j,1) + stPr(i+1,j,2) + stPr(i,j+1,3) + stPr(i+1,j+1,4)) - strinty = uarear(i,j)* & - (stPr(i,j,5) + stPr(i,j+1,6) + stPr(i+1,j,7) + stPr(i+1,j+1,8)) - - bx(i,j) = bxfix(i,j) + taux + strintx - by(i,j) = byfix(i,j) + tauy + strinty - enddo ! ij - - end subroutine calc_bvec - -!======================================================================= - -! Compute the non linear residual F(u,v) = b(u,v) - A(u,v) * (u,v), -! with Au, Av precomputed as -! Au = A(u,v)_[x] * uvel (x components of A(u,v) * (u,v)) -! Av = A(u,v)_[y] * vvel (y components of A(u,v) * (u,v)) - - subroutine residual_vec (nx_block , ny_block, & - icellU , & - indxUi , indxUj , & - bx , by , & - Au , Av , & - Fx , Fy ) - - integer (kind=int_kind), intent(in) :: & - nx_block, ny_block, & ! block dimensions - icellU ! total count when iceUmask = .true. - - integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxUi , & ! compressed index in i-direction - indxUj ! compressed index in j-direction - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - bx , & ! b vector, bx = taux + bxfix (N/m^2) - by , & ! b vector, by = tauy + byfix (N/m^2) - Au , & ! matvec, Fx = bx - Au (N/m^2) - Av ! matvec, Fy = by - Av (N/m^2) - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(inout) :: & - Fx , & ! x residual vector, Fx = bx - Au (N/m^2) - Fy ! 
y residual vector, Fy = by - Av (N/m^2) - - ! local variables - - integer (kind=int_kind) :: & - i, j, ij - - character(len=*), parameter :: subname = '(residual_vec)' - - !----------------------------------------------------------------- - ! compute residual - !----------------------------------------------------------------- - - do ij = 1, icellU - i = indxUi(ij) - j = indxUj(ij) - - Fx(i,j) = bx(i,j) - Au(i,j) - Fy(i,j) = by(i,j) - Av(i,j) - enddo ! ij - - end subroutine residual_vec - -!======================================================================= - -! Form the diagonal of the matrix A(u,v) (first part of the computation) -! Part 1: compute the contributions to the diagonal from the rheology term - - subroutine formDiag_step1 (nx_block, ny_block, & - icellU , & - indxUi , indxUj , & - dxT , dyT , & - dxhy , dyhx , & - cxp , cyp , & - cxm , cym , & - zetax2 , etax2 , & - Drheo) - - integer (kind=int_kind), intent(in) :: & - nx_block, ny_block, & ! block dimensions - icellU ! no. of cells where iceUmask = .true. - - integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxUi , & ! compressed index in i-direction - indxUj ! compressed index in j-direction - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - dxT , & ! width of T-cell through the middle (m) - dyT , & ! height of T-cell through the middle (m) - dxhy , & ! 0.5*(HTE - HTW) - dyhx , & ! 0.5*(HTN - HTS) - cyp , & ! 1.5*HTE - 0.5*HTW - cxp , & ! 1.5*HTN - 0.5*HTS - cym , & ! 0.5*HTE - 1.5*HTW - cxm ! 0.5*HTN - 1.5*HTS - - real (kind=dbl_kind), dimension(nx_block,ny_block,4), intent(in) :: & - zetax2 , & ! zetax2 = 2*zeta (bulk viscosity) - etax2 ! etax2 = 2*eta (shear viscosity) - - real (kind=dbl_kind), dimension(nx_block,ny_block,8), intent(out) :: & - Drheo ! intermediate value for diagonal components of matrix A associated - ! with rheology term - - ! 
local variables - - integer (kind=int_kind) :: & - i, j, ij, iu, ju, di, dj, cc - - real (kind=dbl_kind) :: & - divune, divunw, divuse, divusw , & ! divergence - tensionne, tensionnw, tensionse, tensionsw, & ! tension - shearne, shearnw, shearse, shearsw , & ! shearing - uij,ui1j,uij1,ui1j1,vij,vi1j,vij1,vi1j1 , & ! == c0 or c1 - stressp_1, stressp_2, stressp_3, stressp_4, & - stressm_1, stressm_2, stressm_3, stressm_4, & - stress12_1,stress12_2,stress12_3,stress12_4,& - ssigpn, ssigps, ssigpe, ssigpw , & - ssigmn, ssigms, ssigme, ssigmw , & - ssig12n, ssig12s, ssig12e, ssig12w , & - ssigp1, ssigp2, ssigm1, ssigm2, ssig121, ssig122, & - csigpne, csigpnw, csigpse, csigpsw , & - csigmne, csigmnw, csigmse, csigmsw , & - csig12ne, csig12nw, csig12se, csig12sw , & - str12ew, str12we, str12ns, str12sn , & - strp_tmp, strm_tmp - - character(len=*), parameter :: subname = '(formDiag_step1)' - - !----------------------------------------------------------------- - ! Initialize - !----------------------------------------------------------------- - - Drheo(:,:,:) = c0 - - ! Be careful: Drheo contains 4 terms for u and 4 terms for v. - ! These 8 terms come from the surrounding T cells but are all - ! refrerenced to the i,j (u point) : - - ! Drheo(i,j,1) corresponds to str(i,j,1) - ! Drheo(i,j,2) corresponds to str(i+1,j,2) - ! Drheo(i,j,3) corresponds to str(i,j+1,3) - ! Drheo(i,j,4) corresponds to str(i+1,j+1,4)) - ! Drheo(i,j,5) corresponds to str(i,j,5) - ! Drheo(i,j,6) corresponds to str(i,j+1,6) - ! Drheo(i,j,7) corresponds to str(i+1,j,7) - ! Drheo(i,j,8) corresponds to str(i+1,j+1,8)) - - do cc = 1, 8 ! 4 for u and 4 for v - - if (cc == 1) then ! u comp, T cell i,j - uij = c1 - ui1j = c0 - uij1 = c0 - ui1j1 = c0 - vij = c0 - vi1j = c0 - vij1 = c0 - vi1j1 = c0 - di = 0 - dj = 0 - elseif (cc == 2) then ! u comp, T cell i+1,j - uij = c0 - ui1j = c1 - uij1 = c0 - ui1j1 = c0 - vij = c0 - vi1j = c0 - vij1 = c0 - vi1j1 = c0 - di = 1 - dj = 0 - elseif (cc == 3) then ! 
u comp, T cell i,j+1 - uij = c0 - ui1j = c0 - uij1 = c1 - ui1j1 = c0 - vij = c0 - vi1j = c0 - vij1 = c0 - vi1j1 = c0 - di = 0 - dj = 1 - elseif (cc == 4) then ! u comp, T cell i+1,j+1 - uij = c0 - ui1j = c0 - uij1 = c0 - ui1j1 = c1 - vij = c0 - vi1j = c0 - vij1 = c0 - vi1j1 = c0 - di = 1 - dj = 1 - elseif (cc == 5) then ! v comp, T cell i,j - uij = c0 - ui1j = c0 - uij1 = c0 - ui1j1 = c0 - vij = c1 - vi1j = c0 - vij1 = c0 - vi1j1 = c0 - di = 0 - dj = 0 - elseif (cc == 6) then ! v comp, T cell i,j+1 - uij = c0 - ui1j = c0 - uij1 = c0 - ui1j1 = c0 - vij = c0 - vi1j = c0 - vij1 = c1 - vi1j1 = c0 - di = 0 - dj = 1 - elseif (cc == 7) then ! v comp, T cell i+1,j - uij = c0 - ui1j = c0 - uij1 = c0 - ui1j1 = c0 - vij = c0 - vi1j = c1 - vij1 = c0 - vi1j1 = c0 - di = 1 - dj = 0 - elseif (cc == 8) then ! v comp, T cell i+1,j+1 - uij = c0 - ui1j = c0 - uij1 = c0 - ui1j1 = c0 - vij = c0 - vi1j = c0 - vij1 = c0 - vi1j1 = c1 - di = 1 - dj = 1 - endif - - do ij = 1, icellU - - iu = indxUi(ij) - ju = indxUj(ij) - i = iu + di - j = ju + dj - - !----------------------------------------------------------------- - ! strain rates - ! NOTE these are actually strain rates * area (m^2/s) - !----------------------------------------------------------------- - ! divergence = e_11 + e_22 - divune = cyp(i,j)*uij - dyT(i,j)*ui1j & - + cxp(i,j)*vij - dxT(i,j)*vij1 - divunw = cym(i,j)*ui1j + dyT(i,j)*uij & - + cxp(i,j)*vi1j - dxT(i,j)*vi1j1 - divusw = cym(i,j)*ui1j1 + dyT(i,j)*uij1 & - + cxm(i,j)*vi1j1 + dxT(i,j)*vi1j - divuse = cyp(i,j)*uij1 - dyT(i,j)*ui1j1 & - + cxm(i,j)*vij1 + dxT(i,j)*vij - - ! tension strain rate = e_11 - e_22 - tensionne = -cym(i,j)*uij - dyT(i,j)*ui1j & - + cxm(i,j)*vij + dxT(i,j)*vij1 - tensionnw = -cyp(i,j)*ui1j + dyT(i,j)*uij & - + cxm(i,j)*vi1j + dxT(i,j)*vi1j1 - tensionsw = -cyp(i,j)*ui1j1 + dyT(i,j)*uij1 & - + cxp(i,j)*vi1j1 - dxT(i,j)*vi1j - tensionse = -cym(i,j)*uij1 - dyT(i,j)*ui1j1 & - + cxp(i,j)*vij1 - dxT(i,j)*vij - - ! 
shearing strain rate = 2*e_12 - shearne = -cym(i,j)*vij - dyT(i,j)*vi1j & - - cxm(i,j)*uij - dxT(i,j)*uij1 - shearnw = -cyp(i,j)*vi1j + dyT(i,j)*vij & - - cxm(i,j)*ui1j - dxT(i,j)*ui1j1 - shearsw = -cyp(i,j)*vi1j1 + dyT(i,j)*vij1 & - - cxp(i,j)*ui1j1 + dxT(i,j)*ui1j - shearse = -cym(i,j)*vij1 - dyT(i,j)*vi1j1 & - - cxp(i,j)*uij1 + dxT(i,j)*uij - - !----------------------------------------------------------------- - ! the stresses ! kg/s^2 - ! (1) northeast, (2) northwest, (3) southwest, (4) southeast - !----------------------------------------------------------------- - - stressp_1 = zetax2(i,j,1)*divune - stressp_2 = zetax2(i,j,2)*divunw - stressp_3 = zetax2(i,j,3)*divusw - stressp_4 = zetax2(i,j,4)*divuse - - stressm_1 = etax2(i,j,1)*tensionne - stressm_2 = etax2(i,j,2)*tensionnw - stressm_3 = etax2(i,j,3)*tensionsw - stressm_4 = etax2(i,j,4)*tensionse - - stress12_1 = etax2(i,j,1)*shearne*p5 - stress12_2 = etax2(i,j,2)*shearnw*p5 - stress12_3 = etax2(i,j,3)*shearsw*p5 - stress12_4 = etax2(i,j,4)*shearse*p5 - - !----------------------------------------------------------------- - ! combinations of the stresses for the momentum equation ! 
kg/s^2 - !----------------------------------------------------------------- - - ssigpn = stressp_1 + stressp_2 - ssigps = stressp_3 + stressp_4 - ssigpe = stressp_1 + stressp_4 - ssigpw = stressp_2 + stressp_3 - ssigp1 =(stressp_1 + stressp_3)*p055 - ssigp2 =(stressp_2 + stressp_4)*p055 - - ssigmn = stressm_1 + stressm_2 - ssigms = stressm_3 + stressm_4 - ssigme = stressm_1 + stressm_4 - ssigmw = stressm_2 + stressm_3 - ssigm1 =(stressm_1 + stressm_3)*p055 - ssigm2 =(stressm_2 + stressm_4)*p055 - - ssig12n = stress12_1 + stress12_2 - ssig12s = stress12_3 + stress12_4 - ssig12e = stress12_1 + stress12_4 - ssig12w = stress12_2 + stress12_3 - ssig121 =(stress12_1 + stress12_3)*p111 - ssig122 =(stress12_2 + stress12_4)*p111 - - csigpne = p111*stressp_1 + ssigp2 + p027*stressp_3 - csigpnw = p111*stressp_2 + ssigp1 + p027*stressp_4 - csigpsw = p111*stressp_3 + ssigp2 + p027*stressp_1 - csigpse = p111*stressp_4 + ssigp1 + p027*stressp_2 - - csigmne = p111*stressm_1 + ssigm2 + p027*stressm_3 - csigmnw = p111*stressm_2 + ssigm1 + p027*stressm_4 - csigmsw = p111*stressm_3 + ssigm2 + p027*stressm_1 - csigmse = p111*stressm_4 + ssigm1 + p027*stressm_2 - - csig12ne = p222*stress12_1 + ssig122 & - + p055*stress12_3 - csig12nw = p222*stress12_2 + ssig121 & - + p055*stress12_4 - csig12sw = p222*stress12_3 + ssig122 & - + p055*stress12_1 - csig12se = p222*stress12_4 + ssig121 & - + p055*stress12_2 - - str12ew = p5*dxT(i,j)*(p333*ssig12e + p166*ssig12w) - str12we = p5*dxT(i,j)*(p333*ssig12w + p166*ssig12e) - str12ns = p5*dyT(i,j)*(p333*ssig12n + p166*ssig12s) - str12sn = p5*dyT(i,j)*(p333*ssig12s + p166*ssig12n) - - !----------------------------------------------------------------- - ! for dF/dx (u momentum) - !----------------------------------------------------------------- - - if (cc == 1) then ! T cell i,j - - strp_tmp = p25*dyT(i,j)*(p333*ssigpn + p166*ssigps) - strm_tmp = p25*dyT(i,j)*(p333*ssigmn + p166*ssigms) - - ! 
northeast (i,j) - Drheo(iu,ju,1) = -strp_tmp - strm_tmp - str12ew & - + dxhy(i,j)*(-csigpne + csigmne) + dyhx(i,j)*csig12ne - - elseif (cc == 2) then ! T cell i+1,j - - strp_tmp = p25*dyT(i,j)*(p333*ssigpn + p166*ssigps) - strm_tmp = p25*dyT(i,j)*(p333*ssigmn + p166*ssigms) - - ! northwest (i+1,j) - Drheo(iu,ju,2) = strp_tmp + strm_tmp - str12we & - + dxhy(i,j)*(-csigpnw + csigmnw) + dyhx(i,j)*csig12nw - - elseif (cc == 3) then ! T cell i,j+1 - - strp_tmp = p25*dyT(i,j)*(p333*ssigps + p166*ssigpn) - strm_tmp = p25*dyT(i,j)*(p333*ssigms + p166*ssigmn) - - ! southeast (i,j+1) - Drheo(iu,ju,3) = -strp_tmp - strm_tmp + str12ew & - + dxhy(i,j)*(-csigpse + csigmse) + dyhx(i,j)*csig12se - - elseif (cc == 4) then ! T cell i+1,j+1 - - strp_tmp = p25*dyT(i,j)*(p333*ssigps + p166*ssigpn) - strm_tmp = p25*dyT(i,j)*(p333*ssigms + p166*ssigmn) - - ! southwest (i+1,j+1) - Drheo(iu,ju,4) = strp_tmp + strm_tmp + str12we & - + dxhy(i,j)*(-csigpsw + csigmsw) + dyhx(i,j)*csig12sw - - !----------------------------------------------------------------- - ! for dF/dy (v momentum) - !----------------------------------------------------------------- - - elseif (cc == 5) then ! T cell i,j - - strp_tmp = p25*dxT(i,j)*(p333*ssigpe + p166*ssigpw) - strm_tmp = p25*dxT(i,j)*(p333*ssigme + p166*ssigmw) - - ! northeast (i,j) - Drheo(iu,ju,5) = -strp_tmp + strm_tmp - str12ns & - - dyhx(i,j)*(csigpne + csigmne) + dxhy(i,j)*csig12ne - - elseif (cc == 6) then ! T cell i,j+1 - - strp_tmp = p25*dxT(i,j)*(p333*ssigpe + p166*ssigpw) - strm_tmp = p25*dxT(i,j)*(p333*ssigme + p166*ssigmw) - - ! southeast (i,j+1) - Drheo(iu,ju,6) = strp_tmp - strm_tmp - str12sn & - - dyhx(i,j)*(csigpse + csigmse) + dxhy(i,j)*csig12se - - elseif (cc == 7) then ! T cell i,j+1 - - strp_tmp = p25*dxT(i,j)*(p333*ssigpw + p166*ssigpe) - strm_tmp = p25*dxT(i,j)*(p333*ssigmw + p166*ssigme) - - ! 
northwest (i+1,j) - Drheo(iu,ju,7) = -strp_tmp + strm_tmp + str12ns & - - dyhx(i,j)*(csigpnw + csigmnw) + dxhy(i,j)*csig12nw - - elseif (cc == 8) then ! T cell i+1,j+1 - - strp_tmp = p25*dxT(i,j)*(p333*ssigpw + p166*ssigpe) - strm_tmp = p25*dxT(i,j)*(p333*ssigmw + p166*ssigme) - - ! southwest (i+1,j+1) - Drheo(iu,ju,8) = strp_tmp - strm_tmp + str12sn & - - dyhx(i,j)*(csigpsw + csigmsw) + dxhy(i,j)*csig12sw - - endif - - enddo ! ij - - enddo ! cc - - end subroutine formDiag_step1 - -!======================================================================= - -! Form the diagonal of the matrix A(u,v) (second part of the computation) -! Part 2: compute diagonal - - subroutine formDiag_step2 (nx_block, ny_block, & - icellU , & - indxUi , indxUj , & - Drheo , vrel , & - umassdti, & - uarear , Cb , & - diagx , diagy) - - integer (kind=int_kind), intent(in) :: & - nx_block, ny_block, & ! block dimensions - icellU ! total count when iceUmask = .true. - - integer (kind=int_kind), dimension (nx_block*ny_block), intent(in) :: & - indxUi , & ! compressed index in i-direction - indxUj ! compressed index in j-direction - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - vrel, & ! coefficient for tauw - Cb, & ! coefficient for seabed stress - umassdti, & ! mass of U-cell/dt (kg/m^2 s) - uarear ! 1/uarea - - real (kind=dbl_kind), dimension(nx_block,ny_block,8), intent(in) :: & - Drheo - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out) :: & - diagx , & ! Diagonal (x component) of the matrix A - diagy ! Diagonal (y component) of the matrix A - - ! local variables - - integer (kind=int_kind) :: & - i, j, ij - - real (kind=dbl_kind) :: & - ccaimp , & ! intermediate variables - strintx, strinty ! diagonal contributions to the divergence - - character(len=*), parameter :: subname = '(formDiag_step2)' - - !----------------------------------------------------------------- - ! 
integrate the momentum equation - !----------------------------------------------------------------- - - strintx = c0 - strinty = c0 - - ! Be careful: Drheo contains 4 terms for u and 4 terms for v. - ! These 8 terms come from the surrounding T cells but are all - ! refrerenced to the i,j (u point) : - - ! Drheo(i,j,1) corresponds to str(i,j,1) - ! Drheo(i,j,2) corresponds to str(i+1,j,2) - ! Drheo(i,j,3) corresponds to str(i,j+1,3) - ! Drheo(i,j,4) corresponds to str(i+1,j+1,4)) - ! Drheo(i,j,5) corresponds to str(i,j,5) - ! Drheo(i,j,6) corresponds to str(i,j+1,6) - ! Drheo(i,j,7) corresponds to str(i+1,j,7) - ! Drheo(i,j,8) corresponds to str(i+1,j+1,8)) - - do ij = 1, icellU - i = indxUi(ij) - j = indxUj(ij) - - ccaimp = umassdti(i,j) + vrel(i,j) * cosw + Cb(i,j) ! kg/m^2 s - - strintx = uarear(i,j)* & - (Drheo(i,j,1) + Drheo(i,j,2) + Drheo(i,j,3) + Drheo(i,j,4)) - strinty = uarear(i,j)* & - (Drheo(i,j,5) + Drheo(i,j,6) + Drheo(i,j,7) + Drheo(i,j,8)) - - diagx(i,j) = ccaimp - strintx - diagy(i,j) = ccaimp - strinty - enddo ! ij - - end subroutine formDiag_step2 - -!======================================================================= - -! Compute global l^2 norm of a vector field (field_x, field_y) - - function global_norm (nx_block, ny_block, & - icellU , & - indxUi , indxUj , & - field_x , field_y ) & - result(norm) - - use ice_domain, only: distrb_info - use ice_domain_size, only: max_blocks - - integer (kind=int_kind), intent(in) :: & - nx_block, ny_block ! block dimensions - - integer (kind=int_kind), dimension (max_blocks), intent(in) :: & - icellU ! total count when iceumask is true - - integer (kind=int_kind), dimension (nx_block*ny_block,max_blocks), intent(in) :: & - indxUi , & ! compressed index in i-direction - indxUj ! compressed index in j-direction - - real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), intent(in) :: & - field_x , & ! x-component of vector field - field_y ! 
y-component of vector field - - real (kind=dbl_kind) :: & - norm ! l^2 norm of vector field - - ! local variables - - integer (kind=int_kind) :: & - i, j, ij, iblk - - real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & - squared ! temporary array for summed squared components - - character(len=*), parameter :: subname = '(global_norm)' - - norm = sqrt(global_dot_product (nx_block , ny_block , & - icellU , & - indxUi , indxUj , & - field_x , field_y , & - field_x , field_y )) - - end function global_norm - -!======================================================================= - -! Compute global dot product of two grid vectors, each split into X and Y components - - function global_dot_product (nx_block , ny_block , & - icellU , & - indxUi , indxUj , & - vector1_x , vector1_y, & - vector2_x , vector2_y) & - result(dot_product) - - use ice_domain, only: distrb_info - use ice_domain_size, only: max_blocks - use ice_fileunits, only: bfbflag - - integer (kind=int_kind), intent(in) :: & - nx_block, ny_block ! block dimensions - - integer (kind=int_kind), dimension (max_blocks), intent(in) :: & - icellU ! total count when iceumask is true - - integer (kind=int_kind), dimension (nx_block*ny_block,max_blocks), intent(in) :: & - indxUi , & ! compressed index in i-direction - indxUj ! compressed index in j-direction - - real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), intent(in) :: & - vector1_x , & ! x-component of first vector - vector1_y , & ! y-component of first vector - vector2_x , & ! x-component of second vector - vector2_y ! y-component of second vector - - real (kind=dbl_kind) :: & - dot_product ! l^2 norm of vector field - - ! local variables - - integer (kind=int_kind) :: & - i, j, ij, iblk - - real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & - prod ! temporary array - - real (kind=dbl_kind), dimension(max_blocks) :: & - dot ! 
temporary scalar for accumulating the result - - character(len=*), parameter :: subname = '(global_dot_product)' - - prod = c0 - dot = c0 - - !$OMP PARALLEL DO PRIVATE(i, j, ij, iblk) - do iblk = 1, nblocks - do ij = 1, icellU(iblk) - i = indxUi(ij, iblk) - j = indxUj(ij, iblk) - prod(i,j,iblk) = vector1_x(i,j,iblk)*vector2_x(i,j,iblk) + vector1_y(i,j,iblk)*vector2_y(i,j,iblk) - dot(iblk) = dot(iblk) + prod(i,j,iblk) - enddo ! ij - enddo - !$OMP END PARALLEL DO - - ! Use local summation result unless bfbflag is active - if (bfbflag == 'off') then - dot_product = global_sum(sum(dot), distrb_info) - else - dot_product = global_sum(prod, distrb_info, field_loc_NEcorner) - endif - - end function global_dot_product - -!======================================================================= - -! Convert a grid function (tpu,tpv) to a one dimensional vector - - subroutine arrays_to_vec (nx_block, ny_block , & - nblocks , max_blocks, & - icellU , ntot , & - indxUi , indxUj , & - tpu , tpv , & - outvec) - - integer (kind=int_kind), intent(in) :: & - nx_block, ny_block, & ! block dimensions - nblocks, & ! nb of blocks - max_blocks, & ! max nb of blocks - ntot ! size of problem for Anderson - - integer (kind=int_kind), dimension (max_blocks), intent(in) :: & - icellU - - integer (kind=int_kind), dimension (nx_block*ny_block, max_blocks), intent(in) :: & - indxUi , & ! compressed index in i-direction - indxUj ! compressed index in j-direction - - real (kind=dbl_kind), dimension (nx_block,ny_block, max_blocks), intent(in) :: & - tpu , & ! x-component of vector - tpv ! y-component of vector - - real (kind=dbl_kind), dimension (ntot), intent(out) :: & - outvec ! output 1D vector - - ! local variables - - integer (kind=int_kind) :: & - i, j, iblk, tot, ij - - character(len=*), parameter :: subname = '(arrays_to_vec)' - - !----------------------------------------------------------------- - ! 
form vector (converts from max_blocks arrays to single vector) - !----------------------------------------------------------------- - - outvec(:) = c0 - tot = 0 - - do iblk = 1, nblocks - do ij = 1, icellU(iblk) - i = indxUi(ij, iblk) - j = indxUj(ij, iblk) - tot = tot + 1 - outvec(tot) = tpu(i, j, iblk) - tot = tot + 1 - outvec(tot) = tpv(i, j, iblk) - enddo - enddo ! ij - - end subroutine arrays_to_vec - -!======================================================================= - -! Convert one dimensional vector to a grid function (tpu,tpv) - - subroutine vec_to_arrays (nx_block, ny_block , & - nblocks , max_blocks, & - icellU , ntot , & - indxUi , indxUj , & - invec , & - tpu , tpv) - - integer (kind=int_kind), intent(in) :: & - nx_block, ny_block, & ! block dimensions - nblocks, & ! nb of blocks - max_blocks, & ! max nb of blocks - ntot ! size of problem for Anderson - - integer (kind=int_kind), dimension (max_blocks), intent(in) :: & - icellU - - integer (kind=int_kind), dimension (nx_block*ny_block, max_blocks), intent(in) :: & - indxUi , & ! compressed index in i-direction - indxUj ! compressed index in j-direction - - real (kind=dbl_kind), dimension (ntot), intent(in) :: & - invec ! input 1D vector - - real (kind=dbl_kind), dimension (nx_block,ny_block, max_blocks), intent(out) :: & - tpu , & ! x-component of vector - tpv ! y-component of vector - - ! local variables - - integer (kind=int_kind) :: & - i, j, iblk, tot, ij - - character(len=*), parameter :: subname = '(vec_to_arrays)' - - !----------------------------------------------------------------- - ! form arrays (converts from vector to the max_blocks arrays) - !----------------------------------------------------------------- - - tpu(:,:,:) = c0 - tpv(:,:,:) = c0 - tot = 0 - - do iblk = 1, nblocks - do ij = 1, icellU(iblk) - i = indxUi(ij, iblk) - j = indxUj(ij, iblk) - tot = tot + 1 - tpu(i, j, iblk) = invec(tot) - tot = tot + 1 - tpv(i, j, iblk) = invec(tot) - enddo - enddo! 
ij - - end subroutine vec_to_arrays - -!======================================================================= - -! Update Q and R factors after deletion of the 1st column of G_diff -! -! author: P. Blain ECCC -! -! adapted from : -! H. F. Walker, “Anderson Acceleration: Algorithms and Implementations” -! [Online]. Available: https://users.wpi.edu/~walker/Papers/anderson_accn_algs_imps.pdf - - subroutine qr_delete(Q, R) - - real (kind=dbl_kind), intent(inout) :: & - Q(:,:), & ! Q factor - R(:,:) ! R factor - - ! local variables - - integer (kind=int_kind) :: & - i, j, k, & ! loop indices - m, n ! size of Q matrix - - real (kind=dbl_kind) :: & - temp, c, s - - character(len=*), parameter :: subname = '(qr_delete)' - - n = size(Q, 1) - m = size(Q, 2) - do i = 1, m-1 - temp = sqrt(R(i, i+1)**2 + R(i+1, i+1)**2) - c = R(i , i+1) / temp - s = R(i+1, i+1) / temp - R(i , i+1) = temp - R(i+1, i+1) = 0 - if (i < m-1) then - do j = i+2, m - temp = c*R(i, j) + s*R(i+1, j) - R(i+1, j) = -s*R(i, j) + c*R(i+1, j) - R(i , j) = temp - enddo - endif - do k = 1, n - temp = c*Q(k, i) + s*Q(k, i+1); - Q(k, i+1) = -s*Q(k, i) + c*Q(k, i+1); - Q(k, i) = temp - enddo - enddo - R(:, 1:m-1) = R(:, 2:m) - - end subroutine qr_delete - -!======================================================================= - -! FGMRES: Flexible generalized minimum residual method (with restarts). -! Solves the linear system A x = b using GMRES with a varying (right) preconditioner -! -! authors: Stéphane Gaudreault, Abdessamad Qaddouri, Philippe Blain, ECCC - - subroutine fgmres (zetax2 , etax2 , & - Cb , vrel , & - umassdti , & - halo_info_mask , & - bx , by , & - diagx , diagy , & - tolerance, maxinner, & - maxouter , & - solx , soly , & - nbiter) - - use ice_boundary, only: ice_HaloUpdate - use ice_domain, only: maskhalo_dyn, halo_info - use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound - - real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks,4), intent(in) :: & - zetax2 , & ! 
zetax2 = 2*zeta (bulk viscosity) - etax2 ! etax2 = 2*eta (shear viscosity) - - real (kind=dbl_kind), dimension(nx_block, ny_block, max_blocks), intent(in) :: & - vrel , & ! coefficient for tauw - Cb , & ! seabed stress coefficient - umassdti ! mass of U-cell/dte (kg/m^2 s) - - type (ice_halo), intent(in) :: & - halo_info_mask ! ghost cell update info for masked halo - - real (kind=dbl_kind), dimension(nx_block, ny_block, max_blocks), intent(in) :: & - bx , & ! Right hand side of the linear system (x components) - by , & ! Right hand side of the linear system (y components) - diagx , & ! Diagonal of the system matrix (x components) - diagy ! Diagonal of the system matrix (y components) - - real (kind=dbl_kind), intent(in) :: & - tolerance ! Tolerance to achieve. The algorithm terminates when the relative - ! residual is below tolerance - - integer (kind=int_kind), intent(in) :: & - maxinner, & ! Restart the method every maxinner inner (Arnoldi) iterations - maxouter ! Maximum number of outer (restarts) iterations - ! Iteration will stop after maxinner*maxouter Arnoldi steps - ! even if the specified tolerance has not been achieved - - real (kind=dbl_kind), dimension(nx_block, ny_block, max_blocks), intent(inout) :: & - solx , & ! Initial guess on input, approximate solution on output (x components) - soly ! Initial guess on input, approximate solution on output (y components) - - integer (kind=int_kind), intent(out) :: & - nbiter ! Total number of Arnoldi iterations performed - - ! local variables - - integer (kind=int_kind) :: & - iblk , & ! block index - ij , & ! index for indx[t|u][i|j] - i, j ! grid indices - - real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & - workspace_x , & ! work vector (x components) - workspace_y ! work vector (y components) - - real (kind=dbl_kind), dimension (max_blocks) :: & - norm_squared ! 
array to accumulate squared norm of grid function over blocks - - real (kind=dbl_kind), dimension(nx_block, ny_block, max_blocks, maxinner+1) :: & - arnoldi_basis_x , & ! Arnoldi basis (x components) - arnoldi_basis_y ! Arnoldi basis (y components) - - real (kind=dbl_kind), dimension(nx_block, ny_block, max_blocks, maxinner) :: & - orig_basis_x , & ! original basis (x components) - orig_basis_y ! original basis (y components) - - real (kind=dbl_kind) :: & - norm_residual , & ! current L^2 norm of residual vector - inverse_norm , & ! inverse of the norm of a vector - norm_rhs , & ! L^2 norm of right-hand-side vector - nu, t ! local temporary values - - integer (kind=int_kind) :: & - initer , & ! inner (Arnoldi) loop counter - outiter , & ! outer (restarts) loop counter - nextit , & ! nextit == initer+1 - it, k, ii, jj ! reusable loop counters - - real (kind=dbl_kind), dimension(maxinner+1) :: & - rot_cos , & ! cosine elements of Givens rotations - rot_sin , & ! sine elements of Givens rotations - rhs_hess ! right hand side vector of the Hessenberg (least squares) system - - real (kind=dbl_kind), dimension(maxinner+1, maxinner) :: & - hessenberg ! system matrix of the Hessenberg (least squares) system - - character (len=char_len) :: & - precond_type ! type of preconditioner - - real (kind=dbl_kind) :: & - relative_tolerance ! relative_tolerance, i.e. tolerance*norm(initial residual) - - character(len=*), parameter :: subname = '(fgmres)' - - ! Here we go ! - - ! Initialize - outiter = 0 - nbiter = 0 - - norm_squared = c0 - precond_type = precond - - ! Cells with no ice should be zero-initialized - workspace_x = c0 - workspace_y = c0 - arnoldi_basis_x = c0 - arnoldi_basis_y = c0 - - ! solution is zero if RHS is zero - norm_rhs = global_norm(nx_block, ny_block, & - icellU , & - indxUi , indxUj , & - bx , by ) - if (norm_rhs == c0) then - solx = bx - soly = by - return - endif - - ! 
Residual of the initial iterate - - !$OMP PARALLEL DO PRIVATE(iblk) - do iblk = 1, nblocks - call matvec (nx_block , ny_block , & - icellU (iblk) , icellT (iblk), & - indxUi (:,iblk) , indxUj (:,iblk), & - indxTi (:,iblk) , indxTj (:,iblk), & - dxT (:,:,iblk) , dyT (:,:,iblk), & - dxhy (:,:,iblk) , dyhx (:,:,iblk), & - cxp (:,:,iblk) , cyp (:,:,iblk), & - cxm (:,:,iblk) , cym (:,:,iblk), & - solx (:,:,iblk) , soly (:,:,iblk), & - vrel (:,:,iblk) , Cb (:,:,iblk), & - zetax2 (:,:,iblk,:), etax2 (:,:,iblk,:), & - umassdti (:,:,iblk) , fmU (:,:,iblk), & - uarear (:,:,iblk) , & - workspace_x(:,:,iblk) , workspace_y(:,:,iblk)) - call residual_vec (nx_block , ny_block , & - icellU (iblk), & - indxUi (:,iblk), indxUj (:,iblk), & - bx (:,:,iblk), by (:,:,iblk), & - workspace_x(:,:,iblk), workspace_y(:,:,iblk), & - arnoldi_basis_x (:,:,iblk, 1), & - arnoldi_basis_y (:,:,iblk, 1)) - enddo - !$OMP END PARALLEL DO - - ! Start outer (restarts) loop - do - ! Compute norm of initial residual - norm_residual = global_norm(nx_block, ny_block, & - icellU , & - indxUi , indxUj , & - arnoldi_basis_x(:,:,:, 1), & - arnoldi_basis_y(:,:,:, 1)) - - if (my_task == master_task .and. monitor_fgmres) then - write(nu_diag, '(a,i4,a,d26.16)') "monitor_fgmres: iter_fgmres= ", nbiter, & - " fgmres_L2norm= ", norm_residual - endif - - ! Current guess is a good enough solution TODO: reactivate and test this - ! if (norm_residual < tolerance) then - ! return - ! end if - - ! Normalize the first Arnoldi vector - inverse_norm = c1 / norm_residual - !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) - do iblk = 1, nblocks - do ij = 1, icellU(iblk) - i = indxUi(ij, iblk) - j = indxUj(ij, iblk) - - arnoldi_basis_x(i, j, iblk, 1) = arnoldi_basis_x(i, j, iblk, 1) * inverse_norm - arnoldi_basis_y(i, j, iblk, 1) = arnoldi_basis_y(i, j, iblk, 1) * inverse_norm - enddo ! ij - enddo - !$OMP END PARALLEL DO - - if (outiter == 0) then - relative_tolerance = tolerance * norm_residual - end if - - ! 
Initialize 1-st term of RHS of Hessenberg system - rhs_hess(1) = norm_residual - rhs_hess(2:) = c0 - - initer = 0 - - ! Start of inner (Arnoldi) loop - do - - nbiter = nbiter + 1 - initer = initer + 1 - nextit = initer + 1 - ! precondition the current Arnoldi vector - call precondition(zetax2 , etax2 , & - Cb , vrel , & - umassdti , & - halo_info_mask, & - arnoldi_basis_x(:,:,:,initer), & - arnoldi_basis_y(:,:,:,initer), & - diagx , diagy , & - precond_type, & - workspace_x , workspace_y) - orig_basis_x(:,:,:,initer) = workspace_x - orig_basis_y(:,:,:,initer) = workspace_y - - ! Update workspace with boundary values - call stack_fields(workspace_x, workspace_y, fld2) - call ice_timer_start(timer_bound) - if (maskhalo_dyn) then - call ice_HaloUpdate (fld2, halo_info_mask, & - field_loc_NEcorner, field_type_vector) - else - call ice_HaloUpdate (fld2, halo_info, & - field_loc_NEcorner, field_type_vector) - endif - call ice_timer_stop(timer_bound) - call unstack_fields(fld2, workspace_x, workspace_y) - - !$OMP PARALLEL DO PRIVATE(iblk) - do iblk = 1, nblocks - call matvec (nx_block , ny_block , & - icellU (iblk) , icellT (iblk), & - indxUi (:,iblk) , indxUj (:,iblk), & - indxTi (:,iblk) , indxTj (:,iblk), & - dxT (:,:,iblk) , dyT (:,:,iblk), & - dxhy (:,:,iblk) , dyhx (:,:,iblk), & - cxp (:,:,iblk) , cyp (:,:,iblk), & - cxm (:,:,iblk) , cym (:,:,iblk), & - workspace_x(:,:,iblk) , workspace_y(:,:,iblk), & - vrel (:,:,iblk) , Cb (:,:,iblk), & - zetax2 (:,:,iblk,:), etax2 (:,:,iblk,:), & - umassdti (:,:,iblk) , fmU (:,:,iblk), & - uarear (:,:,iblk) , & - arnoldi_basis_x(:,:,iblk,nextit), & - arnoldi_basis_y(:,:,iblk,nextit)) - enddo - !$OMP END PARALLEL DO - - ! Orthogonalize the new vector - call orthogonalize(ortho_type , initer , & - nextit , maxinner , & - arnoldi_basis_x, arnoldi_basis_y, & - hessenberg) - - ! 
Compute norm of new Arnoldi vector and update Hessenberg matrix - hessenberg(nextit,initer) = global_norm(nx_block, ny_block, & - icellU , & - indxUi , indxUj , & - arnoldi_basis_x(:,:,:, nextit), & - arnoldi_basis_y(:,:,:, nextit)) - - ! Watch out for happy breakdown - if (.not. almost_zero( hessenberg(nextit,initer) ) ) then - ! Normalize next Arnoldi vector - inverse_norm = c1 / hessenberg(nextit,initer) - !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) - do iblk = 1, nblocks - do ij = 1, icellU(iblk) - i = indxUi(ij, iblk) - j = indxUj(ij, iblk) - - arnoldi_basis_x(i, j, iblk, nextit) = arnoldi_basis_x(i, j, iblk, nextit)*inverse_norm - arnoldi_basis_y(i, j, iblk, nextit) = arnoldi_basis_y(i, j, iblk, nextit)*inverse_norm - enddo ! ij - enddo - !$OMP END PARALLEL DO - end if - - ! Apply previous Givens rotation to the last column of the Hessenberg matrix - if (initer > 1) then - do k = 2, initer - t = hessenberg(k-1, initer) - hessenberg(k-1, initer) = rot_cos(k-1)*t + rot_sin(k-1)*hessenberg(k, initer) - hessenberg(k, initer) = -rot_sin(k-1)*t + rot_cos(k-1)*hessenberg(k, initer) - end do - end if - - ! Compute and apply new Givens rotation - nu = sqrt(hessenberg(initer,initer)**2 + hessenberg(nextit,initer)**2) - if (.not. almost_zero(nu)) then - rot_cos(initer) = hessenberg(initer,initer) / nu - rot_sin(initer) = hessenberg(nextit,initer) / nu - - rhs_hess(nextit) = -rot_sin(initer) * rhs_hess(initer) - rhs_hess(initer) = rot_cos(initer) * rhs_hess(initer) - - hessenberg(initer,initer) = rot_cos(initer) * hessenberg(initer,initer) + rot_sin(initer) * hessenberg(nextit,initer) - end if - - ! Check for convergence - norm_residual = abs(rhs_hess(nextit)) - - if (my_task == master_task .and. monitor_fgmres) then - write(nu_diag, '(a,i4,a,d26.16)') "monitor_fgmres: iter_fgmres= ", nbiter, & - " fgmres_L2norm= ", norm_residual - endif - - if ((initer >= maxinner) .or. (norm_residual <= relative_tolerance)) then - exit - endif - - end do ! end of inner (Arnoldi) loop - - ! 
At this point either the maximum number of inner iterations - ! was reached or the absolute residual is below the scaled tolerance. - - ! Solve the (now upper triangular) system "hessenberg * sol_hess = rhs_hess" - ! (sol_hess is stored in rhs_hess) - rhs_hess(initer) = rhs_hess(initer) / hessenberg(initer,initer) - do ii = 2, initer - k = initer - ii + 1 - t = rhs_hess(k) - do j = k + 1, initer - t = t - hessenberg(k,j) * rhs_hess(j) - end do - rhs_hess(k) = t / hessenberg(k,k) - end do - - ! Form linear combination to get new solution iterate - do it = 1, initer - t = rhs_hess(it) - !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) - do iblk = 1, nblocks - do ij = 1, icellU(iblk) - i = indxUi(ij, iblk) - j = indxUj(ij, iblk) - - solx(i, j, iblk) = solx(i, j, iblk) + t * orig_basis_x(i, j, iblk, it) - soly(i, j, iblk) = soly(i, j, iblk) + t * orig_basis_y(i, j, iblk, it) - enddo ! ij - enddo - !$OMP END PARALLEL DO - end do - - ! Increment outer loop counter and check for convergence - outiter = outiter + 1 - if (norm_residual <= relative_tolerance .or. outiter >= maxouter) then - return - end if - - ! Solution is not convergent : compute residual vector and continue. - - ! The residual vector is computed here using (see Saad p. 177) : - ! \begin{equation} - ! r = V_{m+1} * Q_m^T * (\gamma_{m+1} * e_{m+1}) - ! \end{equation} - ! where : - ! $r$ is the residual - ! $V_{m+1}$ is a matrix whose columns are the Arnoldi vectors from 1 to nextit (m+1) - ! $Q_m$ is the product of the Givens rotation : Q_m = G_m G_{m-1} ... G_1 - ! $gamma_{m+1}$ is the last element of rhs_hess - ! $e_{m+1})$ is the unit vector (0, 0, ..., 1)^T \in \reals^{m+1} - - ! Apply the Givens rotation in reverse order to g := \gamma_{m+1} * e_{m+1}, - ! store the result in rhs_hess - do it = 1, initer - jj = nextit - it + 1 - rhs_hess(jj-1) = -rot_sin(jj-1) * rhs_hess(jj) ! + rot_cos(jj-1) * g(jj-1) (== 0) - rhs_hess(jj) = rot_cos(jj-1) * rhs_hess(jj) ! + rot_sin(jj-1) * g(jj-1) (== 0) - end do - - ! 
Compute the residual by multiplying V_{m+1} and rhs_hess - workspace_x = c0 - workspace_y = c0 - do it = 1, nextit - !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) - do iblk = 1, nblocks - do ij = 1, icellU(iblk) - i = indxUi(ij, iblk) - j = indxUj(ij, iblk) - - workspace_x(i, j, iblk) = workspace_x(i, j, iblk) + rhs_hess(it) * arnoldi_basis_x(i, j, iblk, it) - workspace_y(i, j, iblk) = workspace_x(i, j, iblk) + rhs_hess(it) * arnoldi_basis_y(i, j, iblk, it) - enddo ! ij - enddo - !$OMP END PARALLEL DO - arnoldi_basis_x(:,:,:,1) = workspace_x - arnoldi_basis_y(:,:,:,1) = workspace_y - end do - end do ! end of outer (restarts) loop - - end subroutine fgmres - -!======================================================================= - -! PGMRES: Right-preconditioned generalized minimum residual method (with restarts). -! Solves the linear A x = b using GMRES with a right preconditioner -! -! authors: Stéphane Gaudreault, Abdessamad Qaddouri, Philippe Blain, ECCC - - subroutine pgmres (zetax2 , etax2 , & - Cb , vrel , & - umassdti , & - halo_info_mask, & - bx , by , & - diagx , diagy , & - tolerance, maxinner, & - maxouter , & - solx , soly , & - nbiter) - - use ice_boundary, only: ice_HaloUpdate - use ice_domain, only: maskhalo_dyn, halo_info - use ice_fileunits, only: bfbflag - use ice_timers, only: ice_timer_start, ice_timer_stop, timer_bound - - real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks,4), intent(in) :: & - zetax2 , & ! zetax2 = 2*zeta (bulk viscosity) - etax2 ! etax2 = 2*eta (shear viscosity) - - real (kind=dbl_kind), dimension(nx_block, ny_block, max_blocks), intent(in) :: & - vrel , & ! coefficient for tauw - Cb , & ! seabed stress coefficient - umassdti ! mass of U-cell/dte (kg/m^2 s) - - type (ice_halo), intent(in) :: & - halo_info_mask ! ghost cell update info for masked halo - - real (kind=dbl_kind), dimension(nx_block, ny_block, max_blocks), intent(in) :: & - bx , & ! Right hand side of the linear system (x components) - by ! 
Right hand side of the linear system (y components) - - real (kind=dbl_kind), dimension(nx_block, ny_block, max_blocks), intent(in) :: & - diagx , & ! Diagonal of the system matrix (x components) - diagy ! Diagonal of the system matrix (y components) - - real (kind=dbl_kind), intent(in) :: & - tolerance ! Tolerance to achieve. The algorithm terminates when the relative - ! residual is below tolerance - - integer (kind=int_kind), intent(in) :: & - maxinner, & ! Restart the method every maxinner inner (Arnoldi) iterations - maxouter ! Maximum number of outer (restarts) iterations - ! Iteration will stop after maxinner*maxouter Arnoldi steps - ! even if the specified tolerance has not been achieved - - real (kind=dbl_kind), dimension(nx_block, ny_block, max_blocks), intent(inout) :: & - solx , & ! Initial guess on input, approximate solution on output (x components) - soly ! Initial guess on input, approximate solution on output (y components) - - integer (kind=int_kind), intent(out) :: & - nbiter ! Total number of Arnoldi iterations performed - - ! local variables - - integer (kind=int_kind) :: & - iblk , & ! block index - ij , & ! index for indx[t|u][i|j] - i, j ! grid indices - - real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & - workspace_x , & ! work vector (x components) - workspace_y ! work vector (y components) - - real (kind=dbl_kind), dimension (max_blocks) :: & - norm_squared ! array to accumulate squared norm of grid function over blocks - - real (kind=dbl_kind), dimension(nx_block, ny_block, max_blocks, maxinner+1) :: & - arnoldi_basis_x , & ! Arnoldi basis (x components) - arnoldi_basis_y ! Arnoldi basis (y components) - - real (kind=dbl_kind) :: & - norm_residual , & ! current L^2 norm of residual vector - inverse_norm , & ! inverse of the norm of a vector - nu, t ! local temporary values - - integer (kind=int_kind) :: & - initer , & ! inner (Arnoldi) loop counter - outiter , & ! outer (restarts) loop counter - nextit , & ! 
nextit == initer+1 - it, k, ii, jj ! reusable loop counters - - real (kind=dbl_kind), dimension(maxinner+1) :: & - rot_cos , & ! cosine elements of Givens rotations - rot_sin , & ! sine elements of Givens rotations - rhs_hess ! right hand side vector of the Hessenberg (least squares) system - - real (kind=dbl_kind), dimension(maxinner+1, maxinner) :: & - hessenberg ! system matrix of the Hessenberg (least squares) system - - character(len=char_len) :: & - precond_type , & ! type of preconditioner - ortho_type ! type of orthogonalization - - real (kind=dbl_kind) :: & - relative_tolerance ! relative_tolerance, i.e. tolerance*norm(initial residual) - - character(len=*), parameter :: subname = '(pgmres)' - - ! Here we go ! - - ! Initialize - outiter = 0 - nbiter = 0 - - norm_squared = c0 - precond_type = 'diag' ! Jacobi preconditioner - ortho_type = 'cgs' ! classical gram-schmidt TODO: try with MGS - - ! Cells with no ice should be zero-initialized - workspace_x = c0 - workspace_y = c0 - arnoldi_basis_x = c0 - arnoldi_basis_y = c0 - - ! Residual of the initial iterate - - !$OMP PARALLEL DO PRIVATE(iblk) - do iblk = 1, nblocks - call matvec (nx_block , ny_block , & - icellU (iblk) , icellT (iblk), & - indxUi (:,iblk) , indxUj (:,iblk), & - indxTi (:,iblk) , indxTj (:,iblk), & - dxT (:,:,iblk) , dyT (:,:,iblk), & - dxhy (:,:,iblk) , dyhx (:,:,iblk), & - cxp (:,:,iblk) , cyp (:,:,iblk), & - cxm (:,:,iblk) , cym (:,:,iblk), & - solx (:,:,iblk) , soly (:,:,iblk), & - vrel (:,:,iblk) , Cb (:,:,iblk), & - zetax2 (:,:,iblk,:), etax2 (:,:,iblk,:), & - umassdti (:,:,iblk) , fmU (:,:,iblk), & - uarear (:,:,iblk) , & - workspace_x(:,:,iblk) , workspace_y(:,:,iblk)) - call residual_vec (nx_block , ny_block , & - icellU (iblk), & - indxUi (:,iblk), indxUj (:,iblk), & - bx (:,:,iblk), by (:,:,iblk), & - workspace_x(:,:,iblk), workspace_y(:,:,iblk), & - arnoldi_basis_x (:,:,iblk, 1), & - arnoldi_basis_y (:,:,iblk, 1)) - enddo - !$OMP END PARALLEL DO - - ! 
Start outer (restarts) loop - do - ! Compute norm of initial residual - norm_residual = global_norm(nx_block, ny_block, & - icellU , & - indxUi , indxUj , & - arnoldi_basis_x(:,:,:, 1), & - arnoldi_basis_y(:,:,:, 1)) - - if (my_task == master_task .and. monitor_pgmres) then - write(nu_diag, '(a,i4,a,d26.16)') "monitor_pgmres: iter_pgmres= ", nbiter, & - " pgmres_L2norm= ", norm_residual - endif - - ! Current guess is a good enough solution - ! if (norm_residual < tolerance) then - ! return - ! end if - - ! Normalize the first Arnoldi vector - inverse_norm = c1 / norm_residual - !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) - do iblk = 1, nblocks - do ij = 1, icellU(iblk) - i = indxUi(ij, iblk) - j = indxUj(ij, iblk) - - arnoldi_basis_x(i, j, iblk, 1) = arnoldi_basis_x(i, j, iblk, 1) * inverse_norm - arnoldi_basis_y(i, j, iblk, 1) = arnoldi_basis_y(i, j, iblk, 1) * inverse_norm - enddo ! ij - enddo - !$OMP END PARALLEL DO - - if (outiter == 0) then - relative_tolerance = tolerance * norm_residual - end if - - ! Initialize 1-st term of RHS of Hessenberg system - rhs_hess(1) = norm_residual - rhs_hess(2:) = c0 - - initer = 0 - - ! Start of inner (Arnoldi) loop - do - - nbiter = nbiter + 1 - initer = initer + 1 - nextit = initer + 1 - - ! precondition the current Arnoldi vector - call precondition(zetax2 , etax2 , & - Cb , vrel , & - umassdti , & - halo_info_mask, & - arnoldi_basis_x(:,:,:,initer), & - arnoldi_basis_y(:,:,:,initer), & - diagx , diagy , & - precond_type, & - workspace_x , workspace_y) - - ! Update workspace with boundary values - ! NOTE: skipped for efficiency since this is just a preconditioner - ! 
unless bfbflag is active - if (bfbflag /= 'off') then - call stack_fields(workspace_x, workspace_y, fld2) - call ice_timer_start(timer_bound) - if (maskhalo_dyn) then - call ice_HaloUpdate (fld2, halo_info_mask, & - field_loc_NEcorner, field_type_vector) - else - call ice_HaloUpdate (fld2, halo_info, & - field_loc_NEcorner, field_type_vector) - endif - call ice_timer_stop(timer_bound) - call unstack_fields(fld2, workspace_x, workspace_y) - endif - - !$OMP PARALLEL DO PRIVATE(iblk) - do iblk = 1, nblocks - call matvec (nx_block , ny_block , & - icellU (iblk) , icellT (iblk), & - indxUi (:,iblk) , indxUj (:,iblk), & - indxTi (:,iblk) , indxTj (:,iblk), & - dxT (:,:,iblk) , dyT (:,:,iblk), & - dxhy (:,:,iblk) , dyhx (:,:,iblk), & - cxp (:,:,iblk) , cyp (:,:,iblk), & - cxm (:,:,iblk) , cym (:,:,iblk), & - workspace_x(:,:,iblk) , workspace_y(:,:,iblk), & - vrel (:,:,iblk) , Cb (:,:,iblk), & - zetax2 (:,:,iblk,:), etax2 (:,:,iblk,:), & - umassdti (:,:,iblk) , fmU (:,:,iblk), & - uarear (:,:,iblk) , & - arnoldi_basis_x(:,:,iblk,nextit), & - arnoldi_basis_y(:,:,iblk,nextit)) - enddo - !$OMP END PARALLEL DO - - ! Orthogonalize the new vector - call orthogonalize(ortho_type , initer , & - nextit , maxinner , & - arnoldi_basis_x, arnoldi_basis_y, & - hessenberg) - - ! Compute norm of new Arnoldi vector and update Hessenberg matrix - hessenberg(nextit,initer) = global_norm(nx_block, ny_block, & - icellU , & - indxUi , indxUj , & - arnoldi_basis_x(:,:,:, nextit), & - arnoldi_basis_y(:,:,:, nextit)) - - ! Watch out for happy breakdown - if (.not. almost_zero( hessenberg(nextit,initer) ) ) then - ! 
Normalize next Arnoldi vector - inverse_norm = c1 / hessenberg(nextit,initer) - !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) - do iblk = 1, nblocks - do ij = 1, icellU(iblk) - i = indxUi(ij, iblk) - j = indxUj(ij, iblk) - - arnoldi_basis_x(i, j, iblk, nextit) = arnoldi_basis_x(i, j, iblk, nextit)*inverse_norm - arnoldi_basis_y(i, j, iblk, nextit) = arnoldi_basis_y(i, j, iblk, nextit)*inverse_norm - enddo ! ij - enddo - !$OMP END PARALLEL DO - end if - - ! Apply previous Givens rotation to the last column of the Hessenberg matrix - if (initer > 1) then - do k = 2, initer - t = hessenberg(k-1, initer) - hessenberg(k-1, initer) = rot_cos(k-1)*t + rot_sin(k-1)*hessenberg(k, initer) - hessenberg(k, initer) = -rot_sin(k-1)*t + rot_cos(k-1)*hessenberg(k, initer) - end do - end if - - ! Compute and apply new Givens rotation - nu = sqrt(hessenberg(initer,initer)**2 + hessenberg(nextit,initer)**2) - if (.not. almost_zero(nu)) then - rot_cos(initer) = hessenberg(initer,initer) / nu - rot_sin(initer) = hessenberg(nextit,initer) / nu - - rhs_hess(nextit) = -rot_sin(initer) * rhs_hess(initer) - rhs_hess(initer) = rot_cos(initer) * rhs_hess(initer) - - hessenberg(initer,initer) = rot_cos(initer) * hessenberg(initer,initer) + rot_sin(initer) * hessenberg(nextit,initer) - end if - - ! Check for convergence - norm_residual = abs(rhs_hess(nextit)) - - if (my_task == master_task .and. monitor_pgmres) then - write(nu_diag, '(a,i4,a,d26.16)') "monitor_pgmres: iter_pgmres= ", nbiter, & - " pgmres_L2norm= ", norm_residual - endif - - if ((initer >= maxinner) .or. (norm_residual <= relative_tolerance)) then - exit - endif - - end do ! end of inner (Arnoldi) loop - - ! At this point either the maximum number of inner iterations - ! was reached or the absolute residual is below the scaled tolerance. - - ! Solve the (now upper triangular) system "hessenberg * sol_hess = rhs_hess" - ! 
(sol_hess is stored in rhs_hess) - rhs_hess(initer) = rhs_hess(initer) / hessenberg(initer,initer) - do ii = 2, initer - k = initer - ii + 1 - t = rhs_hess(k) - do j = k + 1, initer - t = t - hessenberg(k,j) * rhs_hess(j) - end do - rhs_hess(k) = t / hessenberg(k,k) - end do - - ! Form linear combination to get new solution iterate - workspace_x = c0 - workspace_y = c0 - do it = 1, initer - t = rhs_hess(it) - !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) - do iblk = 1, nblocks - do ij = 1, icellU(iblk) - i = indxUi(ij, iblk) - j = indxUj(ij, iblk) - - workspace_x(i, j, iblk) = workspace_x(i, j, iblk) + t * arnoldi_basis_x(i, j, iblk, it) - workspace_y(i, j, iblk) = workspace_y(i, j, iblk) + t * arnoldi_basis_y(i, j, iblk, it) - enddo ! ij - enddo - !$OMP END PARALLEL DO - end do - - ! Call preconditioner - call precondition(zetax2 , etax2 , & - Cb , vrel , & - umassdti , & - halo_info_mask, & - workspace_x , workspace_y, & - diagx , diagy , & - precond_type, & - workspace_x , workspace_y) - - solx = solx + workspace_x - soly = soly + workspace_y - - ! Increment outer loop counter and check for convergence - outiter = outiter + 1 - if (norm_residual <= relative_tolerance .or. outiter >= maxouter) then - return - end if - - ! Solution is not convergent : compute residual vector and continue. - - ! The residual vector is computed here using (see Saad p. 177) : - ! \begin{equation} - ! r = V_{m+1} * Q_m^T * (\gamma_{m+1} * e_{m+1}) - ! \end{equation} - ! where : - ! $r$ is the residual - ! $V_{m+1}$ is a matrix whose columns are the Arnoldi vectors from 1 to nextit (m+1) - ! $Q_m$ is the product of the Givens rotation : Q_m = G_m G_{m-1} ... G_1 - ! $gamma_{m+1}$ is the last element of rhs_hess - ! $e_{m+1})$ is the unit vector (0, 0, ..., 1)^T \in \reals^{m+1} - - ! Apply the Givens rotation in reverse order to g := \gamma_{m+1} * e_{m+1}, - ! store the result in rhs_hess - do it = 1, initer - jj = nextit - it + 1 - rhs_hess(jj-1) = -rot_sin(jj-1) * rhs_hess(jj) ! 
+ rot_cos(jj-1) * g(jj-1) (== 0) - rhs_hess(jj) = rot_cos(jj-1) * rhs_hess(jj) ! + rot_sin(jj-1) * g(jj-1) (== 0) - end do - - ! Compute the residual by multiplying V_{m+1} and rhs_hess - workspace_x = c0 - workspace_y = c0 - do it = 1, nextit - !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) - do iblk = 1, nblocks - do ij = 1, icellU(iblk) - i = indxUi(ij, iblk) - j = indxUj(ij, iblk) - - workspace_x(i, j, iblk) = workspace_x(i, j, iblk) + rhs_hess(it) * arnoldi_basis_x(i, j, iblk, it) - workspace_y(i, j, iblk) = workspace_x(i, j, iblk) + rhs_hess(it) * arnoldi_basis_y(i, j, iblk, it) - enddo ! ij - enddo - !$OMP END PARALLEL DO - arnoldi_basis_x(:,:,:,1) = workspace_x - arnoldi_basis_y(:,:,:,1) = workspace_y - end do - end do ! end of outer (restarts) loop - - end subroutine pgmres - -!======================================================================= - -! Generic routine to precondition a vector -! -! authors: Philippe Blain, ECCC - - subroutine precondition(zetax2 , etax2, & - Cb , vrel , & - umassdti , & - halo_info_mask, & - vx , vy , & - diagx , diagy, & - precond_type, & - wx , wy) - - real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks,4), intent(in) :: & - zetax2 , & ! zetax2 = 2*zeta (bulk viscosity) - etax2 ! etax2 = 2*eta (shear viscosity) - - real (kind=dbl_kind), dimension(nx_block, ny_block, max_blocks), intent(in) :: & - vrel , & ! coefficient for tauw - Cb , & ! seabed stress coefficient - umassdti ! mass of U-cell/dte (kg/m^2 s) - - type (ice_halo), intent(in) :: & - halo_info_mask ! ghost cell update info for masked halo - - real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), intent(in) :: & - vx , & ! input vector (x components) - vy ! input vector (y components) - - real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), intent(in) :: & - diagx , & ! diagonal of the system matrix (x components) - diagy ! diagonal of the system matrix (y components) - - character (len=char_len), intent(in) :: & - precond_type ! 
type of preconditioner - - real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks), intent(inout) :: & - wx , & ! preconditionned vector (x components) - wy ! preconditionned vector (y components) - - ! local variables - - integer (kind=int_kind) :: & - iblk , & ! block index - ij , & ! compressed index - i, j ! grid indices - - real (kind=dbl_kind) :: & - tolerance ! Tolerance for PGMRES - - integer (kind=int_kind) :: & - maxinner ! Restart parameter for PGMRES - - integer (kind=int_kind) :: & - maxouter ! Maximum number of outer iterations for PGMRES - - integer (kind=int_kind) :: & - nbiter ! Total number of iteration PGMRES performed - - character(len=*), parameter :: subname = '(precondition)' - - if (precond_type == 'ident') then ! identity (no preconditioner) - wx = vx - wy = vy - elseif (precond_type == 'diag') then ! Jacobi preconditioner (diagonal) - !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) - do iblk = 1, nblocks - do ij = 1, icellU(iblk) - i = indxUi(ij, iblk) - j = indxUj(ij, iblk) - - wx(i,j,iblk) = vx(i,j,iblk) / diagx(i,j,iblk) - wy(i,j,iblk) = vy(i,j,iblk) / diagy(i,j,iblk) - enddo ! ij - enddo - !$OMP END PARALLEL DO - elseif (precond_type == 'pgmres') then ! PGMRES (Jacobi-preconditioned GMRES) - ! Initialize preconditioned vector to 0 ! TODO: try with wx = vx or vx/diagx - wx = c0 - wy = c0 - tolerance = reltol_pgmres - maxinner = dim_pgmres - maxouter = maxits_pgmres - call pgmres (zetax2, etax2 , & - Cb , vrel , & - umassdti , & - halo_info_mask , & - vx , vy , & - diagx , diagy , & - tolerance, maxinner, & - maxouter , & - wx , wy , & - nbiter) - else - call abort_ice(error_message='wrong preconditioner in ' // subname, & - file=__FILE__, line=__LINE__) - endif - end subroutine precondition - -!======================================================================= - -! Generic routine to orthogonalize a vector (arnoldi_basis_[xy](:, :, :, nextit)) -! against a set of vectors (arnoldi_basis_[xy](:, :, :, 1:initer)) -! -! 
authors: Philippe Blain, ECCC - - subroutine orthogonalize(ortho_type , initer , & - nextit , maxinner , & - arnoldi_basis_x, arnoldi_basis_y, & - hessenberg) - - character(len=*), intent(in) :: & - ortho_type ! type of orthogonalization - - integer (kind=int_kind), intent(in) :: & - initer , & ! inner (Arnoldi) loop counter - nextit , & ! nextit == initer+1 - maxinner ! Restart the method every maxinner inner iterations - - real (kind=dbl_kind), dimension(nx_block, ny_block, max_blocks, maxinner+1), intent(inout) :: & - arnoldi_basis_x , & ! arnoldi basis (x components) - arnoldi_basis_y ! arnoldi basis (y components) - - real (kind=dbl_kind), dimension(maxinner+1, maxinner), intent(inout) :: & - hessenberg ! system matrix of the Hessenberg (least squares) system - - ! local variables - - integer (kind=int_kind) :: & - it , & ! reusable loop counter - iblk , & ! block index - ij , & ! compressed index - i, j ! grid indices - - character(len=*), parameter :: subname = '(orthogonalize)' - - if (trim(ortho_type) == 'cgs') then ! Classical Gram-Schmidt - ! Classical Gram-Schmidt orthogonalisation process - ! First loop of Gram-Schmidt (compute coefficients) - do it = 1, initer - hessenberg(it, initer) = global_dot_product(nx_block, ny_block, & - icellU , & - indxUi , indxUj , & - arnoldi_basis_x(:,:,:, it) , & - arnoldi_basis_y(:,:,:, it) , & - arnoldi_basis_x(:,:,:, nextit), & - arnoldi_basis_y(:,:,:, nextit)) - end do - ! Second loop of Gram-Schmidt (orthonormalize) - do it = 1, initer - !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) - do iblk = 1, nblocks - do ij = 1, icellU(iblk) - i = indxUi(ij, iblk) - j = indxUj(ij, iblk) - - arnoldi_basis_x(i, j, iblk, nextit) = arnoldi_basis_x(i, j, iblk, nextit) & - - hessenberg(it,initer) * arnoldi_basis_x(i, j, iblk, it) - arnoldi_basis_y(i, j, iblk, nextit) = arnoldi_basis_y(i, j, iblk, nextit) & - - hessenberg(it,initer) * arnoldi_basis_y(i, j, iblk, it) - enddo ! 
ij - enddo - !$OMP END PARALLEL DO - end do - elseif (trim(ortho_type) == 'mgs') then ! Modified Gram-Schmidt - ! Modified Gram-Schmidt orthogonalisation process - do it = 1, initer - hessenberg(it, initer) = global_dot_product(nx_block, ny_block, & - icellU , & - indxUi , indxUj , & - arnoldi_basis_x(:,:,:, it) , & - arnoldi_basis_y(:,:,:, it) , & - arnoldi_basis_x(:,:,:, nextit), & - arnoldi_basis_y(:,:,:, nextit)) - - !$OMP PARALLEL DO PRIVATE(iblk,ij,i,j) - do iblk = 1, nblocks - do ij = 1, icellU(iblk) - i = indxUi(ij, iblk) - j = indxUj(ij, iblk) - - arnoldi_basis_x(i, j, iblk, nextit) = arnoldi_basis_x(i, j, iblk, nextit) & - - hessenberg(it,initer) * arnoldi_basis_x(i, j, iblk, it) - arnoldi_basis_y(i, j, iblk, nextit) = arnoldi_basis_y(i, j, iblk, nextit) & - - hessenberg(it,initer) * arnoldi_basis_y(i, j, iblk, it) - enddo ! ij - enddo - !$OMP END PARALLEL DO - end do - else - call abort_ice(error_message='wrong orthonalization in ' // subname, & - file=__FILE__, line=__LINE__) - endif - - end subroutine orthogonalize - -!======================================================================= - -! Check if value A is close to zero, up to machine precision -! -!author -! Stéphane Gaudreault, ECCC -- June 2014 -! -!revision -! v4-80 - Gaudreault S. - gfortran compatibility -! 2019 - Philippe Blain, ECCC - converted to CICE standards - - logical function almost_zero(A) result(retval) - - real (kind=dbl_kind), intent(in) :: A - - ! local variables - - character(len=*), parameter :: subname = '(almost_zero)' - - integer (kind=int8_kind) :: aBit - integer (kind=int8_kind), parameter :: two_complement = int(Z'80000000', kind=int8_kind) - aBit = 0 - aBit = transfer(A, aBit) - if (aBit < 0) then - aBit = two_complement - aBit - end if - ! 
lexicographic order test with a tolerance of 1 adjacent float - retval = (abs(aBit) <= 1) - - end function almost_zero - -!======================================================================= - - end module ice_dyn_vp - -!======================================================================= diff --git a/cicecore/cicedynB/general/ice_init.F90 b/cicecore/cicedynB/general/ice_init.F90 deleted file mode 100644 index 45ae58d8b..000000000 --- a/cicecore/cicedynB/general/ice_init.F90 +++ /dev/null @@ -1,3208 +0,0 @@ -!======================================================================= - -! parameter and variable initializations -! -! authors Elizabeth C. Hunke and William H. Lipscomb, LANL -! C. M. Bitz, UW -! -! 2004 WHL: Block structure added -! 2006 ECH: Added namelist variables, warnings. -! Replaced old default initial ice conditions with 3.14 version. -! Converted to free source form (F90). - - module ice_init - - use ice_kinds_mod - use ice_communicate, only: my_task, master_task, ice_barrier - use ice_constants, only: c0, c1, c2, c3, c5, c12, p2, p3, p5, p75, p166, & - cm_to_m - use ice_exit, only: abort_ice - use ice_fileunits, only: nu_nml, nu_diag, nu_diag_set, nml_filename, diag_type, & - ice_stdout, get_fileunit, release_fileunit, bfbflag, flush_fileunit, & - ice_IOUnitsMinUnit, ice_IOUnitsMaxUnit -#ifdef CESMCOUPLED - use ice_fileunits, only: inst_suffix -#endif - use icepack_intfc, only: icepack_warnings_flush, icepack_warnings_aborted - use icepack_intfc, only: icepack_aggregate - use icepack_intfc, only: icepack_init_trcr - use icepack_intfc, only: icepack_init_parameters - use icepack_intfc, only: icepack_init_tracer_flags - use icepack_intfc, only: icepack_init_tracer_sizes - use icepack_intfc, only: icepack_query_tracer_flags - use icepack_intfc, only: icepack_query_tracer_sizes - use icepack_intfc, only: icepack_query_tracer_indices - use icepack_intfc, only: icepack_query_parameters - - implicit none - private - - 
character(len=char_len_long), public :: & - ice_ic ! method of ice cover initialization - ! 'internal' => set from ice_data_ namelist - ! 'none' => no ice - ! filename => read file - - public :: input_data, init_state, set_state_var - -!======================================================================= - - contains - -!======================================================================= - -! Namelist variables, set to default values; may be altered -! at run time -! -! author Elizabeth C. Hunke, LANL - - subroutine input_data - - use ice_broadcast, only: broadcast_scalar, broadcast_array - use ice_diagnostics, only: diag_file, print_global, print_points, latpnt, lonpnt, & - debug_model, debug_model_step, debug_model_task, & - debug_model_i, debug_model_j, debug_model_iblk - use ice_domain, only: close_boundaries, orca_halogrid - use ice_domain_size, only: ncat, nilyr, nslyr, nblyr, nfsd, nfreq, & - n_iso, n_aero, n_zaero, n_algae, & - n_doc, n_dic, n_don, n_fed, n_fep, & - max_nstrm - use ice_calendar, only: year_init, month_init, day_init, sec_init, & - istep0, histfreq, histfreq_n, histfreq_base, & - dumpfreq, dumpfreq_n, diagfreq, dumpfreq_base, & - npt, dt, ndtd, days_per_year, use_leap_years, & - write_ic, dump_last, npt_unit - use ice_arrays_column, only: oceanmixed_ice - use ice_restart_column, only: restart_age, restart_FY, restart_lvl, & - restart_pond_lvl, restart_pond_topo, restart_aero, & - restart_fsd, restart_iso, restart_snow - use ice_restart_shared, only: & - restart, restart_ext, restart_coszen, restart_dir, restart_file, pointer_file, & - runid, runtype, use_restart_time, restart_format, lcdf64 - use ice_history_shared, only: hist_avg, history_dir, history_file, & - incond_dir, incond_file, version_name, & - history_precision, history_format - use ice_flux, only: update_ocn_f, l_mpond_fresh - use ice_flux, only: default_season - use ice_flux_bgc, only: cpl_bgc - use ice_forcing, only: & - ycycle, fyear_init, debug_forcing, & - 
atm_data_type, atm_data_dir, precip_units, rotate_wind, & - atm_data_format, ocn_data_format, & - bgc_data_type, & - ocn_data_type, ocn_data_dir, wave_spec_file, & - oceanmixed_file, restore_ocn, trestore, & - ice_data_type, ice_data_conc, ice_data_dist, & - snw_filename, & - snw_tau_fname, snw_kappa_fname, snw_drdt0_fname, & - snw_rhos_fname, snw_Tgrd_fname, snw_T_fname - use ice_arrays_column, only: bgc_data_dir, fe_data_type - use ice_grid, only: grid_file, gridcpl_file, kmt_file, & - bathymetry_file, use_bathymetry, & - bathymetry_format, kmt_type, & - grid_type, grid_format, & - grid_ice, grid_ice_thrm, grid_ice_dynu, grid_ice_dynv, & - grid_ocn, grid_ocn_thrm, grid_ocn_dynu, grid_ocn_dynv, & - grid_atm, grid_atm_thrm, grid_atm_dynu, grid_atm_dynv, & - dxrect, dyrect, dxscale, dyscale, scale_dxdy, & - lonrefrect, latrefrect, pgl_global_ext - use ice_dyn_shared, only: ndte, kdyn, revised_evp, yield_curve, & - evp_algorithm, visc_method, & - seabed_stress, seabed_stress_method, & - k1, k2, alphab, threshold_hw, Ktens, & - e_yieldcurve, e_plasticpot, coriolis, & - ssh_stress, kridge, brlx, arlx, & - deltaminEVP, deltaminVP, capping, & - elasticDamp - - use ice_dyn_vp, only: maxits_nonlin, precond, dim_fgmres, dim_pgmres, maxits_fgmres, & - maxits_pgmres, monitor_nonlin, monitor_fgmres, & - monitor_pgmres, reltol_nonlin, reltol_fgmres, reltol_pgmres, & - algo_nonlin, fpfunc_andacc, dim_andacc, reltol_andacc, & - damping_andacc, start_andacc, use_mean_vrel, ortho_type - use ice_transport_driver, only: advection, conserv_check - use ice_restoring, only: restore_ice - use ice_timers, only: timer_stats - use ice_memusage, only: memory_stats -#ifdef CESMCOUPLED - use shr_file_mod, only: shr_file_setIO -#endif - - ! local variables - - integer (kind=int_kind) :: & - nml_error, & ! namelist i/o error flag - n ! 
loop index - -#ifdef CESMCOUPLED - logical :: exists -#endif - - real (kind=dbl_kind) :: ustar_min, albicev, albicei, albsnowv, albsnowi, & - ahmax, R_ice, R_pnd, R_snw, dT_mlt, rsnw_mlt, emissivity, & - mu_rdg, hs0, dpscale, rfracmin, rfracmax, pndaspect, hs1, hp1, & - a_rapid_mode, Rac_rapid_mode, aspect_rapid_mode, dSdt_slow_mode, & - phi_c_slow_mode, phi_i_mushy, kalg, atmiter_conv, Pstar, Cstar, & - sw_frac, sw_dtemp, floediam, hfrazilmin, iceruf, iceruf_ocn, & - rsnw_fall, rsnw_tmax, rhosnew, rhosmin, rhosmax, & - windmin, drhosdwind, snwlvlfac - - integer (kind=int_kind) :: ktherm, kstrength, krdg_partic, krdg_redist, natmiter, & - kitd, kcatbound, ktransport - - character (len=char_len) :: shortwave, albedo_type, conduct, fbot_xfer_type, & - tfrz_option, frzpnd, atmbndy, wave_spec_type, snwredist, snw_aging_table, & - capping_method - - logical (kind=log_kind) :: calc_Tsfc, formdrag, highfreq, calc_strair, wave_spec, & - sw_redist, calc_dragio, use_smliq_pnd, snwgrain - - logical (kind=log_kind) :: tr_iage, tr_FY, tr_lvl, tr_pond - logical (kind=log_kind) :: tr_iso, tr_aero, tr_fsd, tr_snow - logical (kind=log_kind) :: tr_pond_lvl, tr_pond_topo - integer (kind=int_kind) :: numin, numax ! unit number limits - - integer (kind=int_kind) :: rplvl, rptopo - real (kind=dbl_kind) :: Cf, ksno, puny - character (len=char_len) :: abort_list - character (len=128) :: tmpstr2 - - character(len=*), parameter :: subname='(input_data)' - - !----------------------------------------------------------------- - ! 
Namelist variables - !----------------------------------------------------------------- - - namelist /setup_nml/ & - days_per_year, use_leap_years, istep0, npt_unit, & - dt, npt, ndtd, numin, & - runtype, runid, bfbflag, numax, & - ice_ic, restart, restart_dir, restart_file, & - restart_ext, use_restart_time, restart_format, lcdf64, & - pointer_file, dumpfreq, dumpfreq_n, dump_last, & - diagfreq, diag_type, diag_file, history_format,& - print_global, print_points, latpnt, lonpnt, & - debug_forcing, histfreq, histfreq_n, hist_avg, & - history_dir, history_file, history_precision, cpl_bgc, & - histfreq_base, dumpfreq_base, timer_stats, memory_stats, & - conserv_check, debug_model, debug_model_step, & - debug_model_i, debug_model_j, debug_model_iblk, debug_model_task, & - year_init, month_init, day_init, sec_init, & - write_ic, incond_dir, incond_file, version_name - - namelist /grid_nml/ & - grid_format, grid_type, grid_file, kmt_file, & - bathymetry_file, use_bathymetry, nfsd, bathymetry_format, & - ncat, nilyr, nslyr, nblyr, & - kcatbound, gridcpl_file, dxrect, dyrect, & - dxscale, dyscale, lonrefrect, latrefrect, & - scale_dxdy, & - close_boundaries, orca_halogrid, grid_ice, kmt_type, & - grid_atm, grid_ocn - - namelist /tracer_nml/ & - tr_iage, restart_age, & - tr_FY, restart_FY, & - tr_lvl, restart_lvl, & - tr_pond_lvl, restart_pond_lvl, & - tr_pond_topo, restart_pond_topo, & - tr_snow, restart_snow, & - tr_iso, restart_iso, & - tr_aero, restart_aero, & - tr_fsd, restart_fsd, & - n_iso, n_aero, n_zaero, n_algae, & - n_doc, n_dic, n_don, n_fed, n_fep - - namelist /thermo_nml/ & - kitd, ktherm, conduct, ksno, & - a_rapid_mode, Rac_rapid_mode, aspect_rapid_mode, & - dSdt_slow_mode, phi_c_slow_mode, phi_i_mushy, & - floediam, hfrazilmin - - namelist /dynamics_nml/ & - kdyn, ndte, revised_evp, yield_curve, & - evp_algorithm, elasticDamp, & - brlx, arlx, ssh_stress, & - advection, coriolis, kridge, ktransport, & - kstrength, krdg_partic, krdg_redist, mu_rdg, & - 
e_yieldcurve, e_plasticpot, visc_method, & - maxits_nonlin, precond, dim_fgmres, & - dim_pgmres, maxits_fgmres, maxits_pgmres, monitor_nonlin, & - monitor_fgmres, monitor_pgmres, reltol_nonlin, reltol_fgmres, & - reltol_pgmres, algo_nonlin, dim_andacc, reltol_andacc, & - damping_andacc, start_andacc, fpfunc_andacc, use_mean_vrel, & - ortho_type, seabed_stress, seabed_stress_method, & - k1, k2, alphab, threshold_hw, & - deltaminEVP, deltaminVP, capping_method, & - Cf, Pstar, Cstar, Ktens - - namelist /shortwave_nml/ & - shortwave, albedo_type, & - albicev, albicei, albsnowv, albsnowi, & - ahmax, R_ice, R_pnd, R_snw, & - sw_redist, sw_frac, sw_dtemp, & - dT_mlt, rsnw_mlt, kalg - - namelist /ponds_nml/ & - hs0, dpscale, frzpnd, & - rfracmin, rfracmax, pndaspect, hs1, & - hp1 - - namelist /snow_nml/ & - snwredist, snwgrain, rsnw_fall, rsnw_tmax, & - rhosnew, rhosmin, rhosmax, snwlvlfac, & - windmin, drhosdwind, use_smliq_pnd, snw_aging_table,& - snw_filename, snw_rhos_fname, snw_Tgrd_fname,snw_T_fname, & - snw_tau_fname, snw_kappa_fname, snw_drdt0_fname - - namelist /forcing_nml/ & - formdrag, atmbndy, calc_strair, calc_Tsfc, & - highfreq, natmiter, atmiter_conv, calc_dragio, & - ustar_min, emissivity, iceruf, iceruf_ocn, & - fbot_xfer_type, update_ocn_f, l_mpond_fresh, tfrz_option, & - oceanmixed_ice, restore_ice, restore_ocn, trestore, & - precip_units, default_season, wave_spec_type,nfreq, & - atm_data_type, ocn_data_type, bgc_data_type, fe_data_type, & - ice_data_type, ice_data_conc, ice_data_dist, & - fyear_init, ycycle, wave_spec_file,restart_coszen, & - atm_data_dir, ocn_data_dir, bgc_data_dir, & - atm_data_format, ocn_data_format, rotate_wind, & - oceanmixed_file - - !----------------------------------------------------------------- - ! default values - !----------------------------------------------------------------- - - abort_list = "" - - call icepack_query_parameters(puny_out=puny) -! nu_diag not yet defined -! call icepack_warnings_flush(nu_diag) -! 
if (icepack_warnings_aborted()) call abort_ice(error_message=subname//'Icepack Abort0', & -! file=__FILE__, line=__LINE__) - - days_per_year = 365 ! number of days in a year - use_leap_years= .false.! if true, use leap years (Feb 29) - year_init = 0 ! initial year - month_init = 1 ! initial month - day_init = 1 ! initial day - sec_init = 0 ! initial second - istep0 = 0 ! no. of steps taken in previous integrations, - ! real (dumped) or imagined (to set calendar) -#ifndef CESMCOUPLED - dt = 3600.0_dbl_kind ! time step, s -#endif - numin = 11 ! min allowed unit number - numax = 99 ! max allowed unit number - npt = 99999 ! total number of time steps (dt) - npt_unit = '1' ! units of npt 'y', 'm', 'd', 's', '1' - diagfreq = 24 ! how often diag output is written - debug_model = .false. ! debug output - debug_model_step = 0 ! debug model after this step number - debug_model_i = -1 ! debug model local i index - debug_model_j = -1 ! debug model local j index - debug_model_iblk = -1 ! debug model local iblk number - debug_model_task = -1 ! debug model local task number - print_points = .false. ! if true, print point data - print_global = .true. ! if true, print global diagnostic data - timer_stats = .false. ! if true, print out detailed timer statistics - memory_stats = .false. ! if true, print out memory information - bfbflag = 'off' ! off = optimized - diag_type = 'stdout' - diag_file = 'ice_diag.d' - histfreq(1) = '1' ! output frequency option for different streams - histfreq(2) = 'h' ! output frequency option for different streams - histfreq(3) = 'd' ! output frequency option for different streams - histfreq(4) = 'm' ! output frequency option for different streams - histfreq(5) = 'y' ! output frequency option for different streams - histfreq_n(:) = 1 ! output frequency - histfreq_base = 'zero' ! output frequency reference date - hist_avg = .true. ! if true, write time-averages (not snapshots) - history_format = 'default' ! history file format - history_dir = './' ! 
write to executable dir for default - history_file = 'iceh' ! history file name prefix - history_precision = 4 ! precision of history files - write_ic = .false. ! write out initial condition - cpl_bgc = .false. ! couple bgc thru driver - incond_dir = history_dir ! write to history dir for default - incond_file = 'iceh_ic'! file prefix - dumpfreq='y' ! restart frequency option - dumpfreq_n = 1 ! restart frequency - dumpfreq_base = 'init' ! restart frequency reference date - dump_last = .false. ! write restart on last time step - restart_dir = './' ! write to executable dir for default - restart_file = 'iced' ! restart file name prefix - restart_ext = .false. ! if true, read/write ghost cells - restart_coszen = .false. ! if true, read/write coszen - pointer_file = 'ice.restart_file' - restart_format = 'default' ! restart file format - lcdf64 = .false. ! 64 bit offset for netCDF - ice_ic = 'default' ! latitude and sst-dependent - grid_format = 'bin' ! file format ('bin'=binary or 'nc'=netcdf) - grid_type = 'rectangular'! define rectangular grid internally - grid_file = 'unknown_grid_file' - grid_ice = 'B' ! underlying grid system - grid_atm = 'A' ! underlying atm forcing/coupling grid - grid_ocn = 'A' ! underlying atm forcing/coupling grid - gridcpl_file = 'unknown_gridcpl_file' - orca_halogrid = .false. ! orca haloed grid - bathymetry_file = 'unknown_bathymetry_file' - bathymetry_format = 'default' - use_bathymetry = .false. - kmt_type = 'file' - kmt_file = 'unknown_kmt_file' - version_name = 'unknown_version_name' - ncat = 0 ! number of ice thickness categories - nfsd = 1 ! number of floe size categories (1 = default) - nilyr = 0 ! number of vertical ice layers - nslyr = 0 ! number of vertical snow layers - nblyr = 0 ! number of bio layers - - kitd = 1 ! type of itd conversions (0 = delta, 1 = linear) - kcatbound = 1 ! category boundary formula (0 = old, 1 = new, etc) - kdyn = 1 ! type of dynamics (-1, 0 = off, 1 = evp, 2 = eap, 3 = vp) - ndtd = 1 ! 
dynamic time steps per thermodynamic time step - ndte = 120 ! subcycles per dynamics timestep: ndte=dt_dyn/dte - evp_algorithm = 'standard_2d' ! EVP kernel (standard_2d=standard cice evp; shared_mem_1d=1d shared memory and no mpi - elasticDamp = 0.36_dbl_kind ! coefficient for calculating the parameter E - pgl_global_ext = .false. ! if true, init primary grid lengths (global ext.) - brlx = 300.0_dbl_kind ! revised_evp values. Otherwise overwritten in ice_dyn_shared - arlx = 300.0_dbl_kind ! revised_evp values. Otherwise overwritten in ice_dyn_shared - revised_evp = .false. ! if true, use revised procedure for evp dynamics - yield_curve = 'ellipse' ! yield curve - kstrength = 1 ! 1 = Rothrock 75 strength, 0 = Hibler 79 - Pstar = 2.75e4_dbl_kind ! constant in Hibler strength formula (kstrength = 0) - Cstar = 20._dbl_kind ! constant in Hibler strength formula (kstrength = 0) - krdg_partic = 1 ! 1 = new participation, 0 = Thorndike et al 75 - krdg_redist = 1 ! 1 = new redistribution, 0 = Hibler 80 - mu_rdg = 3 ! e-folding scale of ridged ice, krdg_partic=1 (m^0.5) - Cf = 17.0_dbl_kind ! ratio of ridging work to PE change in ridging - ksno = 0.3_dbl_kind ! snow thermal conductivity - dxrect = 0.0_dbl_kind ! user defined grid spacing in cm in x direction - dyrect = 0.0_dbl_kind ! user defined grid spacing in cm in y direction - lonrefrect = -156.50_dbl_kind ! lower left corner lon for rectgrid - latrefrect = 71.35_dbl_kind ! lower left corner lat for rectgrid - scale_dxdy = .false. ! apply dxscale, dyscale to rectgrid - dxscale = 1.0_dbl_kind ! user defined rectgrid x-grid scale factor (e.g., 1.02) - dyscale = 1.0_dbl_kind ! user defined rectgrid y-grid scale factor (e.g., 1.02) - close_boundaries = .false. ! true = set land on edges of grid - seabed_stress= .false. ! if true, seabed stress for landfast is on - seabed_stress_method = 'LKD'! LKD = Lemieux et al 2015, probabilistic = Dupont et al. in prep - k1 = 7.5_dbl_kind ! 
1st free parameter for landfast parameterization - k2 = 15.0_dbl_kind ! 2nd free parameter (N/m^3) for landfast parametrization - alphab = 20.0_dbl_kind ! alphab=Cb factor in Lemieux et al 2015 - threshold_hw = 30.0_dbl_kind ! max water depth for grounding - Ktens = 0.0_dbl_kind ! T=Ktens*P (tensile strength: see Konig and Holland, 2010) - e_yieldcurve = 2.0_dbl_kind ! VP aspect ratio of elliptical yield curve - e_plasticpot = 2.0_dbl_kind ! VP aspect ratio of elliptical plastic potential - visc_method = 'avg_zeta' ! calc viscosities at U point: avg_strength, avg_zeta - deltaminEVP = 1e-11_dbl_kind ! minimum delta for viscosities (EVP, Hunke 2001) - deltaminVP = 2e-9_dbl_kind ! minimum delta for viscosities (VP, Hibler 1979) - capping_method = 'max' ! method for capping of viscosities (max=Hibler 1979,sum=Kreyscher2000) - maxits_nonlin = 10 ! max nb of iteration for nonlinear solver - precond = 'pgmres' ! preconditioner for fgmres: 'ident' (identity), 'diag' (diagonal), - ! 'pgmres' (Jacobi-preconditioned GMRES) - dim_fgmres = 50 ! size of fgmres Krylov subspace - dim_pgmres = 5 ! size of pgmres Krylov subspace - maxits_fgmres = 50 ! max nb of iteration for fgmres - maxits_pgmres = 5 ! max nb of iteration for pgmres - monitor_nonlin = .false. ! print nonlinear residual norm - monitor_fgmres = .false. ! print fgmres residual norm - monitor_pgmres = .false. ! print pgmres residual norm - ortho_type = 'mgs' ! orthogonalization procedure 'cgs' or 'mgs' - reltol_nonlin = 1e-8_dbl_kind ! nonlinear stopping criterion: reltol_nonlin*res(k=0) - reltol_fgmres = 1e-1_dbl_kind ! fgmres stopping criterion: reltol_fgmres*res(k) - reltol_pgmres = 1e-6_dbl_kind ! pgmres stopping criterion: reltol_pgmres*res(k) - algo_nonlin = 'picard' ! nonlinear algorithm: 'picard' (Picard iteration), 'anderson' (Anderson acceleration) - fpfunc_andacc = 1 ! fixed point function for Anderson acceleration: - ! 1: g(x) = FMGRES(A(x),b(x)), 2: g(x) = x - A(x)x + b(x) - dim_andacc = 5 ! 
size of Anderson minimization matrix (number of saved previous residuals) - reltol_andacc = 1e-6_dbl_kind ! relative tolerance for Anderson acceleration - damping_andacc = 0 ! damping factor for Anderson acceleration - start_andacc = 0 ! acceleration delay factor (acceleration starts at this iteration) - use_mean_vrel = .true. ! use mean of previous 2 iterates to compute vrel - advection = 'remap' ! incremental remapping transport scheme - conserv_check = .false. ! tracer conservation check - shortwave = 'ccsm3' ! 'ccsm3' or 'dEdd' (delta-Eddington) - albedo_type = 'ccsm3' ! 'ccsm3' or 'constant' - ktherm = 1 ! -1 = OFF, 1 = BL99, 2 = mushy thermo - conduct = 'bubbly' ! 'MU71' or 'bubbly' (Pringle et al 2007) - coriolis = 'latitude' ! latitude dependent, or 'constant' - ssh_stress = 'geostrophic' ! 'geostrophic' or 'coupled' - kridge = 1 ! -1 = off, 1 = on - ktransport = 1 ! -1 = off, 1 = on - calc_Tsfc = .true. ! calculate surface temperature - update_ocn_f = .false. ! include fresh water and salt fluxes for frazil - ustar_min = 0.005 ! minimum friction velocity for ocean heat flux (m/s) - iceruf = 0.0005_dbl_kind ! ice surface roughness at atmosphere interface (m) - iceruf_ocn = 0.03_dbl_kind ! under-ice roughness (m) - calc_dragio = .false. ! compute dragio from iceruf_ocn and thickness of first ocean level - emissivity = 0.985 ! emissivity of snow and ice - l_mpond_fresh = .false. ! logical switch for including meltpond freshwater - ! flux feedback to ocean model - fbot_xfer_type = 'constant' ! transfer coefficient type for ocn heat flux - R_ice = 0.00_dbl_kind ! tuning parameter for sea ice - R_pnd = 0.00_dbl_kind ! tuning parameter for ponded sea ice - R_snw = 1.50_dbl_kind ! tuning parameter for snow over sea ice - dT_mlt = 1.5_dbl_kind ! change in temp to give non-melt to melt change - ! in snow grain radius - rsnw_mlt = 1500._dbl_kind ! maximum melting snow grain radius - kalg = 0.60_dbl_kind ! algae absorption coefficient for 0.5 m thick layer - ! 
0.5 m path of 75 mg Chl a / m2 - hp1 = 0.01_dbl_kind ! critical pond lid thickness for topo ponds - hs0 = 0.03_dbl_kind ! snow depth for transition to bare sea ice (m) - hs1 = 0.03_dbl_kind ! snow depth for transition to bare pond ice (m) - dpscale = c1 ! alter e-folding time scale for flushing - frzpnd = 'cesm' ! melt pond refreezing parameterization - rfracmin = 0.15_dbl_kind ! minimum retained fraction of meltwater - rfracmax = 0.85_dbl_kind ! maximum retained fraction of meltwater - pndaspect = 0.8_dbl_kind ! ratio of pond depth to area fraction - snwredist = 'none' ! type of snow redistribution - snw_aging_table = 'test' ! snow aging lookup table - snw_filename = 'unknown' ! snowtable filename - snw_tau_fname = 'unknown' ! snowtable file tau fieldname - snw_kappa_fname = 'unknown' ! snowtable file kappa fieldname - snw_drdt0_fname = 'unknown' ! snowtable file drdt0 fieldname - snw_rhos_fname = 'unknown' ! snowtable file rhos fieldname - snw_Tgrd_fname = 'unknown' ! snowtable file Tgrd fieldname - snw_T_fname = 'unknown' ! snowtable file T fieldname - snwgrain = .false. ! snow metamorphosis - use_smliq_pnd = .false. ! use liquid in snow for ponds - rsnw_fall = 100.0_dbl_kind ! radius of new snow (10^-6 m) ! advanced snow physics: 54.526 x 10^-6 m - rsnw_tmax = 1500.0_dbl_kind ! maximum snow radius (10^-6 m) - rhosnew = 100.0_dbl_kind ! new snow density (kg/m^3) - rhosmin = 100.0_dbl_kind ! minimum snow density (kg/m^3) - rhosmax = 450.0_dbl_kind ! maximum snow density (kg/m^3) - windmin = 10.0_dbl_kind ! minimum wind speed to compact snow (m/s) - drhosdwind= 27.3_dbl_kind ! wind compaction factor for snow (kg s/m^4) - snwlvlfac = 0.3_dbl_kind ! fractional increase in snow depth for bulk redistribution - albicev = 0.78_dbl_kind ! visible ice albedo for h > ahmax - albicei = 0.36_dbl_kind ! near-ir ice albedo for h > ahmax - albsnowv = 0.98_dbl_kind ! cold snow albedo, visible - albsnowi = 0.70_dbl_kind ! cold snow albedo, near IR - ahmax = 0.3_dbl_kind ! 
thickness above which ice albedo is constant (m) - atmbndy = 'similarity' ! Atm boundary layer: 'similarity', 'constant' or 'mixed' - default_season = 'winter' ! default forcing data, if data is not read in - fyear_init = 1900 ! first year of forcing cycle - ycycle = 1 ! number of years in forcing cycle - atm_data_format = 'bin' ! file format ('bin'=binary or 'nc'=netcdf) - atm_data_type = 'default' - atm_data_dir = ' ' - rotate_wind = .true. ! rotate wind/stress composants to computational grid orientation - calc_strair = .true. ! calculate wind stress - formdrag = .false. ! calculate form drag - highfreq = .false. ! calculate high frequency RASM coupling - natmiter = 5 ! number of iterations for atm boundary layer calcs - atmiter_conv = c0 ! ustar convergence criteria - precip_units = 'mks' ! 'mm_per_month' or - ! 'mm_per_sec' = 'mks' = kg/m^2 s - tfrz_option = 'mushy' ! freezing temp formulation - oceanmixed_ice = .false. ! if true, use internal ocean mixed layer - wave_spec_type = 'none' ! type of wave spectrum forcing - nfreq = 25 ! number of wave frequencies - wave_spec_file = ' ' ! wave forcing file name - ocn_data_format = 'bin' ! file format ('bin'=binary or 'nc'=netcdf) - bgc_data_type = 'default' - fe_data_type = 'default' - ice_data_type = 'default' ! used by some tests to initialize ice state (overall type and mask) - ice_data_conc = 'default' ! used by some tests to initialize ice state (concentration) - ice_data_dist = 'default' ! used by some tests to initialize ice state (distribution) - bgc_data_dir = 'unknown_bgc_data_dir' - ocn_data_type = 'default' - ocn_data_dir = 'unknown_ocn_data_dir' - oceanmixed_file = 'unknown_oceanmixed_file' ! ocean forcing data - restore_ocn = .false. ! restore sst if true - trestore = 90 ! restoring timescale, days (0 instantaneous) - restore_ice = .false. ! restore ice state on grid edges if true - debug_forcing = .false. ! true writes diagnostics for input forcing - - latpnt(1) = 90._dbl_kind ! 
latitude of diagnostic point 1 (deg) - lonpnt(1) = 0._dbl_kind ! longitude of point 1 (deg) - latpnt(2) = -65._dbl_kind ! latitude of diagnostic point 2 (deg) - lonpnt(2) = -45._dbl_kind ! longitude of point 2 (deg) - -#ifndef CESMCOUPLED - runid = 'unknown' ! run ID used in CESM and for machine 'bering' - runtype = 'initial' ! run type: 'initial', 'continue' - restart = .false. ! if true, read ice state from restart file - use_restart_time = .false. ! if true, use time info written in file -#endif - - ! extra tracers - tr_iage = .false. ! ice age - restart_age = .false. ! ice age restart - tr_FY = .false. ! ice age - restart_FY = .false. ! ice age restart - tr_lvl = .false. ! level ice - restart_lvl = .false. ! level ice restart - tr_pond_lvl = .false. ! level-ice melt ponds - restart_pond_lvl = .false. ! melt ponds restart - tr_pond_topo = .false. ! explicit melt ponds (topographic) - restart_pond_topo = .false. ! melt ponds restart - tr_snow = .false. ! advanced snow physics - restart_snow = .false. ! advanced snow physics restart - tr_iso = .false. ! isotopes - restart_iso = .false. ! isotopes restart - tr_aero = .false. ! aerosols - restart_aero = .false. ! aerosols restart - tr_fsd = .false. ! floe size distribution - restart_fsd = .false. ! floe size distribution restart - - n_iso = 0 - n_aero = 0 - n_zaero = 0 - n_algae = 0 - n_doc = 0 - n_dic = 0 - n_don = 0 - n_fed = 0 - n_fep = 0 - - ! mushy layer gravity drainage physics - a_rapid_mode = 0.5e-3_dbl_kind ! channel radius for rapid drainage mode (m) - Rac_rapid_mode = 10.0_dbl_kind ! critical Rayleigh number - aspect_rapid_mode = 1.0_dbl_kind ! aspect ratio (larger is wider) - dSdt_slow_mode = -1.5e-7_dbl_kind ! slow mode drainage strength (m s-1 K-1) - phi_c_slow_mode = 0.05_dbl_kind ! critical liquid fraction porosity cutoff - phi_i_mushy = 0.85_dbl_kind ! liquid fraction of congelation ice - - floediam = 300.0_dbl_kind ! min thickness of new frazil ice (m) - hfrazilmin = 0.05_dbl_kind ! 
effective floe diameter (m) - - ! shortwave redistribution in the thermodynamics - sw_redist = .false. - sw_frac = 0.9_dbl_kind - sw_dtemp = 0.02_dbl_kind - - !----------------------------------------------------------------- - ! read from input file - !----------------------------------------------------------------- - -#ifdef CESMCOUPLED - nml_filename = 'ice_in'//trim(inst_suffix) -#endif - - if (my_task == master_task) then - - call get_fileunit(nu_nml) - open (nu_nml, file=trim(nml_filename), status='old',iostat=nml_error) - if (nml_error /= 0) then - call abort_ice(subname//'ERROR: open file '// & - trim(nml_filename), & - file=__FILE__, line=__LINE__) - endif - - write(nu_diag,*) subname,' Reading setup_nml' - rewind(unit=nu_nml, iostat=nml_error) - if (nml_error /= 0) then - call abort_ice(subname//'ERROR: setup_nml rewind ', & - file=__FILE__, line=__LINE__) - endif - nml_error = 1 - do while (nml_error > 0) - read(nu_nml, nml=setup_nml,iostat=nml_error) - end do - if (nml_error /= 0) then - call abort_ice(subname//'ERROR: setup_nml reading ', & - file=__FILE__, line=__LINE__) - endif - - write(nu_diag,*) subname,' Reading grid_nml' - rewind(unit=nu_nml, iostat=nml_error) - if (nml_error /= 0) then - call abort_ice(subname//'ERROR: grid_nml rewind ', & - file=__FILE__, line=__LINE__) - endif - nml_error = 1 - do while (nml_error > 0) - read(nu_nml, nml=grid_nml,iostat=nml_error) - end do - if (nml_error /= 0) then - call abort_ice(subname//'ERROR: grid_nml reading ', & - file=__FILE__, line=__LINE__) - endif - - write(nu_diag,*) subname,' Reading tracer_nml' - rewind(unit=nu_nml, iostat=nml_error) - if (nml_error /= 0) then - call abort_ice(subname//'ERROR: tracer_nml rewind ', & - file=__FILE__, line=__LINE__) - endif - nml_error = 1 - do while (nml_error > 0) - read(nu_nml, nml=tracer_nml,iostat=nml_error) - end do - if (nml_error /= 0) then - call abort_ice(subname//'ERROR: tracer_nml reading ', & - file=__FILE__, line=__LINE__) - endif - - 
write(nu_diag,*) subname,' Reading thermo_nml' - rewind(unit=nu_nml, iostat=nml_error) - if (nml_error /= 0) then - call abort_ice(subname//'ERROR: thermo_nml rewind ', & - file=__FILE__, line=__LINE__) - endif - nml_error = 1 - do while (nml_error > 0) - read(nu_nml, nml=thermo_nml,iostat=nml_error) - end do - if (nml_error /= 0) then - call abort_ice(subname//'ERROR: thermo_nml reading ', & - file=__FILE__, line=__LINE__) - endif - - write(nu_diag,*) subname,' Reading dynamics_nml' - rewind(unit=nu_nml, iostat=nml_error) - if (nml_error /= 0) then - call abort_ice(subname//'ERROR: dynamics_nml rewind ', & - file=__FILE__, line=__LINE__) - endif - nml_error = 1 - do while (nml_error > 0) - read(nu_nml, nml=dynamics_nml,iostat=nml_error) - end do - if (nml_error /= 0) then - call abort_ice(subname//'ERROR: dynamics_nml reading ', & - file=__FILE__, line=__LINE__) - endif - - write(nu_diag,*) subname,' Reading shortwave_nml' - rewind(unit=nu_nml, iostat=nml_error) - if (nml_error /= 0) then - call abort_ice(subname//'ERROR: shortwave_nml rewind ', & - file=__FILE__, line=__LINE__) - endif - nml_error = 1 - do while (nml_error > 0) - read(nu_nml, nml=shortwave_nml,iostat=nml_error) - end do - if (nml_error /= 0) then - call abort_ice(subname//'ERROR: shortwave_nml reading ', & - file=__FILE__, line=__LINE__) - endif - - write(nu_diag,*) subname,' Reading ponds_nml' - rewind(unit=nu_nml, iostat=nml_error) - if (nml_error /= 0) then - call abort_ice(subname//'ERROR: ponds_nml rewind ', & - file=__FILE__, line=__LINE__) - endif - nml_error = 1 - do while (nml_error > 0) - read(nu_nml, nml=ponds_nml,iostat=nml_error) - end do - if (nml_error /= 0) then - call abort_ice(subname//'ERROR: ponds_nml reading ', & - file=__FILE__, line=__LINE__) - endif - - write(nu_diag,*) subname,' Reading snow_nml' - rewind(unit=nu_nml, iostat=nml_error) - if (nml_error /= 0) then - call abort_ice(subname//'ERROR: snow_nml rewind ', & - file=__FILE__, line=__LINE__) - endif - nml_error = 1 
- do while (nml_error > 0) - read(nu_nml, nml=snow_nml,iostat=nml_error) - end do - if (nml_error /= 0) then - call abort_ice(subname//'ERROR: snow_nml reading ', & - file=__FILE__, line=__LINE__) - endif - - write(nu_diag,*) subname,' Reading forcing_nml' - rewind(unit=nu_nml, iostat=nml_error) - if (nml_error /= 0) then - call abort_ice(subname//'ERROR: forcing_nml rewind ', & - file=__FILE__, line=__LINE__) - endif - nml_error = 1 - do while (nml_error > 0) - read(nu_nml, nml=forcing_nml,iostat=nml_error) - end do - if (nml_error /= 0) then - call abort_ice(subname//'ERROR: forcing_nml reading ', & - file=__FILE__, line=__LINE__) - endif - - close(nu_nml) - call release_fileunit(nu_nml) - endif - - !----------------------------------------------------------------- - ! set up diagnostics output and resolve conflicts - !----------------------------------------------------------------- - -#ifdef CESMCOUPLED - ! Note in CESMCOUPLED mode diag_file is not utilized and - ! runid and runtype are obtained from the driver, not from the namelist - - if (my_task == master_task) then - history_file = trim(runid) // ".cice" // trim(inst_suffix) //".h" - restart_file = trim(runid) // ".cice" // trim(inst_suffix) //".r" - incond_file = trim(runid) // ".cice" // trim(inst_suffix) //".i" - ! Note by tcraig - this if test is needed because the nuopc cap sets - ! nu_diag before this routine is called. This creates a conflict. - ! In addition, in the nuopc cap, shr_file_setIO will fail if the - ! needed namelist is missing (which it is in the CIME nuopc implementation) - if (.not. nu_diag_set) then - inquire(file='ice_modelio.nml'//trim(inst_suffix),exist=exists) - if (exists) then - call get_fileUnit(nu_diag) - call shr_file_setIO('ice_modelio.nml'//trim(inst_suffix),nu_diag) - end if - endif - else - ! 
each task gets unique ice log filename when if test is true, for debugging - if (1 == 0) then - call get_fileUnit(nu_diag) - write(tmpstr,'(a,i4.4)') "ice.log.task_",my_task - open(nu_diag,file=tmpstr) - endif - end if - if (trim(ice_ic) /= 'default' .and. & - trim(ice_ic) /= 'none' .and. & - trim(ice_ic) /= 'internal') then - restart = .true. - end if -#else - if (trim(diag_type) == 'file') call get_fileunit(nu_diag) -#endif - - !----------------------------------------------------------------- - ! broadcast namelist settings - !----------------------------------------------------------------- - - call broadcast_scalar(numin, master_task) - call broadcast_scalar(numax, master_task) - call broadcast_scalar(days_per_year, master_task) - call broadcast_scalar(use_leap_years, master_task) - call broadcast_scalar(year_init, master_task) - call broadcast_scalar(month_init, master_task) - call broadcast_scalar(day_init, master_task) - call broadcast_scalar(sec_init, master_task) - call broadcast_scalar(istep0, master_task) - call broadcast_scalar(dt, master_task) - call broadcast_scalar(npt, master_task) - call broadcast_scalar(npt_unit, master_task) - call broadcast_scalar(diagfreq, master_task) - call broadcast_scalar(debug_model, master_task) - call broadcast_scalar(debug_model_step, master_task) - call broadcast_scalar(debug_model_i, master_task) - call broadcast_scalar(debug_model_j, master_task) - call broadcast_scalar(debug_model_iblk, master_task) - call broadcast_scalar(debug_model_task, master_task) - call broadcast_scalar(print_points, master_task) - call broadcast_scalar(print_global, master_task) - call broadcast_scalar(timer_stats, master_task) - call broadcast_scalar(memory_stats, master_task) - call broadcast_scalar(bfbflag, master_task) - call broadcast_scalar(diag_type, master_task) - call broadcast_scalar(diag_file, master_task) - do n = 1, max_nstrm - call broadcast_scalar(histfreq(n), master_task) - enddo - call broadcast_array(histfreq_n, 
master_task) - call broadcast_scalar(histfreq_base, master_task) - call broadcast_scalar(hist_avg, master_task) - call broadcast_scalar(history_dir, master_task) - call broadcast_scalar(history_file, master_task) - call broadcast_scalar(history_precision, master_task) - call broadcast_scalar(history_format, master_task) - call broadcast_scalar(write_ic, master_task) - call broadcast_scalar(cpl_bgc, master_task) - call broadcast_scalar(incond_dir, master_task) - call broadcast_scalar(incond_file, master_task) - call broadcast_scalar(dumpfreq, master_task) - call broadcast_scalar(dumpfreq_n, master_task) - call broadcast_scalar(dumpfreq_base, master_task) - call broadcast_scalar(dump_last, master_task) - call broadcast_scalar(restart_file, master_task) - call broadcast_scalar(restart, master_task) - call broadcast_scalar(restart_dir, master_task) - call broadcast_scalar(restart_ext, master_task) - call broadcast_scalar(restart_coszen, master_task) - call broadcast_scalar(use_restart_time, master_task) - call broadcast_scalar(restart_format, master_task) - call broadcast_scalar(lcdf64, master_task) - call broadcast_scalar(pointer_file, master_task) - call broadcast_scalar(ice_ic, master_task) - call broadcast_scalar(grid_format, master_task) - call broadcast_scalar(dxrect, master_task) - call broadcast_scalar(dyrect, master_task) - call broadcast_scalar(scale_dxdy, master_task) - call broadcast_scalar(dxscale, master_task) - call broadcast_scalar(dyscale, master_task) - call broadcast_scalar(lonrefrect, master_task) - call broadcast_scalar(latrefrect, master_task) - call broadcast_scalar(close_boundaries, master_task) - call broadcast_scalar(grid_type, master_task) - call broadcast_scalar(grid_ice, master_task) - call broadcast_scalar(grid_ocn, master_task) - call broadcast_scalar(grid_atm, master_task) - call broadcast_scalar(grid_file, master_task) - call broadcast_scalar(gridcpl_file, master_task) - call broadcast_scalar(orca_halogrid, master_task) - call 
broadcast_scalar(bathymetry_file, master_task) - call broadcast_scalar(bathymetry_format, master_task) - call broadcast_scalar(use_bathymetry, master_task) - call broadcast_scalar(kmt_type, master_task) - call broadcast_scalar(kmt_file, master_task) - call broadcast_scalar(kitd, master_task) - call broadcast_scalar(kcatbound, master_task) - call broadcast_scalar(kdyn, master_task) - call broadcast_scalar(ndtd, master_task) - call broadcast_scalar(ndte, master_task) - call broadcast_scalar(evp_algorithm, master_task) - call broadcast_scalar(elasticDamp, master_task) - call broadcast_scalar(pgl_global_ext, master_task) - call broadcast_scalar(brlx, master_task) - call broadcast_scalar(arlx, master_task) - call broadcast_scalar(revised_evp, master_task) - call broadcast_scalar(yield_curve, master_task) - call broadcast_scalar(kstrength, master_task) - call broadcast_scalar(Pstar, master_task) - call broadcast_scalar(Cstar, master_task) - call broadcast_scalar(krdg_partic, master_task) - call broadcast_scalar(krdg_redist, master_task) - call broadcast_scalar(mu_rdg, master_task) - call broadcast_scalar(Cf, master_task) - call broadcast_scalar(ksno, master_task) - call broadcast_scalar(seabed_stress, master_task) - call broadcast_scalar(seabed_stress_method, master_task) - call broadcast_scalar(k1, master_task) - call broadcast_scalar(k2, master_task) - call broadcast_scalar(alphab, master_task) - call broadcast_scalar(threshold_hw, master_task) - call broadcast_scalar(Ktens, master_task) - call broadcast_scalar(e_yieldcurve, master_task) - call broadcast_scalar(e_plasticpot, master_task) - call broadcast_scalar(visc_method, master_task) - call broadcast_scalar(deltaminEVP, master_task) - call broadcast_scalar(deltaminVP, master_task) - call broadcast_scalar(capping_method, master_task) - call broadcast_scalar(advection, master_task) - call broadcast_scalar(conserv_check, master_task) - call broadcast_scalar(shortwave, master_task) - call broadcast_scalar(albedo_type, 
master_task) - call broadcast_scalar(ktherm, master_task) - call broadcast_scalar(coriolis, master_task) - call broadcast_scalar(ssh_stress, master_task) - call broadcast_scalar(kridge, master_task) - call broadcast_scalar(ktransport, master_task) - call broadcast_scalar(maxits_nonlin, master_task) - call broadcast_scalar(precond, master_task) - call broadcast_scalar(dim_fgmres, master_task) - call broadcast_scalar(dim_pgmres, master_task) - call broadcast_scalar(maxits_fgmres, master_task) - call broadcast_scalar(maxits_pgmres, master_task) - call broadcast_scalar(monitor_nonlin, master_task) - call broadcast_scalar(monitor_fgmres, master_task) - call broadcast_scalar(monitor_pgmres, master_task) - call broadcast_scalar(ortho_type, master_task) - call broadcast_scalar(reltol_nonlin, master_task) - call broadcast_scalar(reltol_fgmres, master_task) - call broadcast_scalar(reltol_pgmres, master_task) - call broadcast_scalar(algo_nonlin, master_task) - call broadcast_scalar(fpfunc_andacc, master_task) - call broadcast_scalar(dim_andacc, master_task) - call broadcast_scalar(reltol_andacc, master_task) - call broadcast_scalar(damping_andacc, master_task) - call broadcast_scalar(start_andacc, master_task) - call broadcast_scalar(use_mean_vrel, master_task) - call broadcast_scalar(conduct, master_task) - call broadcast_scalar(R_ice, master_task) - call broadcast_scalar(R_pnd, master_task) - call broadcast_scalar(R_snw, master_task) - call broadcast_scalar(dT_mlt, master_task) - call broadcast_scalar(rsnw_mlt, master_task) - call broadcast_scalar(kalg, master_task) - call broadcast_scalar(hp1, master_task) - call broadcast_scalar(hs0, master_task) - call broadcast_scalar(hs1, master_task) - call broadcast_scalar(dpscale, master_task) - call broadcast_scalar(frzpnd, master_task) - call broadcast_scalar(rfracmin, master_task) - call broadcast_scalar(rfracmax, master_task) - call broadcast_scalar(pndaspect, master_task) - call broadcast_scalar(snwredist, master_task) - call 
broadcast_scalar(snw_aging_table, master_task) - call broadcast_scalar(snw_filename, master_task) - call broadcast_scalar(snw_tau_fname, master_task) - call broadcast_scalar(snw_kappa_fname, master_task) - call broadcast_scalar(snw_drdt0_fname, master_task) - call broadcast_scalar(snw_rhos_fname, master_task) - call broadcast_scalar(snw_Tgrd_fname, master_task) - call broadcast_scalar(snw_T_fname, master_task) - call broadcast_scalar(snwgrain, master_task) - call broadcast_scalar(use_smliq_pnd, master_task) - call broadcast_scalar(rsnw_fall, master_task) - call broadcast_scalar(rsnw_tmax, master_task) - call broadcast_scalar(rhosnew, master_task) - call broadcast_scalar(rhosmin, master_task) - call broadcast_scalar(rhosmax, master_task) - call broadcast_scalar(windmin, master_task) - call broadcast_scalar(drhosdwind, master_task) - call broadcast_scalar(snwlvlfac, master_task) - call broadcast_scalar(albicev, master_task) - call broadcast_scalar(albicei, master_task) - call broadcast_scalar(albsnowv, master_task) - call broadcast_scalar(albsnowi, master_task) - call broadcast_scalar(ahmax, master_task) - call broadcast_scalar(atmbndy, master_task) - call broadcast_scalar(default_season, master_task) - call broadcast_scalar(fyear_init, master_task) - call broadcast_scalar(ycycle, master_task) - call broadcast_scalar(atm_data_format, master_task) - call broadcast_scalar(atm_data_type, master_task) - call broadcast_scalar(atm_data_dir, master_task) - call broadcast_scalar(rotate_wind, master_task) - call broadcast_scalar(calc_strair, master_task) - call broadcast_scalar(calc_Tsfc, master_task) - call broadcast_scalar(formdrag, master_task) - call broadcast_scalar(highfreq, master_task) - call broadcast_scalar(natmiter, master_task) - call broadcast_scalar(atmiter_conv, master_task) - call broadcast_scalar(update_ocn_f, master_task) - call broadcast_scalar(l_mpond_fresh, master_task) - call broadcast_scalar(ustar_min, master_task) - call broadcast_scalar(iceruf, 
master_task) - call broadcast_scalar(iceruf_ocn, master_task) - call broadcast_scalar(calc_dragio, master_task) - call broadcast_scalar(emissivity, master_task) - call broadcast_scalar(fbot_xfer_type, master_task) - call broadcast_scalar(precip_units, master_task) - call broadcast_scalar(oceanmixed_ice, master_task) - call broadcast_scalar(wave_spec_type, master_task) - call broadcast_scalar(wave_spec_file, master_task) - call broadcast_scalar(nfreq, master_task) - call broadcast_scalar(tfrz_option, master_task) - call broadcast_scalar(ocn_data_format, master_task) - call broadcast_scalar(bgc_data_type, master_task) - call broadcast_scalar(fe_data_type, master_task) - call broadcast_scalar(ice_data_type, master_task) - call broadcast_scalar(ice_data_conc, master_task) - call broadcast_scalar(ice_data_dist, master_task) - call broadcast_scalar(bgc_data_dir, master_task) - call broadcast_scalar(ocn_data_type, master_task) - call broadcast_scalar(ocn_data_dir, master_task) - call broadcast_scalar(oceanmixed_file, master_task) - call broadcast_scalar(restore_ocn, master_task) - call broadcast_scalar(trestore, master_task) - call broadcast_scalar(restore_ice, master_task) - call broadcast_scalar(debug_forcing, master_task) - call broadcast_array (latpnt(1:2), master_task) - call broadcast_array (lonpnt(1:2), master_task) - call broadcast_scalar(runid, master_task) - call broadcast_scalar(runtype, master_task) - !call broadcast_scalar(nu_diag, master_task) - - ! 
tracers - call broadcast_scalar(tr_iage, master_task) - call broadcast_scalar(restart_age, master_task) - call broadcast_scalar(tr_FY, master_task) - call broadcast_scalar(restart_FY, master_task) - call broadcast_scalar(tr_lvl, master_task) - call broadcast_scalar(restart_lvl, master_task) - call broadcast_scalar(tr_pond_lvl, master_task) - call broadcast_scalar(restart_pond_lvl, master_task) - call broadcast_scalar(tr_pond_topo, master_task) - call broadcast_scalar(restart_pond_topo, master_task) - call broadcast_scalar(tr_snow, master_task) - call broadcast_scalar(restart_snow, master_task) - call broadcast_scalar(tr_iso, master_task) - call broadcast_scalar(restart_iso, master_task) - call broadcast_scalar(tr_aero, master_task) - call broadcast_scalar(restart_aero, master_task) - call broadcast_scalar(tr_fsd, master_task) - call broadcast_scalar(restart_fsd, master_task) - call broadcast_scalar(ncat, master_task) - call broadcast_scalar(nfsd, master_task) - call broadcast_scalar(nilyr, master_task) - call broadcast_scalar(nslyr, master_task) - call broadcast_scalar(nblyr, master_task) - call broadcast_scalar(n_iso, master_task) - call broadcast_scalar(n_aero, master_task) - call broadcast_scalar(n_zaero, master_task) - call broadcast_scalar(n_algae, master_task) - call broadcast_scalar(n_doc, master_task) - call broadcast_scalar(n_dic, master_task) - call broadcast_scalar(n_don, master_task) - call broadcast_scalar(n_fed, master_task) - call broadcast_scalar(n_fep, master_task) - call broadcast_scalar(a_rapid_mode, master_task) - call broadcast_scalar(floediam, master_task) - call broadcast_scalar(hfrazilmin, master_task) - call broadcast_scalar(Rac_rapid_mode, master_task) - call broadcast_scalar(aspect_rapid_mode, master_task) - call broadcast_scalar(dSdt_slow_mode, master_task) - call broadcast_scalar(phi_c_slow_mode, master_task) - call broadcast_scalar(phi_i_mushy, master_task) - call broadcast_scalar(sw_redist, master_task) - call 
broadcast_scalar(sw_frac, master_task) - call broadcast_scalar(sw_dtemp, master_task) - -#ifdef CESMCOUPLED - pointer_file = trim(pointer_file) // trim(inst_suffix) -#endif - - !----------------------------------------------------------------- - ! update defaults - !----------------------------------------------------------------- - - if (trim(ice_ic) == 'default') ice_ic = 'internal' - if (trim(ice_data_conc) == 'default') ice_data_conc = 'parabolic' - if (trim(ice_data_dist) == 'default') ice_data_dist = 'uniform' - if (trim(ice_data_type) == 'default') ice_data_type = 'latsst' - - !----------------------------------------------------------------- - ! verify inputs - !----------------------------------------------------------------- - - if (my_task == master_task) then - if (trim(diag_type) == 'file') then - write(ice_stdout,*) 'Diagnostic output will be in file ',diag_file - open (nu_diag, file=diag_file, status='unknown') - endif - write(nu_diag,*) '--------------------------------' - write(nu_diag,*) ' ',subname - write(nu_diag,*) ' CICE model diagnostic output ' - write(nu_diag,*) '--------------------------------' - write(nu_diag,*) ' ' - endif - - if (trim(runtype) == 'continue') then - if (my_task == master_task) then - write(nu_diag,*) subname//'NOTE: runtype=continue, setting restart=.true.' - if (.not. use_restart_time) & - write(nu_diag,*) subname//'NOTE: runtype=continue, setting use_restart_time=.true.' - write(nu_diag,*) ' ' - endif - restart = .true. - use_restart_time = .true. - elseif (trim(runtype) == 'initial') then - if (ice_ic == 'none' .or. ice_ic == 'internal') then - if (my_task == master_task) then - write(nu_diag,*) subname//'NOTE: ice_ic = none or internal, setting restart flags to .false.' - if (.not. use_restart_time) & - write(nu_diag,*) subname//'NOTE: ice_ic = none or internal, setting use_restart_time=.false.' - write(nu_diag,*) ' ' - endif - use_restart_time = .false. - restart = .false. - restart_iso = .false. 
- restart_aero = .false. - restart_fsd = .false. - restart_age = .false. - restart_fy = .false. - restart_lvl = .false. - restart_pond_lvl = .false. - restart_pond_topo = .false. - restart_snow = .false. -! tcraig, OK to leave as true, needed for boxrestore case -! restart_ext = .false. - else - if (my_task == master_task) then - write(nu_diag,*) subname//'NOTE: ice_ic /= none or internal, setting restart=.true.' - write(nu_diag,*) ' ' - endif - restart = .true. - endif - else - if (my_task == master_task) then - write(nu_diag,*) subname//' ERROR: runtype unknown = ',trim(runtype) - endif - abort_list = trim(abort_list)//":1" - endif - - if (ktransport <= 0) then - advection = 'none' - endif - - if (ktransport > 0 .and. advection /= 'remap' .and. advection /= 'upwind') then - if (my_task == master_task) write(nu_diag,*) subname//' ERROR: invalid advection=',trim(advection) - abort_list = trim(abort_list)//":3" - endif - - if (ncat == 1 .and. kitd == 1) then - if (my_task == master_task) then - write(nu_diag,*) subname//' ERROR: kitd incompatability: ncat=1 and kitd=1' - write(nu_diag,*) subname//' ERROR: Remapping the ITD is not allowed for ncat=1.' - write(nu_diag,*) subname//' ERROR: Use kitd = 0 (delta function ITD) with kcatbound = 0' - write(nu_diag,*) subname//' ERROR: or for column configurations use kcatbound = -1' - endif - abort_list = trim(abort_list)//":4" - endif - - if (ncat /= 1 .and. kcatbound == -1) then - if (my_task == master_task) then - write(nu_diag,*) subname//' ERROR: ITD required for ncat > 1' - write(nu_diag,*) subname//' ERROR: ncat=',ncat,' kcatbound=',kcatbound - write(nu_diag,*) subname//' ERROR: Please review user guide' - endif - abort_list = trim(abort_list)//":5" - endif - - if (kdyn == 2 .and. revised_evp) then - if (my_task == master_task) then - write(nu_diag,*) subname//' WARNING: revised_evp = T with EAP dynamics' - write(nu_diag,*) subname//' WARNING: revised_evp is ignored' - endif - revised_evp = .false. 
- endif - - if (kdyn > 3) then - if (my_task == master_task) then - write(nu_diag,*) subname//' WARNING: kdyn out of range' - endif - abort_list = trim(abort_list)//":33" - endif - - if (seabed_stress) then - if (seabed_stress_method /= 'LKD' .and. seabed_stress_method /= 'probabilistic') then - if (my_task == master_task) then - write(nu_diag,*) subname//' ERROR: invalid seabed stress method' - write(nu_diag,*) subname//' ERROR: seabed_stress_method should be LKD or probabilistic' - endif - abort_list = trim(abort_list)//":34" - endif - endif - - if (grid_ice == 'CD') then - if (my_task == master_task) then - write(nu_diag,*) subname//' ERROR: grid_ice = CD not supported yet' - endif - abort_list = trim(abort_list)//":47" - elseif (grid_ice == 'C_override_D') then - if (my_task == master_task) then - write(nu_diag,*) subname//' WARNING: using grid_ice = CD, not supported' - endif - grid_ice = 'CD' - endif - - if (grid_ice == 'C' .or. grid_ice == 'CD') then - if (kdyn > 1) then - if (my_task == master_task) then - write(nu_diag,*) subname//' ERROR: grid_ice = C | CD only supported with kdyn<=1 (evp or off)' - write(nu_diag,*) subname//' ERROR: kdyn and grid_ice inconsistency' - endif - abort_list = trim(abort_list)//":46" - endif - if (visc_method /= 'avg_zeta' .and. visc_method /= 'avg_strength') then - if (my_task == master_task) then - write(nu_diag,*) subname//' ERROR: invalid method for viscosities' - write(nu_diag,*) subname//' ERROR: visc_method should be avg_zeta or avg_strength' - endif - abort_list = trim(abort_list)//":44" - endif - endif - - capping = -9.99e30 - if (kdyn == 1 .or. 
kdyn == 3) then - if (capping_method == 'max') then - capping = c1 - elseif (capping_method == 'sum') then - capping = c0 - else - if (my_task == master_task) then - write(nu_diag,*) subname//' ERROR: invalid method for capping viscosities' - write(nu_diag,*) subname//' ERROR: capping_method should be equal to max or sum' - endif - abort_list = trim(abort_list)//":45" - endif - endif - - rplvl = 0 - rptopo = 0 - if (tr_pond_lvl ) rplvl = 1 - if (tr_pond_topo) rptopo = 1 - - tr_pond = .false. ! explicit melt ponds - if (rplvl + rptopo > 0) tr_pond = .true. - - if (rplvl + rptopo > 1) then - if (my_task == master_task) then - write(nu_diag,*) subname//' ERROR: Must use only one melt pond scheme' - endif - abort_list = trim(abort_list)//":6" - endif - - if (tr_pond_lvl .and. .not. tr_lvl) then - if (my_task == master_task) then - write(nu_diag,*) subname//' ERROR: tr_pond_lvl=T but tr_lvl=F' - endif - abort_list = trim(abort_list)//":30" - endif - -! tcraig - this was originally implemented by resetting hs0=0. EH says it might be OK -! to not reset it but extra calculations are done and it might not be bfb. In our -! testing, we should explicitly set hs0 to 0. when setting tr_pond_lvl=T, and otherwise -! this will abort (safest option until additional testing is done) - if (tr_pond_lvl .and. abs(hs0) > puny) then - if (my_task == master_task) then - write(nu_diag,*) subname//' ERROR: tr_pond_lvl=T and hs0 /= 0' - endif - abort_list = trim(abort_list)//":7" - endif - - if (trim(shortwave) /= 'dEdd' .and. tr_pond .and. calc_tsfc) then - if (my_task == master_task) then - write(nu_diag,*) subname//' ERROR: tr_pond=T, calc_tsfc=T, invalid shortwave' - write(nu_diag,*) subname//' ERROR: Must use shortwave=dEdd' - endif - abort_list = trim(abort_list)//":8" - endif - - if (snwredist(1:4) /= 'none' .and. .not. 
tr_snow) then - if (my_task == master_task) then - write (nu_diag,*) 'ERROR: snwredist on but tr_snow=F' - write (nu_diag,*) 'ERROR: Use tr_snow=T for snow redistribution' - endif - abort_list = trim(abort_list)//":37" - endif - if (snwredist(1:4) == 'bulk' .and. .not. tr_lvl) then - if (my_task == master_task) then - write (nu_diag,*) 'ERROR: snwredist=bulk but tr_lvl=F' - write (nu_diag,*) 'ERROR: Use tr_lvl=T for snow redistribution' - endif - abort_list = trim(abort_list)//":38" - endif - if (snwredist(1:6) == 'ITDrdg' .and. .not. tr_lvl) then - if (my_task == master_task) then - write (nu_diag,*) 'ERROR: snwredist=ITDrdg but tr_lvl=F' - write (nu_diag,*) 'ERROR: Use tr_lvl=T for snow redistribution' - endif - abort_list = trim(abort_list)//":39" - endif - if (use_smliq_pnd .and. .not. snwgrain) then - if (my_task == master_task) then - write (nu_diag,*) 'ERROR: use_smliq_pnd = T but' - write (nu_diag,*) 'ERROR: snow metamorphosis not used' - write (nu_diag,*) 'ERROR: Use snwgrain=T with smliq for ponds' - endif - abort_list = trim(abort_list)//":40" - endif - if (use_smliq_pnd .and. .not. tr_snow) then - if (my_task == master_task) then - write (nu_diag,*) 'ERROR: use_smliq_pnd = T but' - write (nu_diag,*) 'ERROR: snow tracers are not active' - write (nu_diag,*) 'ERROR: Use tr_snow=T with smliq for ponds' - endif - abort_list = trim(abort_list)//":41" - endif - if (snwgrain .and. .not. tr_snow) then - if (my_task == master_task) then - write (nu_diag,*) 'ERROR: snwgrain=T but tr_snow=F' - write (nu_diag,*) 'ERROR: Use tr_snow=T for snow metamorphosis' - endif - abort_list = trim(abort_list)//":42" - endif - if (trim(snw_aging_table) /= 'test' .and. & - trim(snw_aging_table) /= 'snicar' .and. & - trim(snw_aging_table) /= 'file') then - if (my_task == master_task) then - write (nu_diag,*) 'ERROR: unknown snw_aging_table = '//trim(snw_aging_table) - endif - abort_list = trim(abort_list)//":43" - endif - - if (tr_iso .and. 
n_iso==0) then - if (my_task == master_task) then - write(nu_diag,*) subname//' ERROR: isotopes activated but' - write(nu_diag,*) subname//' ERROR: not allocated in tracer array.' - write(nu_diag,*) subname//' ERROR: if tr_iso, n_iso must be > 0.' - endif - abort_list = trim(abort_list)//":31" - endif - - if (tr_aero .and. n_aero==0) then - if (my_task == master_task) then - write(nu_diag,*) subname//' ERROR: aerosols activated but' - write(nu_diag,*) subname//' ERROR: not allocated in tracer array.' - write(nu_diag,*) subname//' ERROR: if tr_aero, n_aero must be > 0.' - endif - abort_list = trim(abort_list)//":9" - endif - - if (ncat < 1) then - if (my_task == master_task) then - write(nu_diag,*) subname//' ERROR: ncat < 1' - endif - abort_list = trim(abort_list)//":32" - endif - - if (nilyr < 1) then - if (my_task == master_task) then - write(nu_diag,*) subname//' ERROR: nilyr < 1' - endif - abort_list = trim(abort_list)//":2" - endif - - if (nslyr < 1) then - if (my_task == master_task) then - write(nu_diag,*) subname//' ERROR: nslyr < 1' - endif - abort_list = trim(abort_list)//":34" - endif - - if (nblyr < 1) then - if (my_task == master_task) then - write(nu_diag,*) subname//' ERROR: nblyr < 1' - write(nu_diag,*) subname//' ERROR: not allowed due to history implementation.' - endif - abort_list = trim(abort_list)//":35" - endif - - if (nfsd < 1) then - if (my_task == master_task) then - write(nu_diag,*) subname//' ERROR: nfsd < 1' - write(nu_diag,*) subname//' ERROR: not allowed due to history implementation.' - endif - abort_list = trim(abort_list)//":36" - endif - - if (trim(shortwave) /= 'dEdd' .and. tr_aero) then - if (my_task == master_task) then - write(nu_diag,*) subname//' ERROR: tr_aero=T, invalid shortwave' - write(nu_diag,*) subname//' ERROR: Must use shortwave=dEdd' - endif - abort_list = trim(abort_list)//":10" - endif - - if (trim(shortwave) /= 'dEdd' .and. 
snwgrain) then - if (my_task == master_task) then - write (nu_diag,*) 'WARNING: snow grain radius activated but' - write (nu_diag,*) 'WARNING: dEdd shortwave is not.' - endif - endif - - if ((rfracmin < -puny .or. rfracmin > c1+puny) .or. & - (rfracmax < -puny .or. rfracmax > c1+puny) .or. & - (rfracmin > rfracmax)) then - if (my_task == master_task) then - write(nu_diag,*) subname//' ERROR: rfracmin, rfracmax must be between 0 and 1' - write(nu_diag,*) subname//' ERROR: and rfracmax >= rfracmin' - endif - abort_list = trim(abort_list)//":11" - endif - rfracmin = min(max(rfracmin,c0),c1) - rfracmax = min(max(rfracmax,c0),c1) - - if (trim(atm_data_type) == 'monthly' .and. calc_strair) then - if (my_task == master_task) write(nu_diag,*) subname//' ERROR: atm_data_type=monthly and calc_strair=T' - abort_list = trim(abort_list)//":12" - endif - - if (ktherm == 2 .and. .not. calc_Tsfc) then - if (my_task == master_task) write(nu_diag,*) subname//' ERROR: ktherm = 2 and calc_Tsfc=F' - abort_list = trim(abort_list)//":13" - endif - -! tcraig, is it really OK for users to run inconsistently? -! ech: yes, for testing sensitivities. It's not recommended for science runs - if (ktherm == 1 .and. trim(tfrz_option) /= 'linear_salt') then - if (my_task == master_task) then - write(nu_diag,*) subname//' WARNING: ktherm = 1 and tfrz_option = ',trim(tfrz_option) - write(nu_diag,*) subname//' WARNING: For consistency, set tfrz_option = linear_salt' - endif - endif - if (ktherm == 2 .and. trim(tfrz_option) /= 'mushy') then - if (my_task == master_task) then - write(nu_diag,*) subname//' WARNING: ktherm = 2 and tfrz_option = ',trim(tfrz_option) - write(nu_diag,*) subname//' WARNING: For consistency, set tfrz_option = mushy' - endif - endif -!tcraig - if (ktherm == 1 .and. .not.sw_redist) then - if (my_task == master_task) then - write(nu_diag,*) subname//' WARNING: ktherm = 1 and sw_redist = ',sw_redist - write(nu_diag,*) subname//' WARNING: For consistency, set sw_redist = .true.' 
- endif - endif - - if (trim(atmbndy) == 'default') then - if (my_task == master_task) then - write(nu_diag,*) subname//' WARNING: atmbndy = default is deprecated' - write(nu_diag,*) subname//' WARNING: setting atmbndy = similarity' - endif - atmbndy = 'similarity' - endif - - if (formdrag) then - if (trim(atmbndy) == 'constant') then - if (my_task == master_task) write(nu_diag,*) subname//' ERROR: formdrag=T and atmbndy=constant' - abort_list = trim(abort_list)//":14" - endif - - if (.not. calc_strair) then - if (my_task == master_task) write(nu_diag,*) subname//' ERROR: formdrag=T and calc_strair=F' - abort_list = trim(abort_list)//":15" - endif - - if (.not. tr_pond) then - if (my_task == master_task) write(nu_diag,*) subname//' ERROR: formdrag=T and tr_pond=F' - abort_list = trim(abort_list)//":16" - endif - - if (.not. tr_lvl) then - if (my_task == master_task) write(nu_diag,*) subname//' ERROR: formdrag=T and tr_lvl=F' - abort_list = trim(abort_list)//":18" - endif - endif - - if (trim(fbot_xfer_type) == 'Cdn_ocn' .and. .not. formdrag) then - if (my_task == master_task) write(nu_diag,*) subname//' ERROR: formdrag=F and fbot_xfer_type=Cdn_ocn' - abort_list = trim(abort_list)//":19" - endif - - if(history_precision .ne. 4 .and. history_precision .ne. 8) then - write (nu_diag,*) subname//' ERROR: bad value for history_precision, allowed values: 4, 8' - abort_list = trim(abort_list)//":22" - endif - - if(histfreq_base /= 'init' .and. histfreq_base /= 'zero') then - write (nu_diag,*) subname//' ERROR: bad value for histfreq_base, allowed values: init, zero' - abort_list = trim(abort_list)//":24" - endif - - if(dumpfreq_base /= 'init' .and. dumpfreq_base /= 'zero') then - write (nu_diag,*) subname//' ERROR: bad value for dumpfreq_base, allowed values: init, zero' - abort_list = trim(abort_list)//":25" - endif - - if (.not.(trim(dumpfreq) == 'y' .or. trim(dumpfreq) == 'Y' .or. & - trim(dumpfreq) == 'm' .or. trim(dumpfreq) == 'M' .or. & - trim(dumpfreq) == 'd' .or. 
trim(dumpfreq) == 'D' .or. & - trim(dumpfreq) == 'h' .or. trim(dumpfreq) == 'H' .or. & - trim(dumpfreq) == '1' )) then - if (my_task == master_task) then - write(nu_diag,*) subname//' WARNING: unrecognized dumpfreq=', trim(dumpfreq) - write(nu_diag,*) subname//' WARNING: No restarts files will be written' - write(nu_diag,*) subname//' WARNING: Allowed values : ''y'', ''m'', ''d'', ''h'', ''1''' - endif - endif - - ! Implicit solver input validation - if (kdyn == 3) then - if (.not. (trim(algo_nonlin) == 'picard' .or. trim(algo_nonlin) == 'anderson')) then - if (my_task == master_task) then - write(nu_diag,*) subname//' ERROR: unknown algo_nonlin: '//algo_nonlin - write(nu_diag,*) subname//' ERROR: allowed values: ''picard'', ''anderson''' - endif - abort_list = trim(abort_list)//":60" - endif - - if (trim(algo_nonlin) == 'picard') then - ! Picard solver is implemented in the Anderson solver; reset number of saved residuals to zero - dim_andacc = 0 - endif - - if (.not. (trim(precond) == 'ident' .or. trim(precond) == 'diag' .or. trim(precond) == 'pgmres')) then - if (my_task == master_task) then - write(nu_diag,*) subname//' ERROR: unknown precond: '//precond - write(nu_diag,*) subname//' ERROR: allowed values: ''ident'', ''diag'', ''pgmres''' - endif - abort_list = trim(abort_list)//":61" - endif - - if (.not. (trim(ortho_type) == 'cgs' .or. trim(ortho_type) == 'mgs')) then - if (my_task == master_task) then - write(nu_diag,*) subname//' ERROR: unknown ortho_type: '//ortho_type - write(nu_diag,*) subname//' ERROR: allowed values: ''cgs'', ''mgs''' - endif - abort_list = trim(abort_list)//":62" - endif - endif - - ice_IOUnitsMinUnit = numin - ice_IOUnitsMaxUnit = numax - - call icepack_init_parameters(Cf_in=Cf) - call icepack_init_parameters(ksno_in=ksno) - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname//'Icepack Abort1', & - file=__FILE__, line=__LINE__) - - wave_spec = .false. - if (tr_fsd .and. 
(trim(wave_spec_type) /= 'none')) wave_spec = .true. - if (tr_fsd .and. (trim(wave_spec_type) == 'none')) then - if (my_task == master_task) then - write(nu_diag,*) subname//' WARNING: tr_fsd=T but wave_spec=F - not recommended' - endif - end if - - ! compute grid locations for thermo, u and v fields - - grid_ice_thrm = 'T' - if (grid_ice == 'A') then - grid_ice_dynu = 'T' - grid_ice_dynv = 'T' - elseif (grid_ice == 'B') then - grid_ice_dynu = 'U' - grid_ice_dynv = 'U' - elseif (grid_ice == 'C') then - grid_ice_dynu = 'E' - grid_ice_dynv = 'N' - elseif (grid_ice == 'CD') then - grid_ice_dynu = 'NE' - grid_ice_dynv = 'NE' - else - if (my_task == master_task) then - write(nu_diag,*) subname//' ERROR: unknown grid_ice: '//trim(grid_ice) - endif - abort_list = trim(abort_list)//":64" - endif - - grid_atm_thrm = 'T' - if (grid_atm == 'A') then - grid_atm_dynu = 'T' - grid_atm_dynv = 'T' - elseif (grid_atm == 'B') then - grid_atm_dynu = 'U' - grid_atm_dynv = 'U' - elseif (grid_atm == 'C') then - grid_atm_dynu = 'E' - grid_atm_dynv = 'N' - elseif (grid_atm == 'CD') then - grid_atm_dynu = 'NE' - grid_atm_dynv = 'NE' - else - if (my_task == master_task) then - write(nu_diag,*) subname//' ERROR: unknown grid_atm: '//trim(grid_atm) - endif - abort_list = trim(abort_list)//":65" - endif - - grid_ocn_thrm = 'T' - if (grid_ocn == 'A') then - grid_ocn_dynu = 'T' - grid_ocn_dynv = 'T' - elseif (grid_ocn == 'B') then - grid_ocn_dynu = 'U' - grid_ocn_dynv = 'U' - elseif (grid_ocn == 'C') then - grid_ocn_dynu = 'E' - grid_ocn_dynv = 'N' - elseif (grid_ocn == 'CD') then - grid_ocn_dynu = 'NE' - grid_ocn_dynv = 'NE' - else - if (my_task == master_task) then - write(nu_diag,*) subname//' ERROR: unknown grid_ocn: '//trim(grid_ocn) - endif - abort_list = trim(abort_list)//":66" - endif - - !----------------------------------------------------------------- - ! 
spew - !----------------------------------------------------------------- - - if (my_task == master_task) then - - write(nu_diag,*) ' Overview of model configuration with relevant parameters' - write(nu_diag,*) '=========================================================' - write(nu_diag,*) 'For details, compare namelist output below with the' - write(nu_diag,*) 'Case Settings section in the model documentation.' - write(nu_diag,*) ' ' - write(nu_diag,*) ' Calendar' - write(nu_diag,*) '--------------------------------' - write(nu_diag,1020) ' days_per_year = ',days_per_year,' : number of days in a model year' - if (use_leap_years) then - tmpstr2 = ' : leap days are included' - else - tmpstr2 = ' : leap days are not included' - endif - write(nu_diag,1010) ' use_leap_years = ',use_leap_years,trim(tmpstr2) - write(nu_diag,1002) ' dt = ', dt, ' : model time step' - - write(nu_diag,*) ' ' - write(nu_diag,*) ' Grid, Discretization' - write(nu_diag,*) '--------------------------------' - tmpstr2 = ' ' - if (trim(grid_type) == 'rectangular') tmpstr2 = ' : internally defined, rectangular grid' - if (trim(grid_type) == 'regional') tmpstr2 = ' : user-defined, regional grid' - if (trim(grid_type) == 'displaced_pole') tmpstr2 = ' : user-defined grid with rotated north pole' - if (trim(grid_type) == 'tripole') tmpstr2 = ' : user-defined grid with northern hemisphere zipper' - write(nu_diag,1030) ' grid_type = ',trim(grid_type),trim(tmpstr2) - write(nu_diag,1030) ' grid_ice = ',trim(grid_ice) - write(nu_diag,1030) ' grid_ice_thrm = ',trim(grid_ice_thrm) - write(nu_diag,1030) ' grid_ice_dynu = ',trim(grid_ice_dynu) - write(nu_diag,1030) ' grid_ice_dynv = ',trim(grid_ice_dynv) - write(nu_diag,1030) ' grid_atm = ',trim(grid_atm) - write(nu_diag,1030) ' grid_atm_thrm = ',trim(grid_atm_thrm) - write(nu_diag,1030) ' grid_atm_dynu = ',trim(grid_atm_dynu) - write(nu_diag,1030) ' grid_atm_dynv = ',trim(grid_atm_dynv) - write(nu_diag,1030) ' grid_ocn = ',trim(grid_ocn) - write(nu_diag,1030) 
' grid_ocn_thrm = ',trim(grid_ocn_thrm) - write(nu_diag,1030) ' grid_ocn_dynu = ',trim(grid_ocn_dynu) - write(nu_diag,1030) ' grid_ocn_dynv = ',trim(grid_ocn_dynv) - write(nu_diag,1030) ' kmt_type = ',trim(kmt_type) - if (trim(grid_type) /= 'rectangular') then - if (use_bathymetry) then - tmpstr2 = ' : bathymetric input data is used' - else - tmpstr2 = ' : bathymetric input data is not used' - endif - write(nu_diag,1010) ' use_bathymetry = ', use_bathymetry,trim(tmpstr2) - write(nu_diag,1030) ' bathymetry_format= ', trim(bathymetry_format) - endif - write(nu_diag,1020) ' nilyr = ', nilyr, ' : number of ice layers (equal thickness)' - write(nu_diag,1020) ' nslyr = ', nslyr, ' : number of snow layers (equal thickness)' - write(nu_diag,1020) ' nblyr = ', nblyr, ' : number of bio layers (equal thickness)' - if (trim(shortwave) == 'dEdd') & - write(nu_diag,*) 'dEdd interior and sfc scattering layers are used in both ice, snow (unequal)' - write(nu_diag,1020) ' ncat = ', ncat, ' : number of ice categories' - if (kcatbound == 0) then - tmpstr2 = ' : original ITD category bounds' - elseif (kcatbound == 1) then - tmpstr2 = ' : round-number category bounds' - elseif (kcatbound == 2) then - tmpstr2 = ' : WMO standard ITD categories' - elseif (kcatbound == -1) then - tmpstr2 = ' : one thickness category' - else - tmpstr2 = ' : unknown value' - endif - write(nu_diag,1020) ' kcatbound = ', kcatbound,trim(tmpstr2) - if (kitd==0) then - tmpstr2 = ' : delta function ITD approx' - else - tmpstr2 = ' : linear remapping ITD approx' - endif - write(nu_diag,1020) ' kitd = ', kitd,trim(tmpstr2) - - if (tr_fsd) then - tmpstr2 = ' : floe size distribution is enabled' - else - tmpstr2 = ' : floe size distribution is disabled' - endif - write(nu_diag,1010) ' tr_fsd = ', tr_fsd,trim(tmpstr2) - write(nu_diag,1020) ' nfsd = ', nfsd, ' : number of floe size categories' - - write(nu_diag,*) ' ' - write(nu_diag,*) ' Horizontal Dynamics' - write(nu_diag,*) '--------------------------------' - if 
(kdyn == 1) then - tmpstr2 = ' : elastic-viscous-plastic dynamics' - elseif (kdyn == 2) then - tmpstr2 = ' : elastic-anisotropic-plastic dynamics' - elseif (kdyn == 3) then - tmpstr2 = ' : viscous-plastic dynamics' - elseif (kdyn < 1) then - tmpstr2 = ' : dynamics disabled' - else - tmpstr2 = ' : unknown value' - endif - write(nu_diag,1020) ' kdyn = ', kdyn,trim(tmpstr2) - if (kdyn >= 1) then - if (kdyn == 1 .or. kdyn == 2) then - if (revised_evp) then - tmpstr2 = ' : revised EVP formulation used' - write(nu_diag,1002) ' arlx = ', arlx, ' : stress equation factor alpha' - write(nu_diag,1002) ' brlx = ', brlx, ' : stress equation factor beta' - else - tmpstr2 = ' : revised EVP formulation not used' - endif - write(nu_diag,1010) ' revised_evp = ', revised_evp,trim(tmpstr2) - - if (evp_algorithm == 'standard_2d') then - tmpstr2 = ' : standard 2d EVP solver' - elseif (evp_algorithm == 'shared_mem_1d') then - tmpstr2 = ' : vectorized 1d EVP solver' - pgl_global_ext = .true. - else - tmpstr2 = ' : unknown value' - endif - write(nu_diag,1031) ' evp_algorithm = ', trim(evp_algorithm),trim(tmpstr2) - write(nu_diag,1020) ' ndtd = ', ndtd, ' : number of dynamics/advection/ridging/steps per thermo timestep' - write(nu_diag,1020) ' ndte = ', ndte, ' : number of EVP or EAP subcycles' - endif - - if (kdyn == 1 .or. 
kdyn == 3) then - write(nu_diag,1030) ' yield_curve = ', trim(yield_curve), ' : yield curve' - if (trim(yield_curve) == 'ellipse') & - write(nu_diag,1002) ' e_yieldcurve = ', e_yieldcurve, ' : aspect ratio of yield curve' - write(nu_diag,1002) ' e_plasticpot = ', e_plasticpot, ' : aspect ratio of plastic potential' - endif - - if (kdyn == 1) then - write(nu_diag,1003) ' deltamin = ', deltaminEVP, ' : minimum delta for viscosities' - write(nu_diag,1030) ' capping_meth = ', trim(capping_method), ' : capping method for viscosities' - elseif (kdyn == 3) then - write(nu_diag,1003) ' deltamin = ', deltaminVP, ' : minimum delta for viscosities' - write(nu_diag,1030) ' capping_meth = ', trim(capping_method), ' : capping method for viscosities' - endif - !write(nu_diag,1002) ' capping = ', capping, ' : capping value for viscosities' - - write(nu_diag,1002) ' elasticDamp = ', elasticDamp, ' : coefficient for calculating the parameter E' - - if (trim(coriolis) == 'latitude') then - tmpstr2 = ' : latitude-dependent Coriolis parameter' - elseif (trim(coriolis) == 'contant') then - tmpstr2 = ' : = 1.46e-4/s' - elseif (trim(coriolis) == 'zero') then - tmpstr2 = ' : = 0.0' - else - tmpstr2 = ': unknown value' - endif - write(nu_diag,1030) ' coriolis = ',trim(coriolis),trim(tmpstr2) - - if (trim(ssh_stress) == 'geostrophic') then - tmpstr2 = ' : from ocean velocity' - elseif (trim(ssh_stress) == 'coupled') then - tmpstr2 = ' : from coupled sea surface height gradients' - else - tmpstr2 = ' : unknown value' - endif - write(nu_diag,1030) ' ssh_stress = ',trim(ssh_stress),trim(tmpstr2) - - if (trim(advection) == 'remap') then - tmpstr2 = ' : linear remapping advection' - elseif (trim(advection) == 'upwind') then - tmpstr2 = ' : donor cell (upwind) advection' - elseif (trim(advection) == 'none') then - tmpstr2 = ' : advection disabled by ktransport namelist' - else - tmpstr2 = ' : unknown value' - endif - write(nu_diag,1030) ' advection = ', trim(advection),trim(tmpstr2) - - if 
(seabed_stress) then - tmpstr2 = ' : use seabed stress parameterization for landfast ice' - else - tmpstr2 = ' : no seabed stress parameterization' - endif - write(nu_diag,1010) ' seabed_stress = ', seabed_stress,trim(tmpstr2) - if (seabed_stress) then - write(nu_diag,1030) ' seabed method = ',trim(seabed_stress_method) - if (seabed_stress_method == 'LKD') then - write(nu_diag,1002) ' k1 = ', k1, ' : free parameter for landfast ice' - write(nu_diag,1002) ' k2 = ', k2, ' : free parameter for landfast ice' - write(nu_diag,1002) ' alphab = ', alphab, ' : factor for landfast ice' - write(nu_diag,1002) ' threshold_hw = ', threshold_hw, ' : max water depth for grounding ice' - elseif (seabed_stress_method == 'probabilistic') then - write(nu_diag,1002) ' alphab = ', alphab, ' : factor for landfast ice' - endif - endif - if (grid_ice == 'C' .or. grid_ice == 'CD') then - write(nu_diag,1030) ' visc_method= ', trim(visc_method),' : viscosities method (U point)' - endif - - write(nu_diag,1002) ' Ktens = ', Ktens, ' : tensile strength factor' - - if (kdyn == 3) then - write(nu_diag,1020) ' maxits_nonlin = ', maxits_nonlin,' : max nb of iteration for nonlinear solver' - write(nu_diag,1030) ' precond = ', trim(precond),' : preconditioner for FGMRES' - write(nu_diag,1020) ' dim_fgmres = ', dim_fgmres,' : size of FGMRES Krylov subspace' - write(nu_diag,1020) ' dim_pgmres = ', dim_pgmres,' : size of PGMRES Krylov subspace' - write(nu_diag,1020) ' maxits_fgmres = ', maxits_fgmres,' : max nb of iteration for FGMRES' - write(nu_diag,1020) ' maxits_pgmres = ', maxits_pgmres,' : max nb of iteration for PGMRES' - write(nu_diag,1010) ' monitor_nonlin = ', monitor_nonlin,' : print nonlinear residual norm' - write(nu_diag,1010) ' monitor_fgmres = ', monitor_fgmres,' : print FGMRES residual norm' - write(nu_diag,1010) ' monitor_pgmres = ', monitor_pgmres,' : print PGMRES residual norm' - write(nu_diag,1030) ' ortho_type = ', trim(ortho_type),' : type of orthogonalization for FGMRES' - 
write(nu_diag,1009) ' reltol_nonlin = ', reltol_nonlin,' : nonlinear stopping criterion' - write(nu_diag,1009) ' reltol_fgmres = ', reltol_fgmres,' : FGMRES stopping criterion' - write(nu_diag,1009) ' reltol_pgmres = ', reltol_pgmres,' : PGMRES stopping criterion' - write(nu_diag,1030) ' algo_nonlin = ', trim(algo_nonlin),' : nonlinear algorithm' - write(nu_diag,1010) ' use_mean_vrel = ', use_mean_vrel,' : use mean of previous 2 iterates to compute vrel' - if (algo_nonlin == 'anderson') then - write(nu_diag,1020) ' fpfunc_andacc = ', fpfunc_andacc,' : fixed point function for Anderson acceleration' - write(nu_diag,1020) ' dim_andacc = ', dim_andacc,' : size of Anderson minimization matrix' - write(nu_diag,1009) ' reltol_andacc = ', reltol_andacc,' : relative tolerance for Anderson acceleration' - write(nu_diag,1000) ' damping_andacc = ', damping_andacc,' : damping factor for Anderson acceleration' - write(nu_diag,1020) ' start_andacc = ', start_andacc,' : nonlinear iteration at which acceleration starts' - endif - endif - - endif ! 
kdyn enabled - - write(nu_diag,*) ' ' - write(nu_diag,*) ' Mechanical Deformation (Ridging) and Ice Strength' - write(nu_diag,*) '--------------------------------------------------' - if (kridge == 1) then - tmpstr2 = ' : ridging enabled' - else - tmpstr2 = ' : ridging disabled' - endif - write(nu_diag,1010) ' tr_lvl = ', tr_lvl,' : ridging related tracers' - write(nu_diag,1020) ' kridge = ', kridge,trim(tmpstr2) - if (kridge == 1) then - if (krdg_partic == 1) then - tmpstr2 = ' : new participation function' - else - tmpstr2 = ' : old participation function' - endif - write(nu_diag,1020) ' krdg_partic = ', krdg_partic,trim(tmpstr2) - if (krdg_partic == 1) & - write(nu_diag,1002) ' mu_rdg = ', mu_rdg,' : e-folding scale of ridged ice' - if (krdg_redist == 1) then - tmpstr2 = ' : new redistribution function' - else - tmpstr2 = ' : old redistribution function' - endif - write(nu_diag,1020) ' krdg_redist = ', krdg_redist,trim(tmpstr2) - endif - - if (kstrength == 0) then - tmpstr2 = ' : Hibler (1979)' - elseif (kstrength == 1) then - tmpstr2 = ' : Rothrock (1975)' - else - tmpstr2 = ' : unknown value' - endif - write(nu_diag,1020) ' kstrength = ', kstrength,trim(tmpstr2) - if (kstrength == 0) then - write(nu_diag,1009) ' Pstar = ', Pstar, ' : P* strength factor' - write(nu_diag,1002) ' Cstar = ', Cstar, ' : C* strength exponent factor' - elseif (kstrength == 1) then - write(nu_diag,1002) ' Cf = ', Cf, ' : ratio of ridging work to PE change' - endif - - write(nu_diag,*) ' ' - write(nu_diag,*) ' Thermodynamics' - write(nu_diag,*) '--------------------------------' - - if (ktherm == 1) then - tmpstr2 = ' : Bitz and Lipscomb 1999 thermo' - elseif (ktherm == 2) then - tmpstr2 = ' : mushy-layer thermo' - elseif (ktherm < 0) then - tmpstr2 = ' : Thermodynamics disabled' - else - tmpstr2 = ' : unknown value' - endif - write(nu_diag,1020) ' ktherm = ', ktherm,trim(tmpstr2) - if (ktherm >= 0) then - write(nu_diag,1002) ' dt = ', dt, ' : thermodynamic time step' - 
write(nu_diag,1002) ' ksno = ', ksno,' : snow thermal conductivity' - if (ktherm == 1) & - write(nu_diag,1030) ' conduct = ', trim(conduct),' : ice thermal conductivity' - if (ktherm == 2) then - write(nu_diag,1002) ' a_rapid_mode = ', a_rapid_mode,' : brine channel diameter' - write(nu_diag,1002) ' Rac_rapid_mode = ', Rac_rapid_mode,' : critical Rayleigh number' - write(nu_diag,1002) ' aspect_rapid_mode= ', aspect_rapid_mode,' : brine convection aspect ratio' - write(nu_diag,1009) ' dSdt_slow_mode = ', dSdt_slow_mode,' : drainage strength parameter' - write(nu_diag,1002) ' phi_c_slow_mode = ', phi_c_slow_mode,' : critical liquid fraction' - write(nu_diag,1002) ' phi_i_mushy = ', phi_i_mushy,' : solid fraction at lower boundary' - endif - write(nu_diag,1002) ' hfrazilmin = ', hfrazilmin,' : minimum new frazil ice thickness' - - write(nu_diag,*) ' ' - write(nu_diag,*) ' Radiation' - write(nu_diag,*) '--------------------------------' - if (trim(shortwave) == 'dEdd') then - tmpstr2 = ' : delta-Eddington multiple-scattering method' - elseif (trim(shortwave) == 'ccsm3') then - tmpstr2 = ' : NCAR CCSM3 distribution method' - else - tmpstr2 = ' : unknown value' - endif - write(nu_diag,1030) ' shortwave = ', trim(shortwave),trim(tmpstr2) - if (trim(shortwave) == 'dEdd') then - write(nu_diag,1002) ' R_ice = ', R_ice,' : tuning parameter for sea ice albedo' - write(nu_diag,1002) ' R_pnd = ', R_pnd,' : tuning parameter for ponded sea ice albedo' - write(nu_diag,1002) ' R_snw = ', R_snw,' : tuning parameter for snow broadband albedo' - write(nu_diag,1002) ' dT_mlt = ', dT_mlt,' : change in temperature per change in snow grain radius' - write(nu_diag,1002) ' rsnw_mlt = ', rsnw_mlt,' : maximum melting snow grain radius' - write(nu_diag,1002) ' kalg = ', kalg,' : absorption coefficient for algae' - else - if (trim(albedo_type) == 'ccsm3') then - tmpstr2 = ' : NCAR CCSM3 albedos' - elseif (trim(albedo_type) == 'constant') then - tmpstr2 = ' : four constant albedos' - else - 
tmpstr2 = ' : unknown value' - abort_list = trim(abort_list)//":23" - endif - write(nu_diag,1030) ' albedo_type = ', trim(albedo_type),trim(tmpstr2) - if (trim(albedo_type) == 'ccsm3') then - write(nu_diag,1002) ' albicev = ', albicev,' : visible ice albedo for thicker ice' - write(nu_diag,1002) ' albicei = ', albicei,' : near infrared ice albedo for thicker ice' - write(nu_diag,1002) ' albsnowv = ', albsnowv,' : visible, cold snow albedo' - write(nu_diag,1002) ' albsnowi = ', albsnowi,' : near infrared, cold snow albedo' - write(nu_diag,1002) ' ahmax = ', ahmax,' : albedo is constant above this thickness' - write(nu_diag,1002) ' ahmax = ', ahmax,' : albedo is constant above this thickness' - endif - endif - write(nu_diag,1000) ' emissivity = ', emissivity,' : emissivity of snow and ice' - write(nu_diag,1010) ' sw_redist = ', sw_redist,' : redistribute internal shortwave to surface' - if (sw_redist) then - write(nu_diag,1002) ' sw_frac = ', sw_frac,' : fraction redistributed' - write(nu_diag,1002) ' sw_dtemp = ', sw_dtemp,' : temperature difference from freezing to redistribute' - endif - endif - - write(nu_diag,*) ' ' - write(nu_diag,*) ' Atmospheric Forcing / Coupling' - write(nu_diag,*) '--------------------------------' - write(nu_diag,1010) ' calc_Tsfc = ', calc_Tsfc,' : calculate surface temperature as part of thermo' - write(nu_diag,1010) ' calc_strair = ', calc_strair,' : calculate wind stress and speed' - write(nu_diag,1010) ' rotate_wind = ', rotate_wind,' : rotate wind/stress to computational grid' - write(nu_diag,1010) ' formdrag = ', formdrag,' : use form drag parameterization' - write(nu_diag,1000) ' iceruf = ', iceruf, ' : ice surface roughness at atmosphere interface (m)' - if (trim(atmbndy) == 'constant') then - tmpstr2 = ' : constant-based boundary layer' - elseif (trim(atmbndy) == 'similarity' .or. 
& - trim(atmbndy) == 'mixed') then - write(nu_diag,1010) ' highfreq = ', highfreq,' : high-frequency atmospheric coupling' - write(nu_diag,1020) ' natmiter = ', natmiter,' : number of atmo boundary layer iterations' - write(nu_diag,1002) ' atmiter_conv = ', atmiter_conv,' : convergence criterion for ustar' - if (trim(atmbndy) == 'similarity') then - tmpstr2 = ' : stability-based boundary layer' - else - tmpstr2 = ' : stability-based boundary layer for wind stress, constant-based for sensible+latent heat fluxes' - endif - else - tmpstr2 = ' : unknown value' - endif - write(nu_diag,1030) ' atmbndy = ', trim(atmbndy),trim(tmpstr2) - - write(nu_diag,*) ' ' - write(nu_diag,*) ' Oceanic Forcing / Coupling' - write(nu_diag,*) '--------------------------------' - if (oceanmixed_ice) then - tmpstr2 = ' : ocean mixed layer calculation (SST) enabled' - else - tmpstr2 = ' : ocean mixed layer calculation (SST) disabled' - endif - write(nu_diag,1010) ' oceanmixed_ice = ', oceanmixed_ice,trim(tmpstr2) - if (oceanmixed_ice) then - write(nu_diag,*) ' WARNING: ocean mixed layer ON' - write(nu_diag,*) ' WARNING: will impact ocean forcing interaction' - write(nu_diag,*) ' WARNING: coupled forcing will be modified by mixed layer routine' - endif - if (trim(tfrz_option) == 'minus1p8') then - tmpstr2 = ' : constant ocean freezing temperature (-1.8C)' - elseif (trim(tfrz_option) == 'linear_salt') then - tmpstr2 = ' : linear function of salinity (use with ktherm=1)' - elseif (trim(tfrz_option) == 'mushy') then - tmpstr2 = ' : Assur (1958) as in mushy-layer thermo (ktherm=2)' - else - tmpstr2 = ' : unknown value' - endif - write(nu_diag,1030) ' tfrz_option = ', trim(tfrz_option),trim(tmpstr2) - if (update_ocn_f) then - tmpstr2 = ' : frazil water/salt fluxes included in ocean fluxes' - else - tmpstr2 = ' : frazil water/salt fluxes not included in ocean fluxes' - endif - write(nu_diag,1010) ' update_ocn_f = ', update_ocn_f,trim(tmpstr2) - if (l_mpond_fresh .and. 
tr_pond_topo) then - tmpstr2 = ' : retain (topo) pond water until ponds drain' - else - tmpstr2 = ' : pond water not retained on ice (virtual only)' - endif - write(nu_diag,1010) ' l_mpond_fresh = ', l_mpond_fresh,trim(tmpstr2) - if (trim(fbot_xfer_type) == 'constant') then - tmpstr2 = ' : ocean heat transfer coefficient is constant' - elseif (trim(fbot_xfer_type) == 'Cdn_ocn') then - tmpstr2 = ' : variable ocean heat transfer coefficient' ! only used with form_drag=T? - else - tmpstr2 = ' : unknown value' - endif - write(nu_diag,1030) ' fbot_xfer_type = ', trim(fbot_xfer_type),trim(tmpstr2) - write(nu_diag,1000) ' ustar_min = ', ustar_min,' : minimum value of ocean friction velocity' - if (calc_dragio) then - tmpstr2 = ' : dragio computed from iceruf_ocn' - else - tmpstr2 = ' : dragio hard-coded' - endif - write(nu_diag,1010) ' calc_dragio = ', calc_dragio,trim(tmpstr2) - if(calc_dragio) then - write(nu_diag,1002) ' iceruf_ocn = ', iceruf_ocn,' : under-ice roughness length' - endif - - if (tr_fsd) then - write(nu_diag,1002) ' floediam = ', floediam, ' constant floe diameter' - if (wave_spec) then - tmpstr2 = ' : use wave spectrum for floe size distribution' - else - tmpstr2 = 'WARNING : floe size distribution does not use wave spectrum' - endif - write(nu_diag,1010) ' wave_spec = ', wave_spec,trim(tmpstr2) - if (wave_spec) then - if (trim(wave_spec_type) == 'none') then - tmpstr2 = ' : no wave data provided, no wave-ice interactions' - elseif (trim(wave_spec_type) == 'profile') then - tmpstr2 = ' : use fixed dummy wave spectrum for testing, sea surface height generated using constant phase (1 iteration of wave fracture)' - elseif (trim(wave_spec_type) == 'constant') then - tmpstr2 = ' : wave spectrum data file provided, sea surface height generated using constant phase (1 iteration of wave fracture)' - elseif (trim(wave_spec_type) == 'random') then - tmpstr2 = ' : wave spectrum data file provided, sea surface height generated using random number (multiple 
iterations of wave fracture to convergence)' - else - tmpstr2 = ' : unknown value' - endif - write(nu_diag,1030) ' wave_spec_type = ', trim(wave_spec_type),trim(tmpstr2) - endif - write(nu_diag,1020) ' nfreq = ', nfreq,' : number of wave spectral forcing frequencies' - endif - - write(nu_diag,*) ' ' - write(nu_diag,*) ' Age related tracers' - write(nu_diag,*) '--------------------------------' - write(nu_diag,1010) ' tr_iage = ', tr_iage,' : chronological ice age' - write(nu_diag,1010) ' tr_FY = ', tr_FY,' : first-year ice area' - - write(nu_diag,*) ' ' - write(nu_diag,*) ' Melt ponds' - write(nu_diag,*) '--------------------------------' - if (tr_pond_lvl) then - write(nu_diag,1010) ' tr_pond_lvl = ', tr_pond_lvl,' : level-ice pond formulation' - write(nu_diag,1002) ' pndaspect = ', pndaspect,' : ratio of pond depth to area fraction' - write(nu_diag,1000) ' dpscale = ', dpscale,' : time scale for flushing in permeable ice' - if (trim(frzpnd) == 'hlid') then - tmpstr2 = ' : Stefan refreezing with pond ice thickness' - elseif (trim(frzpnd) == 'cesm') then - tmpstr2 = ' : CESM refreezing empirical formula' - else - tmpstr2 = ' : unknown value' - endif - write(nu_diag,1030) ' frzpnd = ', trim(frzpnd),trim(tmpstr2) - write(nu_diag,1002) ' hs1 = ', hs1,' : snow depth of transition to pond ice' - elseif (tr_pond_topo) then - write(nu_diag,1010) ' tr_pond_topo = ', tr_pond_topo,' : topo pond formulation' - write(nu_diag,1002) ' hp1 = ', hp1,' : critical ice lid thickness for topo ponds' - elseif (trim(shortwave) == 'ccsm3') then - write(nu_diag,*) 'Pond effects on radiation are treated implicitly in the ccsm3 shortwave scheme' - else - write(nu_diag,*) 'Using default dEdd melt pond scheme for testing only' - endif - - if (trim(shortwave) == 'dEdd') then - write(nu_diag,1002) ' hs0 = ', hs0,' : snow depth of transition to bare sea ice' - endif - - write(nu_diag,1002) ' rfracmin = ', rfracmin,' : minimum fraction of melt water added to ponds' - write(nu_diag,1002) ' 
rfracmax = ', rfracmax,' : maximum fraction of melt water added to ponds' - - write(nu_diag,*) ' ' - write(nu_diag,*) ' Snow redistribution/metamorphism tracers' - write(nu_diag,*) '-----------------------------------------' - if (tr_snow) then - write(nu_diag,1010) ' tr_snow = ', tr_snow, & - ' : advanced snow physics' - if (snwredist(1:4) == 'none') then - write(nu_diag,1030) ' snwredist = ', trim(snwredist), & - ' : Snow redistribution scheme turned off' - else - if (snwredist(1:4) == 'bulk') then - write(nu_diag,1030) ' snwredist = ', trim(snwredist), & - ' : Using bulk snow redistribution scheme' - elseif (snwredist(1:6) == 'ITDrdg') then - write(nu_diag,1030) ' snwredist = ', trim(snwredist), & - ' : Using ridging based snow redistribution scheme' - write(nu_diag,1002) ' rhosnew = ', rhosnew, & - ' : new snow density (kg/m^3)' - write(nu_diag,1002) ' rhosmin = ', rhosmin, & - ' : minimum snow density (kg/m^3)' - write(nu_diag,1002) ' rhosmax = ', rhosmax, & - ' : maximum snow density (kg/m^3)' - write(nu_diag,1002) ' windmin = ', windmin, & - ' : minimum wind speed to compact snow (m/s)' - write(nu_diag,1002) ' drhosdwind = ', drhosdwind, & - ' : wind compaction factor (kg s/m^4)' - endif - write(nu_diag,1002) ' snwlvlfac = ', snwlvlfac, & - ' : fractional increase in snow depth for redistribution on ridges' - endif - if (.not. 
snwgrain) then - write(nu_diag,1010) ' snwgrain = ', snwgrain, & - ' : Snow metamorphosis turned off' - else - write(nu_diag,1010) ' snwgrain = ', snwgrain, & - ' : Using snow metamorphosis scheme' - write(nu_diag,1002) ' rsnw_tmax = ', rsnw_tmax, & - ' : maximum snow radius (10^-6 m)' - endif - write(nu_diag,1002) ' rsnw_fall = ', rsnw_fall, & - ' : radius of new snow (10^-6 m)' - if (snwgrain) then - if (use_smliq_pnd) then - tmpstr2 = ' : Using liquid water in snow for melt ponds' - else - tmpstr2 = ' : NOT using liquid water in snow for melt ponds' - endif - write(nu_diag,1010) ' use_smliq_pnd = ', use_smliq_pnd, trim(tmpstr2) - if (snw_aging_table == 'test') then - tmpstr2 = ' : Using 5x5x1 test matrix of internallly defined snow aging parameters' - write(nu_diag,1030) ' snw_aging_table = ', trim(snw_aging_table),trim(tmpstr2) - elseif (snw_aging_table == 'snicar') then - tmpstr2 = ' : Reading 3D snow aging parameters from SNICAR file' - write(nu_diag,1030) ' snw_aging_table = ', trim(snw_aging_table),trim(tmpstr2) - write(nu_diag,1031) ' snw_filename = ',trim(snw_filename) - write(nu_diag,1031) ' snw_tau_fname = ',trim(snw_tau_fname) - write(nu_diag,1031) ' snw_kappa_fname = ',trim(snw_kappa_fname) - write(nu_diag,1031) ' snw_drdt0_fname = ',trim(snw_drdt0_fname) - elseif (snw_aging_table == 'file') then - tmpstr2 = ' : Reading 1D and 3D snow aging dimensions and parameters from external file' - write(nu_diag,1030) ' snw_aging_table = ', trim(snw_aging_table),trim(tmpstr2) - write(nu_diag,1031) ' snw_filename = ',trim(snw_filename) - write(nu_diag,1031) ' snw_rhos_fname = ',trim(snw_rhos_fname) - write(nu_diag,1031) ' snw_Tgrd_fname = ',trim(snw_Tgrd_fname) - write(nu_diag,1031) ' snw_T_fname = ',trim(snw_T_fname) - write(nu_diag,1031) ' snw_tau_fname = ',trim(snw_tau_fname) - write(nu_diag,1031) ' snw_kappa_fname = ',trim(snw_kappa_fname) - write(nu_diag,1031) ' snw_drdt0_fname = ',trim(snw_drdt0_fname) - endif - endif - endif - - write(nu_diag,*) ' ' - 
write(nu_diag,*) ' Primary state variables, tracers' - write(nu_diag,*) ' (excluding biogeochemistry)' - write(nu_diag,*) '---------------------------------' - write(nu_diag,*) 'Conserved properties (all tracers are conserved):' - write(nu_diag,*) 'ice concentration, volume and enthalpy' - write(nu_diag,*) 'snow volume and enthalpy' - if (ktherm == 2) write(nu_diag,1030) ' ice salinity' - if (tr_fsd) write(nu_diag,1010) ' tr_fsd = ', tr_fsd,' : floe size distribution' - if (tr_lvl) write(nu_diag,1010) ' tr_lvl = ', tr_lvl,' : ridging related tracers' - if (tr_pond_lvl) write(nu_diag,1010) ' tr_pond_lvl = ', tr_pond_lvl,' : level-ice pond formulation' - if (tr_pond_topo) write(nu_diag,1010) ' tr_pond_topo = ', tr_pond_topo,' : topo pond formulation' - if (tr_snow) write(nu_diag,1010) ' tr_snow = ', tr_snow,' : advanced snow physics' - if (tr_iage) write(nu_diag,1010) ' tr_iage = ', tr_iage,' : chronological ice age' - if (tr_FY) write(nu_diag,1010) ' tr_FY = ', tr_FY,' : first-year ice area' - if (tr_iso) write(nu_diag,1010) ' tr_iso = ', tr_iso,' : diagnostic isotope tracers' - if (tr_aero) write(nu_diag,1010) ' tr_aero = ', tr_aero,' : CESM aerosol tracers' - write(nu_diag,*) 'Non-conserved properties:' - write(nu_diag,*) 'ice surface temperature' - write(nu_diag,*) 'ice velocity components and internal stress' - - write(nu_diag,*) ' ' - write(nu_diag,*) ' Other ice_in namelist parameters:' - write(nu_diag,*) '===================================== ' - if (trim(runid) /= 'unknown') & - write(nu_diag,1031) ' runid = ', trim(runid) - write(nu_diag,1031) ' runtype = ', trim(runtype) - write(nu_diag,1021) ' year_init = ', year_init - write(nu_diag,1021) ' month_init = ', month_init - write(nu_diag,1021) ' day_init = ', day_init - write(nu_diag,1021) ' sec_init = ', sec_init - write(nu_diag,1021) ' istep0 = ', istep0 - write(nu_diag,1031) ' npt_unit = ', trim(npt_unit) - write(nu_diag,1021) ' npt = ', npt - write(nu_diag,1021) ' diagfreq = ', diagfreq - 
write(nu_diag,1011) ' print_global = ', print_global - write(nu_diag,1011) ' print_points = ', print_points - write(nu_diag,1011) ' debug_model = ', debug_model - write(nu_diag,1022) ' debug_model_step = ', debug_model_step - write(nu_diag,1021) ' debug_model_i = ', debug_model_i - write(nu_diag,1021) ' debug_model_i = ', debug_model_j - write(nu_diag,1021) ' debug_model_iblk = ', debug_model_iblk - write(nu_diag,1021) ' debug_model_task = ', debug_model_task - write(nu_diag,1011) ' timer_stats = ', timer_stats - write(nu_diag,1011) ' memory_stats = ', memory_stats - write(nu_diag,1031) ' bfbflag = ', trim(bfbflag) - write(nu_diag,1021) ' numin = ', numin - write(nu_diag,1021) ' numax = ', numax - write(nu_diag,1033) ' histfreq = ', histfreq(:) - write(nu_diag,1023) ' histfreq_n = ', histfreq_n(:) - write(nu_diag,1031) ' histfreq_base = ', trim(histfreq_base) - write(nu_diag,1011) ' hist_avg = ', hist_avg - if (.not. hist_avg) write(nu_diag,1039) ' History data will be snapshots' - write(nu_diag,1031) ' history_dir = ', trim(history_dir) - write(nu_diag,1031) ' history_file = ', trim(history_file) - write(nu_diag,1021) ' history_precision= ', history_precision - write(nu_diag,1031) ' history_format = ', trim(history_format) - if (write_ic) then - write(nu_diag,1039) ' Initial condition will be written in ', & - trim(incond_dir) - endif - write(nu_diag,1031) ' dumpfreq = ', trim(dumpfreq) - write(nu_diag,1021) ' dumpfreq_n = ', dumpfreq_n - write(nu_diag,1031) ' dumpfreq_base = ', trim(dumpfreq_base) - write(nu_diag,1011) ' dump_last = ', dump_last - write(nu_diag,1011) ' restart = ', restart - write(nu_diag,1031) ' restart_dir = ', trim(restart_dir) - write(nu_diag,1011) ' restart_ext = ', restart_ext - write(nu_diag,1011) ' restart_coszen = ', restart_coszen - write(nu_diag,1031) ' restart_format = ', trim(restart_format) - write(nu_diag,1011) ' lcdf64 = ', lcdf64 - write(nu_diag,1031) ' restart_file = ', trim(restart_file) - write(nu_diag,1031) ' pointer_file = 
', trim(pointer_file) - write(nu_diag,1011) ' use_restart_time = ', use_restart_time - write(nu_diag,1031) ' ice_ic = ', trim(ice_ic) - if (trim(grid_type) /= 'rectangular' .or. & - trim(grid_type) /= 'column') then - write(nu_diag,1031) ' grid_file = ', trim(grid_file) - write(nu_diag,1031) ' gridcpl_file = ', trim(gridcpl_file) - write(nu_diag,1031) ' bathymetry_file = ', trim(bathymetry_file) - if (trim(kmt_type) == 'file') & - write(nu_diag,1031) ' kmt_file = ', trim(kmt_file) - endif - write(nu_diag,1011) ' orca_halogrid = ', orca_halogrid - - write(nu_diag,1011) ' conserv_check = ', conserv_check - - write(nu_diag,1021) ' fyear_init = ', fyear_init - write(nu_diag,1021) ' ycycle = ', ycycle - write(nu_diag,1031) ' atm_data_type = ', trim(atm_data_type) - if (trim(atm_data_type) /= 'default') then - write(nu_diag,1031) ' atm_data_dir = ', trim(atm_data_dir) - write(nu_diag,1031) ' precip_units = ', trim(precip_units) - elseif (trim(atm_data_type)=='default') then - write(nu_diag,1031) ' default_season = ', trim(default_season) - endif - - if (wave_spec) then - write(nu_diag,1031) ' wave_spec_file = ', trim(wave_spec_file) - endif - if (trim(bgc_data_type) == 'ncar' .or. & - trim(ocn_data_type) == 'ncar') then - write(nu_diag,1031) ' oceanmixed_file = ', trim(oceanmixed_file) - endif - if (cpl_bgc) then - write(nu_diag,*) 'BGC coupling is switched ON' - else - write(nu_diag,*) 'BGC coupling is switched OFF' - endif - write(nu_diag,1031) ' bgc_data_type = ', trim(bgc_data_type) - write(nu_diag,1031) ' fe_data_type = ', trim(fe_data_type) - write(nu_diag,1031) ' ice_data_type = ', trim(ice_data_type) - write(nu_diag,1031) ' ice_data_conc = ', trim(ice_data_conc) - write(nu_diag,1031) ' ice_data_dist = ', trim(ice_data_dist) - write(nu_diag,1031) ' bgc_data_dir = ', trim(bgc_data_dir) - write(nu_diag,1031) ' ocn_data_type = ', trim(ocn_data_type) - if (trim(bgc_data_type) /= 'default' .or. 
& - trim(ocn_data_type) /= 'default') then - write(nu_diag,1031) ' ocn_data_dir = ', trim(ocn_data_dir) - write(nu_diag,1011) ' restore_ocn = ', restore_ocn - endif - write(nu_diag,1011) ' restore_ice = ', restore_ice - if (restore_ice .or. restore_ocn) & - write(nu_diag,1021) ' trestore = ', trestore - - write(nu_diag,*) ' ' - write(nu_diag,'(a31,2f8.2)') 'Diagnostic point 1: lat, lon =', & - latpnt(1), lonpnt(1) - write(nu_diag,'(a31,2f8.2)') 'Diagnostic point 2: lat, lon =', & - latpnt(2), lonpnt(2) - write(nu_diag,*) ' ' - - ! tracer restarts - write(nu_diag,1011) ' restart_age = ', restart_age - write(nu_diag,1011) ' restart_FY = ', restart_FY - write(nu_diag,1011) ' restart_lvl = ', restart_lvl - write(nu_diag,1011) ' restart_pond_lvl = ', restart_pond_lvl - write(nu_diag,1011) ' restart_pond_topo= ', restart_pond_topo - write(nu_diag,1011) ' restart_snow = ', restart_snow - write(nu_diag,1011) ' restart_iso = ', restart_iso - write(nu_diag,1011) ' restart_aero = ', restart_aero - write(nu_diag,1011) ' restart_fsd = ', restart_fsd - - write(nu_diag,1021) ' n_iso = ', n_iso - write(nu_diag,1021) ' n_aero = ', n_aero - write(nu_diag,1021) ' n_zaero = ', n_zaero - write(nu_diag,1021) ' n_algae = ', n_algae - write(nu_diag,1021) ' n_doc = ', n_doc - write(nu_diag,1021) ' n_dic = ', n_dic - write(nu_diag,1021) ' n_don = ', n_don - write(nu_diag,1021) ' n_fed = ', n_fed - write(nu_diag,1021) ' n_fep = ', n_fep - write(nu_diag,*) ' ' - - endif ! my_task = master_task - - if (grid_type /= 'displaced_pole' .and. & - grid_type /= 'tripole' .and. & - grid_type /= 'column' .and. & - grid_type /= 'rectangular' .and. & - grid_type /= 'cpom_grid' .and. & - grid_type /= 'regional' .and. & - grid_type /= 'latlon' .and. & - grid_type /= 'setmask' ) then - if (my_task == master_task) write(nu_diag,*) subname//' ERROR: unknown grid_type=',trim(grid_type) - abort_list = trim(abort_list)//":20" - endif - - if (grid_ice /= 'B' .and. & - grid_ice /= 'C' .and. 
& - grid_ice /= 'CD' ) then - if (my_task == master_task) write(nu_diag,*) subname//' ERROR: unknown grid_ice=',trim(grid_ice) - abort_list = trim(abort_list)//":26" - endif - - if (kmt_type /= 'file' .and. & - kmt_type /= 'channel' .and. & - kmt_type /= 'wall' .and. & - kmt_type /= 'default' .and. & - kmt_type /= 'boxislands') then - if (my_task == master_task) write(nu_diag,*) subname//' ERROR: unknown kmt_type=',trim(kmt_type) - abort_list = trim(abort_list)//":27" - endif - - if (grid_type /= 'column' .and. & - grid_type /= 'rectangular' .and. & - kmt_type /= 'file') then - if (my_task == master_task) write(nu_diag,*) subname//' ERROR: need kmt file, kmt_type=',trim(kmt_type) - abort_list = trim(abort_list)//":28" - endif - - if (kdyn == 1 .and. & - evp_algorithm /= 'standard_2d' .and. & - evp_algorithm /= 'shared_mem_1d') then - if (my_task == master_task) write(nu_diag,*) subname//' ERROR: unknown evp_algorithm=',trim(evp_algorithm) - abort_list = trim(abort_list)//":21" - endif - - if (abort_list /= "") then - call flush_fileunit(nu_diag) - endif - call ice_barrier() - if (abort_list /= "") then - write(nu_diag,*) subname,' ERROR: abort_list = ',trim(abort_list) - call abort_ice (subname//' ABORTING on input ERRORS', & - file=__FILE__, line=__LINE__) - endif - - call icepack_init_parameters(ustar_min_in=ustar_min, albicev_in=albicev, albicei_in=albicei, & - albsnowv_in=albsnowv, albsnowi_in=albsnowi, natmiter_in=natmiter, atmiter_conv_in=atmiter_conv, & - emissivity_in=emissivity, & - ahmax_in=ahmax, shortwave_in=shortwave, albedo_type_in=albedo_type, R_ice_in=R_ice, R_pnd_in=R_pnd, & - R_snw_in=R_snw, dT_mlt_in=dT_mlt, rsnw_mlt_in=rsnw_mlt, & - kstrength_in=kstrength, krdg_partic_in=krdg_partic, krdg_redist_in=krdg_redist, mu_rdg_in=mu_rdg, & - atmbndy_in=atmbndy, calc_strair_in=calc_strair, formdrag_in=formdrag, highfreq_in=highfreq, & - kitd_in=kitd, kcatbound_in=kcatbound, hs0_in=hs0, dpscale_in=dpscale, frzpnd_in=frzpnd, & - rfracmin_in=rfracmin, 
rfracmax_in=rfracmax, pndaspect_in=pndaspect, hs1_in=hs1, hp1_in=hp1, & - ktherm_in=ktherm, calc_Tsfc_in=calc_Tsfc, conduct_in=conduct, & - a_rapid_mode_in=a_rapid_mode, Rac_rapid_mode_in=Rac_rapid_mode, & - floediam_in=floediam, hfrazilmin_in=hfrazilmin, & - aspect_rapid_mode_in=aspect_rapid_mode, dSdt_slow_mode_in=dSdt_slow_mode, & - phi_c_slow_mode_in=phi_c_slow_mode, phi_i_mushy_in=phi_i_mushy, conserv_check_in=conserv_check, & - wave_spec_type_in = wave_spec_type, & - wave_spec_in=wave_spec, nfreq_in=nfreq, & - tfrz_option_in=tfrz_option, kalg_in=kalg, fbot_xfer_type_in=fbot_xfer_type, & - Pstar_in=Pstar, Cstar_in=Cstar, iceruf_in=iceruf, iceruf_ocn_in=iceruf_ocn, calc_dragio_in=calc_dragio, & - windmin_in=windmin, drhosdwind_in=drhosdwind, & - rsnw_fall_in=rsnw_fall, rsnw_tmax_in=rsnw_tmax, rhosnew_in=rhosnew, & - snwlvlfac_in=snwlvlfac, rhosmin_in=rhosmin, rhosmax_in=rhosmax, & - snwredist_in=snwredist, snwgrain_in=snwgrain, snw_aging_table_in=trim(snw_aging_table), & - sw_redist_in=sw_redist, sw_frac_in=sw_frac, sw_dtemp_in=sw_dtemp) - call icepack_init_tracer_flags(tr_iage_in=tr_iage, tr_FY_in=tr_FY, & - tr_lvl_in=tr_lvl, tr_iso_in=tr_iso, tr_aero_in=tr_aero, & - tr_fsd_in=tr_fsd, tr_snow_in=tr_snow, tr_pond_in=tr_pond, & - tr_pond_lvl_in=tr_pond_lvl, tr_pond_topo_in=tr_pond_topo) - call icepack_init_tracer_sizes(ncat_in=ncat, nilyr_in=nilyr, nslyr_in=nslyr, nblyr_in=nblyr, & - nfsd_in=nfsd, n_algae_in=n_algae, n_iso_in=n_iso, n_aero_in=n_aero, & - n_DOC_in=n_DOC, n_DON_in=n_DON, & - n_DIC_in=n_DIC, n_fed_in=n_fed, n_fep_in=n_fep, n_zaero_in=n_zaero) - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) - - 1000 format (a20,1x,f13.6,1x,a) ! float - 1002 format (a20,5x,f9.2,1x,a) - 1003 format (a20,1x,G13.4,1x,a) - 1009 format (a20,1x,d13.6,1x,a) - 1010 format (a20,8x,l6,1x,a) ! logical - 1011 format (a20,1x,l6) - 1020 format (a20,8x,i6,1x,a) ! 
integer - 1021 format (a20,1x,i6) - 1022 format (a20,1x,i12) - 1023 format (a20,1x,6i6) - 1030 format (a20,a14,1x,a) ! character - 1031 format (a20,1x,a,a) - 1033 format (a20,1x,6a6) - 1039 format (a,1x,a,1x,a,1x,a) - - end subroutine input_data - -!======================================================================= - -! Initialize state for the itd model -! -! authors: C. M. Bitz, UW -! William H. Lipscomb, LANL - - subroutine init_state - - use ice_blocks, only: block, get_block, nx_block, ny_block - use ice_domain, only: nblocks, blocks_ice, halo_info - use ice_domain_size, only: ncat, nilyr, nslyr, n_iso, n_aero, nfsd - use ice_flux, only: sst, Tf, Tair, salinz, Tmltz - use ice_grid, only: tmask, ULON, TLAT, grid_ice, grid_average_X2Y - use ice_boundary, only: ice_HaloUpdate - use ice_constants, only: field_loc_Nface, field_loc_Eface, field_type_scalar - use ice_state, only: trcr_depend, aicen, trcrn, vicen, vsnon, & - aice0, aice, vice, vsno, trcr, aice_init, bound_state, & - n_trcr_strata, nt_strata, trcr_base, uvel, vvel, & - uvelN, vvelN, uvelE, vvelE - - integer (kind=int_kind) :: & - ilo, ihi , & ! physical domain indices - jlo, jhi , & ! physical domain indices - iglob(nx_block), & ! global indices - jglob(ny_block), & ! global indices - i, j , & ! horizontal indices - k , & ! vertical index - it , & ! tracer index - iblk ! block index - - - integer (kind=int_kind) :: ntrcr - logical (kind=log_kind) :: tr_iage, tr_FY, tr_lvl, tr_iso, tr_aero - logical (kind=log_kind) :: tr_pond_lvl, tr_pond_topo - logical (kind=log_kind) :: tr_snow, tr_fsd - integer (kind=int_kind) :: nt_Tsfc, nt_sice, nt_qice, nt_qsno, nt_iage, nt_FY - integer (kind=int_kind) :: nt_alvl, nt_vlvl, nt_apnd, nt_hpnd, nt_ipnd - integer (kind=int_kind) :: nt_smice, nt_smliq, nt_rhos, nt_rsnw - integer (kind=int_kind) :: nt_isosno, nt_isoice, nt_aero, nt_fsd - - type (block) :: & - this_block ! 
block information for current block - - character(len=*), parameter :: subname='(init_state)' - - !----------------------------------------------------------------- - - call icepack_query_tracer_sizes(ntrcr_out=ntrcr) - call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & - tr_lvl_out=tr_lvl, tr_iso_out=tr_iso, tr_aero_out=tr_aero, & - tr_pond_lvl_out=tr_pond_lvl, tr_pond_topo_out=tr_pond_topo, & - tr_snow_out=tr_snow, tr_fsd_out=tr_fsd) - call icepack_query_tracer_indices(nt_Tsfc_out=nt_Tsfc, nt_sice_out=nt_sice, & - nt_qice_out=nt_qice, nt_qsno_out=nt_qsno, nt_iage_out=nt_iage, nt_fy_out=nt_fy, & - nt_alvl_out=nt_alvl, nt_vlvl_out=nt_vlvl, nt_apnd_out=nt_apnd, nt_hpnd_out=nt_hpnd, & - nt_ipnd_out=nt_ipnd, nt_aero_out=nt_aero, nt_fsd_out=nt_fsd, & - nt_smice_out=nt_smice, nt_smliq_out=nt_smliq, & - nt_rhos_out=nt_rhos, nt_rsnw_out=nt_rsnw, & - nt_isosno_out=nt_isosno, nt_isoice_out=nt_isoice) - - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) - - !----------------------------------------------------------------- - ! Check number of layers in ice and snow. - !----------------------------------------------------------------- - - if (my_task == master_task) then - - if (nilyr < 1) then - write(nu_diag,*) subname//' ERROR: Must have at least one ice layer' - write(nu_diag,*) subname//' ERROR: nilyr =', nilyr - call abort_ice (error_message=subname//' Not enough ice layers', & - file=__FILE__, line=__LINE__) - endif - - if (nslyr < 1) then - write(nu_diag,*) subname//' ERROR: Must have at least one snow layer' - write(nu_diag,*) subname//' ERROR: nslyr =', nslyr - call abort_ice(error_message=subname//' Not enough snow layers', & - file=__FILE__, line=__LINE__) - endif - - endif ! my_task - - !----------------------------------------------------------------- - ! 
Set tracer types - !----------------------------------------------------------------- - - trcr_depend(nt_Tsfc) = 0 ! ice/snow surface temperature - do k = 1, nilyr - trcr_depend(nt_sice + k - 1) = 1 ! volume-weighted ice salinity - trcr_depend(nt_qice + k - 1) = 1 ! volume-weighted ice enthalpy - enddo - do k = 1, nslyr - trcr_depend(nt_qsno + k - 1) = 2 ! volume-weighted snow enthalpy - enddo - if (tr_iage) trcr_depend(nt_iage) = 1 ! volume-weighted ice age - if (tr_FY) trcr_depend(nt_FY) = 0 ! area-weighted first-year ice area - if (tr_lvl) trcr_depend(nt_alvl) = 0 ! level ice area - if (tr_lvl) trcr_depend(nt_vlvl) = 1 ! level ice volume - if (tr_pond_lvl) then - trcr_depend(nt_apnd) = 2+nt_alvl ! melt pond area - trcr_depend(nt_hpnd) = 2+nt_apnd ! melt pond depth - trcr_depend(nt_ipnd) = 2+nt_apnd ! refrozen pond lid - endif - if (tr_pond_topo) then - trcr_depend(nt_apnd) = 0 ! melt pond area - trcr_depend(nt_hpnd) = 2+nt_apnd ! melt pond depth - trcr_depend(nt_ipnd) = 2+nt_apnd ! refrozen pond lid - endif - if (tr_snow) then ! snow-volume-weighted snow tracers - do k = 1, nslyr - trcr_depend(nt_smice + k - 1) = 2 ! ice mass in snow - trcr_depend(nt_smliq + k - 1) = 2 ! liquid mass in snow - trcr_depend(nt_rhos + k - 1) = 2 ! effective snow density - trcr_depend(nt_rsnw + k - 1) = 2 ! snow radius - enddo - endif - if (tr_fsd) then - do it = 1, nfsd - trcr_depend(nt_fsd + it - 1) = 0 ! area-weighted floe size distribution - enddo - endif - if (tr_iso) then ! isotopes - do it = 1, n_iso - trcr_depend(nt_isosno+it-1) = 2 ! snow - trcr_depend(nt_isoice+it-1) = 1 ! ice - enddo - endif - if (tr_aero) then ! volume-weighted aerosols - do it = 1, n_aero - trcr_depend(nt_aero+(it-1)*4 ) = 2 ! snow - trcr_depend(nt_aero+(it-1)*4+1) = 2 ! snow - trcr_depend(nt_aero+(it-1)*4+2) = 1 ! ice - trcr_depend(nt_aero+(it-1)*4+3) = 1 ! ice - enddo - endif - - trcr_base = c0 - - do it = 1, ntrcr - ! 
mask for base quantity on which tracers are carried - if (trcr_depend(it) == 0) then ! area - trcr_base(it,1) = c1 - elseif (trcr_depend(it) == 1) then ! ice volume - trcr_base(it,2) = c1 - elseif (trcr_depend(it) == 2) then ! snow volume - trcr_base(it,3) = c1 - else - trcr_base(it,1) = c1 ! default: ice area - trcr_base(it,2) = c0 - trcr_base(it,3) = c0 - endif - - ! initialize number of underlying tracer layers - n_trcr_strata(it) = 0 - ! default indices of underlying tracer layers - nt_strata (it,1) = 0 - nt_strata (it,2) = 0 - enddo - - if (tr_pond_lvl) then - n_trcr_strata(nt_apnd) = 1 ! melt pond area - nt_strata (nt_apnd,1) = nt_alvl ! on level ice area - n_trcr_strata(nt_hpnd) = 2 ! melt pond depth - nt_strata (nt_hpnd,2) = nt_apnd ! on melt pond area - nt_strata (nt_hpnd,1) = nt_alvl ! on level ice area - n_trcr_strata(nt_ipnd) = 2 ! refrozen pond lid - nt_strata (nt_ipnd,2) = nt_apnd ! on melt pond area - nt_strata (nt_ipnd,1) = nt_alvl ! on level ice area - endif - if (tr_pond_topo) then - n_trcr_strata(nt_hpnd) = 1 ! melt pond depth - nt_strata (nt_hpnd,1) = nt_apnd ! on melt pond area - n_trcr_strata(nt_ipnd) = 1 ! refrozen pond lid - nt_strata (nt_ipnd,1) = nt_apnd ! on melt pond area - endif - - !----------------------------------------------------------------- - ! 
Set state variables - !----------------------------------------------------------------- - - !$OMP PARALLEL DO PRIVATE(iblk,ilo,ihi,jlo,jhi,this_block, & - !$OMP iglob,jglob) - do iblk = 1, nblocks - - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi - iglob = this_block%i_glob - jglob = this_block%j_glob - - call set_state_var (nx_block, ny_block, & - ilo, ihi, jlo, jhi, & - iglob, jglob, & - ice_ic, tmask(:,:, iblk), & - ULON (:,:, iblk), & - TLAT (:,:, iblk), & - Tair (:,:, iblk), sst (:,:, iblk), & - Tf (:,:, iblk), & - salinz(:,:,:, iblk), Tmltz(:,:,:, iblk), & - aicen(:,:, :,iblk), trcrn(:,:,:,:,iblk), & - vicen(:,:, :,iblk), vsnon(:,:, :,iblk), & - uvel (:,:, iblk), vvel (:,:, iblk)) - - enddo ! iblk - !$OMP END PARALLEL DO - - !----------------------------------------------------------------- - ! ghost cell updates - !----------------------------------------------------------------- - - call bound_state (aicen, & - vicen, vsnon, & - ntrcr, trcrn) - - if (grid_ice == 'CD' .or. grid_ice == 'C') then - - call grid_average_X2Y('S',uvel,'U',uvelN,'N') - call grid_average_X2Y('S',vvel,'U',vvelN,'N') - call grid_average_X2Y('S',uvel,'U',uvelE,'E') - call grid_average_X2Y('S',vvel,'U',vvelE,'E') - - ! Halo update on North, East faces - call ice_HaloUpdate(uvelN, halo_info, & - field_loc_Nface, field_type_scalar) - call ice_HaloUpdate(vvelN, halo_info, & - field_loc_Nface, field_type_scalar) - - call ice_HaloUpdate(uvelE, halo_info, & - field_loc_Eface, field_type_scalar) - call ice_HaloUpdate(vvelE, halo_info, & - field_loc_Eface, field_type_scalar) - - endif - - - !----------------------------------------------------------------- - ! 
compute aggregate ice state and open water area - !----------------------------------------------------------------- - - !$OMP PARALLEL DO PRIVATE(iblk,it,i,j) - do iblk = 1, nblocks - - do j = 1, ny_block - do i = 1, nx_block - aice(i,j,iblk) = c0 - vice(i,j,iblk) = c0 - vsno(i,j,iblk) = c0 - do it = 1, ntrcr - trcr(i,j,it,iblk) = c0 - enddo - - if (tmask(i,j,iblk)) & - call icepack_aggregate(ncat = ncat, & - aicen = aicen(i,j,:,iblk), & - trcrn = trcrn(i,j,:,:,iblk), & - vicen = vicen(i,j,:,iblk), & - vsnon = vsnon(i,j,:,iblk), & - aice = aice (i,j, iblk), & - trcr = trcr (i,j,:,iblk), & - vice = vice (i,j, iblk), & - vsno = vsno (i,j, iblk), & - aice0 = aice0(i,j, iblk), & - ntrcr = ntrcr, & - trcr_depend = trcr_depend(:), & - trcr_base = trcr_base(:,:), & - n_trcr_strata = n_trcr_strata(:), & - nt_strata = nt_strata(:,:)) - - aice_init(i,j,iblk) = aice(i,j,iblk) - - enddo - enddo - - enddo ! iblk - !$OMP END PARALLEL DO - - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) - - end subroutine init_state - -!======================================================================= - -! Initialize state in each ice thickness category -! -! authors: C. M. Bitz -! William H. Lipscomb, LANL - - subroutine set_state_var (nx_block, ny_block, & - ilo, ihi, jlo, jhi, & - iglob, jglob, & - ice_ic, tmask, & - ULON, & - TLAT, & - Tair, sst, & - Tf, & - salinz, Tmltz, & - aicen, trcrn, & - vicen, vsnon, & - uvel, vvel) - - - use ice_arrays_column, only: hin_max - use ice_domain_size, only: nilyr, nslyr, nx_global, ny_global, ncat - use ice_grid, only: dxrect, dyrect - use ice_forcing, only: ice_data_type, ice_data_conc, ice_data_dist - - integer (kind=int_kind), intent(in) :: & - nx_block, ny_block, & ! block dimensions - ilo, ihi , & ! physical domain indices - jlo, jhi , & ! - iglob(nx_block) , & ! global indices - jglob(ny_block) ! 
- - character(len=char_len_long), intent(in) :: & - ice_ic ! method of ice cover initialization - - logical (kind=log_kind), dimension (nx_block,ny_block), intent(in) :: & - tmask ! true for ice/ocean cells - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - ULON , & ! longitude of velocity pts (radians) - TLAT ! latitude of temperature pts (radians) - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - Tair , & ! air temperature (K) - Tf , & ! freezing temperature (C) - sst ! sea surface temperature (C) - - real (kind=dbl_kind), dimension (nx_block,ny_block,nilyr), intent(in) :: & - salinz , & ! initial salinity profile - Tmltz ! initial melting temperature profile - - real (kind=dbl_kind), dimension (nx_block,ny_block,ncat), intent(out) :: & - aicen , & ! concentration of ice - vicen , & ! volume per unit area of ice (m) - vsnon ! volume per unit area of snow (m) - - real (kind=dbl_kind), intent(out), dimension (:,:,:,:) :: & ! (nx_block,ny_block,ntrcr,ncat) - trcrn ! ice tracers - ! 1: surface temperature of ice/snow (C) - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(out) :: & - uvel , & ! ice velocity B grid - vvel ! - - ! local variables - integer (kind=int_kind) :: & - i, j , & ! horizontal indices - ij , & ! horizontal index, combines i and j loops - k , & ! ice layer index - n , & ! thickness category index - it , & ! tracer index - iedge , & ! edge around big block - jedge , & ! edge around big block - icells ! number of cells initialized with ice - - logical (kind=log_kind) :: & - in_slot, in_cyl ! boxslotcyl flags - - real (kind=dbl_kind) :: & ! boxslotcyl parameters - diam , & ! cylinder diameter - radius , & ! cylinder radius - center_x, & ! cylinder center - center_y, & - width , & ! slot width - length ! slot height - - integer (kind=int_kind), dimension(nx_block*ny_block) :: & - indxi, indxj ! 
compressed indices for cells with aicen > puny - - real (kind=dbl_kind) :: & - Tsfc, sum, hbar, abar, puny, rhos, Lfresh, rad_to_deg, rsnw_fall, dist_ratio - - real (kind=dbl_kind), dimension(ncat) :: & - ainit, hinit ! initial area, thickness - - real (kind=dbl_kind), dimension(nilyr) :: & - qin ! ice enthalpy (J/m3) - - real (kind=dbl_kind), dimension(nslyr) :: & - qsn ! snow enthalpy (J/m3) - - real (kind=dbl_kind), parameter :: & - hsno_init = 0.20_dbl_kind , & ! initial snow thickness (m) - edge_init_nh = 70._dbl_kind, & ! initial ice edge, N.Hem. (deg) - edge_init_sh = -60._dbl_kind ! initial ice edge, S.Hem. (deg) - - real (kind=dbl_kind) :: & ! boxslotcyl - pi , & ! pi - secday , & ! seconds per day - max_vel , & ! max velocity - domain_length , & ! physical domain length - period ! rotational period - - logical (kind=log_kind) :: tr_brine, tr_lvl, tr_snow - integer (kind=int_kind) :: ntrcr - integer (kind=int_kind) :: nt_Tsfc, nt_qice, nt_qsno, nt_sice - integer (kind=int_kind) :: nt_fbri, nt_alvl, nt_vlvl - integer (kind=int_kind) :: nt_smice, nt_smliq, nt_rhos, nt_rsnw - - character(len=*), parameter :: subname='(set_state_var)' - - !----------------------------------------------------------------- - - call icepack_query_tracer_sizes(ntrcr_out=ntrcr) - call icepack_query_tracer_flags(tr_brine_out=tr_brine, tr_lvl_out=tr_lvl, & - tr_snow_out=tr_snow) - call icepack_query_tracer_indices( nt_Tsfc_out=nt_Tsfc, nt_qice_out=nt_qice, & - nt_qsno_out=nt_qsno, nt_sice_out=nt_sice, & - nt_fbri_out=nt_fbri, nt_alvl_out=nt_alvl, nt_vlvl_out=nt_vlvl, & - nt_smice_out=nt_smice, nt_smliq_out=nt_smliq, & - nt_rhos_out=nt_rhos, nt_rsnw_out=nt_rsnw) - call icepack_query_parameters(rhos_out=rhos, Lfresh_out=Lfresh, puny_out=puny, & - rad_to_deg_out=rad_to_deg, rsnw_fall_out=rsnw_fall) - call icepack_query_parameters(secday_out=secday, pi_out=pi) - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, 
line=__LINE__) - - indxi(:) = 0 - indxj(:) = 0 - - ! Initialize state variables. - ! If restarting, these values are overwritten. - - do n = 1, ncat - do j = 1, ny_block - do i = 1, nx_block - aicen(i,j,n) = c0 - vicen(i,j,n) = c0 - vsnon(i,j,n) = c0 - if (tmask(i,j)) then - trcrn(i,j,nt_Tsfc,n) = Tf(i,j) ! surface temperature - else - trcrn(i,j,nt_Tsfc,n) = c0 ! at land grid cells (for clean history/restart files) - endif - if (ntrcr >= 2) then - do it = 2, ntrcr - trcrn(i,j,it,n) = c0 - enddo - endif - if (tr_lvl) trcrn(i,j,nt_alvl,n) = c1 - if (tr_lvl) trcrn(i,j,nt_vlvl,n) = c1 - if (tr_brine) trcrn(i,j,nt_fbri,n) = c1 - do k = 1, nilyr - trcrn(i,j,nt_sice+k-1,n) = salinz(i,j,k) - enddo - do k = 1, nslyr - trcrn(i,j,nt_qsno+k-1,n) = -rhos * Lfresh - enddo - if (tr_snow) then - do k = 1, nslyr - trcrn(i,j,nt_rsnw +k-1,n) = rsnw_fall - trcrn(i,j,nt_rhos +k-1,n) = rhos - trcrn(i,j,nt_smice+k-1,n) = rhos - trcrn(i,j,nt_smliq+k-1,n) = c0 - enddo ! nslyr - endif - enddo - enddo - enddo - - if (trim(ice_ic) == 'internal') then - - !--------------------------------------------------------- - ! ice concentration/thickness - !--------------------------------------------------------- - - if (trim(ice_data_conc) == 'p5' .or. & - trim(ice_data_conc) == 'p8' .or. & - trim(ice_data_conc) == 'p9' .or. & - trim(ice_data_conc) == 'c1' .or. & - trim(ice_data_conc) == 'box2001') then - - if (trim(ice_data_conc) == 'p5') then - hbar = c2 ! initial ice thickness - abar = p5 ! initial ice concentration - elseif (trim(ice_data_conc) == 'p8') then - hbar = c1 ! initial ice thickness - abar = 0.8_dbl_kind ! initial ice concentration - elseif (trim(ice_data_conc) == 'p9') then - hbar = c1 ! initial ice thickness - abar = 0.9_dbl_kind ! initial ice concentration - elseif (trim(ice_data_conc) == 'c1') then - hbar = c1 ! initial ice thickness - abar = c1 ! initial ice concentration - elseif (trim(ice_data_conc) == 'box2001') then - hbar = c2 ! initial ice thickness - abar = p5 ! 
initial ice concentration - endif - - do n = 1, ncat - hinit(n) = c0 - ainit(n) = c0 - if (hbar > hin_max(n-1) .and. hbar <= hin_max(n)) then - hinit(n) = hbar - ainit(n) = abar - endif - enddo - - elseif (trim(ice_data_conc) == 'parabolic') then - - ! initial category areas in cells with ice - hbar = c3 ! initial ice thickness with greatest area - ! Note: the resulting average ice thickness - ! tends to be less than hbar due to the - ! nonlinear distribution of ice thicknesses - sum = c0 - do n = 1, ncat - if (n < ncat) then - hinit(n) = p5*(hin_max(n-1) + hin_max(n)) ! m - else ! n=ncat - hinit(n) = (hin_max(n-1) + c1) ! m - endif - ! parabola, max at h=hbar, zero at h=0, 2*hbar - ainit(n) = max(c0, (c2*hbar*hinit(n) - hinit(n)**2)) - sum = sum + ainit(n) - enddo - do n = 1, ncat - ainit(n) = ainit(n) / (sum + puny/ncat) ! normalize - enddo - - else - - call abort_ice(subname//'ERROR: ice_data_conc setting = '//trim(ice_data_conc), & - file=__FILE__, line=__LINE__) - - endif ! ice_data_conc - - !--------------------------------------------------------- - ! location of ice - !--------------------------------------------------------- - - if (trim(ice_data_type) == 'box2001') then - - ! place ice on left side of domain - icells = 0 - do j = jlo, jhi - do i = ilo, ihi - if (tmask(i,j)) then - if (ULON(i,j) < -50./rad_to_deg) then - icells = icells + 1 - indxi(icells) = i - indxj(icells) = j - endif ! ULON - endif ! tmask - enddo ! i - enddo ! j - - elseif (trim(ice_data_type) == 'boxslotcyl') then - - ! Geometric configuration of the slotted cylinder - diam = p3 *dxrect*(nx_global-1) - center_x = p5 *dxrect*(nx_global-1) - center_y = p75*dyrect*(ny_global-1) - radius = p5*diam - width = p166*diam - length = c5*p166*diam - - icells = 0 - do j = jlo, jhi - do i = ilo, ihi - if (tmask(i,j)) then - ! check if grid point is inside slotted cylinder - in_slot = (dxrect*real(iglob(i)-1, kind=dbl_kind) >= center_x - width/c2) .and. 
& - (dxrect*real(iglob(i)-1, kind=dbl_kind) <= center_x + width/c2) .and. & - (dyrect*real(jglob(j)-1, kind=dbl_kind) >= center_y - radius) .and. & - (dyrect*real(jglob(j)-1, kind=dbl_kind) <= center_y + (length - radius)) - - in_cyl = sqrt((dxrect*real(iglob(i)-1, kind=dbl_kind) - center_x)**c2 + & - (dyrect*real(jglob(j)-1, kind=dbl_kind) - center_y)**c2) <= radius - - if (in_cyl .and. .not. in_slot) then - icells = icells + 1 - indxi(icells) = i - indxj(icells) = j - endif - endif - enddo - enddo - - elseif (trim(ice_data_type) == 'uniform') then - ! all cells not land mask are ice - icells = 0 - do j = jlo, jhi - do i = ilo, ihi - if (tmask(i,j)) then - icells = icells + 1 - indxi(icells) = i - indxj(icells) = j - endif - enddo - enddo - - elseif (trim(ice_data_type) == 'channel') then - ! channel ice in center of domain in i direction - icells = 0 - do j = jlo, jhi - do i = ilo, ihi - if (jglob(j) > ny_global/4 .and. jglob(j) < 3*nx_global/4) then - icells = icells + 1 - indxi(icells) = i - indxj(icells) = j - endif - enddo - enddo - - elseif (trim(ice_data_type) == 'block') then - ! ice in 50% of domain, not at edges - icells = 0 - iedge = int(real(nx_global,kind=dbl_kind) * 0.25) + 1 - jedge = int(real(ny_global,kind=dbl_kind) * 0.25) + 1 - do j = jlo, jhi - do i = ilo, ihi - if ((iglob(i) > iedge .and. iglob(i) < nx_global-iedge+1) .and. & - (jglob(j) > jedge .and. jglob(j) < ny_global-jedge+1)) then - icells = icells + 1 - indxi(icells) = i - indxj(icells) = j - endif - enddo - enddo - - elseif (trim(ice_data_type) == 'eastblock') then - ! block on east half of domain in center of domain - icells = 0 - do j = jlo, jhi - do i = ilo, ihi - if (jglob(j) > ny_global/4 .and. jglob(j) < 3*nx_global/4 .and. & - iglob(i) >= nx_global/2) then - icells = icells + 1 - indxi(icells) = i - indxj(icells) = j - endif - enddo - enddo - - elseif (trim(ice_data_type) == 'latsst') then - - !----------------------------------------------------------------- - ! 
Place ice where ocean surface is cold. - ! Note: If SST is not read from a file, then the ocean is assumed - ! to be at its freezing point everywhere, and ice will - ! extend to the prescribed edges. - !----------------------------------------------------------------- - - icells = 0 - do j = jlo, jhi - do i = ilo, ihi - if (tmask(i,j)) then - ! place ice in high latitudes where ocean sfc is cold - if ( (sst (i,j) <= Tf(i,j)+p2) .and. & - (TLAT(i,j) < edge_init_sh/rad_to_deg .or. & - TLAT(i,j) > edge_init_nh/rad_to_deg) ) then - icells = icells + 1 - indxi(icells) = i - indxj(icells) = j - endif ! cold surface - endif ! tmask - enddo ! i - enddo ! j - - else - - call abort_ice(subname//'ERROR: ice_data_type setting = '//trim(ice_data_type), & - file=__FILE__, line=__LINE__) - - endif ! ice_data_type - - !--------------------------------------------------------- - ! ice distribution - !--------------------------------------------------------- - - do n = 1, ncat - - ! ice volume, snow volume - do ij = 1, icells - i = indxi(ij) - j = indxj(ij) - - aicen(i,j,n) = ainit(n) - - if (trim(ice_data_dist) == 'box2001') then - if (hinit(n) > c0) then -! ! varies linearly from 0 to 1 in x direction - aicen(i,j,n) = (real(iglob(i), kind=dbl_kind)-p5) & - / (real(nx_global,kind=dbl_kind)) -! ! constant slope from 0 to 0.5 in x direction -! aicen(i,j,n) = (real(iglob(i), kind=dbl_kind)-p5) & -! / (real(nx_global,kind=dbl_kind)) * p5 -! ! quadratic -! aicen(i,j,n) = max(c0,(real(iglob(i), kind=dbl_kind)-p5) & -! / (real(nx_global,kind=dbl_kind)) & -! * (real(jglob(j), kind=dbl_kind)-p5) & -! / (real(ny_global,kind=dbl_kind)) * p5) -! aicen(i,j,n) = max(c0,(real(nx_global, kind=dbl_kind) & -! - real(iglob(i), kind=dbl_kind)-p5) & -! / (real(nx_global,kind=dbl_kind)) & -! * (real(ny_global, kind=dbl_kind) & -! - real(jglob(j), kind=dbl_kind)-p5) & -! 
/ (real(ny_global,kind=dbl_kind)) * p5) - endif - - elseif (trim(ice_data_dist) == 'gauss') then - if (hinit(n) > c0) then - dist_ratio = 8._dbl_kind * & - sqrt((real(iglob(i),kind=dbl_kind)-real(nx_global+1,kind=dbl_kind)/c2)**2 + & - (real(jglob(j),kind=dbl_kind)-real(ny_global+1,kind=dbl_kind)/c2)**2) / & - sqrt((real(nx_global,kind=dbl_kind))**2 + & - (real(ny_global,kind=dbl_kind))**2) - aicen(i,j,n) = ainit(n) * exp(-dist_ratio) - endif - - elseif (trim(ice_data_dist) == 'uniform') then - - ! nothing extra to do - - else - - call abort_ice(subname//'ERROR: ice_data_dist setting = '//trim(ice_data_dist), & - file=__FILE__, line=__LINE__) - - endif ! ice_data_dist - - vicen(i,j,n) = hinit(n) * aicen(i,j,n) ! m - vsnon(i,j,n) = min(aicen(i,j,n)*hsno_init,p2*vicen(i,j,n)) - - call icepack_init_trcr(Tair = Tair(i,j), Tf = Tf(i,j), & - Sprofile = salinz(i,j,:), & - Tprofile = Tmltz(i,j,:), & - Tsfc = Tsfc, & - nilyr = nilyr, nslyr = nslyr, & - qin = qin(:), qsn = qsn(:)) - - ! surface temperature - trcrn(i,j,nt_Tsfc,n) = Tsfc ! deg C - ! ice enthalpy, salinity - do k = 1, nilyr - trcrn(i,j,nt_qice+k-1,n) = qin(k) - trcrn(i,j,nt_sice+k-1,n) = salinz(i,j,k) - enddo - ! snow enthalpy - do k = 1, nslyr - trcrn(i,j,nt_qsno+k-1,n) = qsn(k) - enddo ! nslyr - ! brine fraction - if (tr_brine) trcrn(i,j,nt_fbri,n) = c1 - - enddo ! ij - enddo ! ncat - - !--------------------------------------------------------- - ! ice velocity - ! these velocites are defined on B-grid - !--------------------------------------------------------- - - if (trim(ice_data_type) == 'boxslotcyl') then - domain_length = dxrect*cm_to_m*nx_global - period = c12*secday ! 
12 days rotational period - max_vel = pi*domain_length/period - do j = 1, ny_block - do i = 1, nx_block - - uvel(i,j) = c2*max_vel*(real(jglob(j), kind=dbl_kind) - p5) & - / real(ny_global - 1, kind=dbl_kind) - max_vel - vvel(i,j) = -c2*max_vel*(real(iglob(i), kind=dbl_kind) - p5) & - / real(nx_global - 1, kind=dbl_kind) + max_vel - enddo ! j - enddo ! i - else - uvel = c0 - vvel = c0 - endif - - endif ! ice_ic - - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) - - end subroutine set_state_var - -!======================================================================= - - end module ice_init - -!======================================================================= diff --git a/cicecore/cicedynB/infrastructure/ice_grid.F90 b/cicecore/cicedynB/infrastructure/ice_grid.F90 deleted file mode 100644 index dfccdd413..000000000 --- a/cicecore/cicedynB/infrastructure/ice_grid.F90 +++ /dev/null @@ -1,4667 +0,0 @@ -#ifdef ncdf -#define USE_NETCDF -#endif -!======================================================================= - -! Spatial grids, masks, and boundary conditions -! -! authors: Elizabeth C. Hunke and William H. Lipscomb, LANL -! Tony Craig, NCAR -! -! 2004: Block structure added by William Lipscomb -! init_grid split into two parts as in POP 2.0 -! Boundary update routines replaced by POP versions -! 2006: Converted to free source form (F90) by Elizabeth Hunke -! 2007: Option to read from netcdf files (A. Keen, Met Office) -! Grid reading routines reworked by E. Hunke for boundary values -! 2021: Add N (center of north face) and E (center of east face) grids -! to support C and CD solvers. Defining T at center of cells, U at -! NE corner, N at center of top face, E at center of right face. -! All cells are quadrilaterals with NE, E, and N associated with -! directions relative to logical grid. 
- - module ice_grid - - use ice_kinds_mod - use ice_broadcast, only: broadcast_scalar, broadcast_array - use ice_boundary, only: ice_HaloUpdate, ice_HaloExtrapolate, & - primary_grid_lengths_global_ext - use ice_communicate, only: my_task, master_task - use ice_blocks, only: block, get_block, nx_block, ny_block, nghost - use ice_domain_size, only: nx_global, ny_global, max_blocks - use ice_domain, only: blocks_ice, nblocks, halo_info, distrb_info, & - ew_boundary_type, ns_boundary_type, init_domain_distribution - use ice_fileunits, only: nu_diag, nu_grid, nu_kmt, & - get_fileunit, release_fileunit, flush_fileunit - use ice_gather_scatter, only: gather_global, scatter_global - use ice_read_write, only: ice_read, ice_read_nc, ice_read_global, & - ice_read_global_nc, ice_open, ice_open_nc, ice_close_nc - use ice_timers, only: timer_bound, ice_timer_start, ice_timer_stop - use ice_exit, only: abort_ice - use ice_global_reductions, only: global_minval, global_maxval - use icepack_intfc, only: icepack_warnings_flush, icepack_warnings_aborted - use icepack_intfc, only: icepack_query_parameters, icepack_init_parameters - - implicit none - private - public :: init_grid1, init_grid2, grid_average_X2Y, & - alloc_grid, makemask, grid_neighbor_min, grid_neighbor_max - - character (len=char_len_long), public :: & - grid_format , & ! file format ('bin'=binary or 'nc'=netcdf) - gridcpl_file , & ! input file for POP coupling grid info - grid_file , & ! input file for POP grid info - kmt_file , & ! input file for POP grid info - kmt_type , & ! options are file, default, boxislands - bathymetry_file, & ! input bathymetry for seabed stress - bathymetry_format, & ! bathymetry file format (default or pop) - grid_spacing , & ! default of 30.e3m or set by user in namelist - grid_ice , & ! Underlying model grid structure (A, B, C, CD) - grid_ice_thrm, & ! ocean forcing grid for thermo fields (T, U, N, E) - grid_ice_dynu, & ! 
ocean forcing grid for dyn U fields (T, U, N, E) - grid_ice_dynv, & ! ocean forcing grid for dyn V fields (T, U, N, E) - grid_atm , & ! atmos forcing grid structure (A, B, C, CD) - grid_atm_thrm, & ! atmos forcing grid for thermo fields (T, U, N, E) - grid_atm_dynu, & ! atmos forcing grid for dyn U fields (T, U, N, E) - grid_atm_dynv, & ! atmos forcing grid for dyn V fields (T, U, N, E) - grid_ocn , & ! ocean forcing grid structure (A B, C, CD) - grid_ocn_thrm, & ! ocean forcing grid for thermo fields (T, U, N, E) - grid_ocn_dynu, & ! ocean forcing grid for dyn U fields (T, U, N, E) - grid_ocn_dynv, & ! ocean forcing grid for dyn V fields (T, U, N, E) - grid_type ! current options are rectangular (default), - ! displaced_pole, tripole, regional - - real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & - dxT , & ! width of T-cell through the middle (m) - dyT , & ! height of T-cell through the middle (m) - dxU , & ! width of U-cell through the middle (m) - dyU , & ! height of U-cell through the middle (m) - dxN , & ! width of N-cell through the middle (m) - dyN , & ! height of N-cell through the middle (m) - dxE , & ! width of E-cell through the middle (m) - dyE , & ! height of E-cell through the middle (m) - HTE , & ! length of eastern edge of T-cell (m) - HTN , & ! length of northern edge of T-cell (m) - tarea , & ! area of T-cell (m^2) - uarea , & ! area of U-cell (m^2) - narea , & ! area of N-cell (m^2) - earea , & ! area of E-cell (m^2) - tarear , & ! 1/tarea - uarear , & ! 1/uarea - narear , & ! 1/narea - earear , & ! 1/earea - tarean , & ! area of NH T-cells - tareas , & ! area of SH T-cells - ULON , & ! longitude of velocity pts, NE corner of T pts (radians) - ULAT , & ! latitude of velocity pts, NE corner of T pts (radians) - TLON , & ! longitude of temp (T) pts (radians) - TLAT , & ! latitude of temp (T) pts (radians) - NLON , & ! longitude of center of north face of T pts (radians) - NLAT , & ! 
latitude of center of north face of T pts (radians) - ELON , & ! longitude of center of east face of T pts (radians) - ELAT , & ! latitude of center of east face of T pts (radians) - ANGLE , & ! for conversions between POP grid and lat/lon - ANGLET , & ! ANGLE converted to T-cells - bathymetry , & ! ocean depth, for grounding keels and bergs (m) - ocn_gridcell_frac ! only relevant for lat-lon grids - ! gridcell value of [1 - (land fraction)] (T-cell) - - real (kind=dbl_kind), dimension (:,:), allocatable, public :: & - G_HTE , & ! length of eastern edge of T-cell (global ext.) - G_HTN ! length of northern edge of T-cell (global ext.) - - real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & - cyp , & ! 1.5*HTE(i,j)-0.5*HTW(i,j) = 1.5*HTE(i,j)-0.5*HTE(i-1,j) - cxp , & ! 1.5*HTN(i,j)-0.5*HTS(i,j) = 1.5*HTN(i,j)-0.5*HTN(i,j-1) - cym , & ! 0.5*HTE(i,j)-1.5*HTW(i,j) = 0.5*HTE(i,j)-1.5*HTE(i-1,j) - cxm , & ! 0.5*HTN(i,j)-1.5*HTS(i,j) = 0.5*HTN(i,j)-1.5*HTN(i,j-1) - dxhy , & ! 0.5*(HTE(i,j) - HTW(i,j)) = 0.5*(HTE(i,j) - HTE(i-1,j)) - dyhx ! 0.5*(HTN(i,j) - HTS(i,j)) = 0.5*(HTN(i,j) - HTN(i,j-1)) - - real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & - ratiodxN , & ! - dxN(i+1,j) / dxN(i,j) - ratiodyE , & ! - dyE(i ,j+1) / dyE(i,j) - ratiodxNr , & ! 1 / ratiodxN - ratiodyEr ! 1 / ratiodyE - - ! grid dimensions for rectangular grid - real (kind=dbl_kind), public :: & - dxrect, & ! user_specified spacing (cm) in x-direction (uniform HTN) - dyrect ! user_specified spacing (cm) in y-direction (uniform HTE) - - ! growth factor for variable spaced grid - real (kind=dbl_kind), public :: & - dxscale, & ! scale factor for grid spacing in x direction (e.g., 1.02) - dyscale ! scale factor for gird spacing in y direction (e.g., 1.02) - - real (kind=dbl_kind), public :: & - lonrefrect, & ! lower left lon for rectgrid - latrefrect ! lower left lat for rectgrid - - ! 
Corners of grid boxes for history output - real (kind=dbl_kind), dimension (:,:,:,:), allocatable, public :: & - lont_bounds, & ! longitude of gridbox corners for T point - latt_bounds, & ! latitude of gridbox corners for T point - lonu_bounds, & ! longitude of gridbox corners for U point - latu_bounds, & ! latitude of gridbox corners for U point - lonn_bounds, & ! longitude of gridbox corners for N point - latn_bounds, & ! latitude of gridbox corners for N point - lone_bounds, & ! longitude of gridbox corners for E point - late_bounds ! latitude of gridbox corners for E point - - ! geometric quantities used for remapping transport - real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & - xav , & ! mean T-cell value of x - yav , & ! mean T-cell value of y - xxav , & ! mean T-cell value of xx -! xyav , & ! mean T-cell value of xy -! yyav , & ! mean T-cell value of yy - yyav ! mean T-cell value of yy -! xxxav, & ! mean T-cell value of xxx -! xxyav, & ! mean T-cell value of xxy -! xyyav, & ! mean T-cell value of xyy -! yyyav ! mean T-cell value of yyy - - real (kind=dbl_kind), & - dimension (:,:,:,:,:), allocatable, public :: & - mne, & ! matrices used for coordinate transformations in remapping - mnw, & ! ne = northeast corner, nw = northwest, etc. - mse, & - msw - - ! masks - real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & - hm , & ! land/boundary mask, thickness (T-cell) - bm , & ! task/block id - uvm , & ! land/boundary mask (U-cell) - npm , & ! land/boundary mask (N-cell) - epm , & ! land/boundary mask (E-cell) - kmt ! ocean topography mask for bathymetry (T-cell) - - logical (kind=log_kind), public :: & - use_bathymetry, & ! flag for reading in bathymetry_file - pgl_global_ext, & ! flag for init primary grid lengths (global ext.) - scale_dxdy ! flag to apply scale factor to vary dx/dy in rectgrid - - logical (kind=log_kind), dimension (:,:,:), allocatable, public :: & - tmask , & ! 
land/boundary mask, thickness (T-cell) - umask , & ! land/boundary mask (U-cell) (1 if all surrounding T cells are ocean) - umaskCD, & ! land/boundary mask (U-cell) (1 if at least two surrounding T cells are ocean) - nmask , & ! land/boundary mask, (N-cell) - emask , & ! land/boundary mask, (E-cell) - lmask_n, & ! northern hemisphere mask - lmask_s ! southern hemisphere mask - - real (kind=dbl_kind), dimension (:,:,:), allocatable, public :: & - rndex_global ! global index for local subdomain (dbl) - - logical (kind=log_kind), private :: & - l_readCenter ! If anglet exist in grid file read it otherwise calculate it - - interface grid_average_X2Y - module procedure grid_average_X2Y_base , & - grid_average_X2Y_userwghts, & - grid_average_X2Y_NEversion - end interface - -!======================================================================= - - contains - -!======================================================================= -! -! Allocate space for all variables -! - subroutine alloc_grid - - integer (int_kind) :: ierr - - character(len=*), parameter :: subname = '(alloc_grid)' - - allocate( & - dxT (nx_block,ny_block,max_blocks), & ! width of T-cell through the middle (m) - dyT (nx_block,ny_block,max_blocks), & ! height of T-cell through the middle (m) - dxU (nx_block,ny_block,max_blocks), & ! width of U-cell through the middle (m) - dyU (nx_block,ny_block,max_blocks), & ! height of U-cell through the middle (m) - dxN (nx_block,ny_block,max_blocks), & ! width of N-cell through the middle (m) - dyN (nx_block,ny_block,max_blocks), & ! height of N-cell through the middle (m) - dxE (nx_block,ny_block,max_blocks), & ! width of E-cell through the middle (m) - dyE (nx_block,ny_block,max_blocks), & ! height of E-cell through the middle (m) - HTE (nx_block,ny_block,max_blocks), & ! length of eastern edge of T-cell (m) - HTN (nx_block,ny_block,max_blocks), & ! length of northern edge of T-cell (m) - tarea (nx_block,ny_block,max_blocks), & ! 
area of T-cell (m^2) - uarea (nx_block,ny_block,max_blocks), & ! area of U-cell (m^2) - narea (nx_block,ny_block,max_blocks), & ! area of N-cell (m^2) - earea (nx_block,ny_block,max_blocks), & ! area of E-cell (m^2) - tarear (nx_block,ny_block,max_blocks), & ! 1/tarea - uarear (nx_block,ny_block,max_blocks), & ! 1/uarea - narear (nx_block,ny_block,max_blocks), & ! 1/narea - earear (nx_block,ny_block,max_blocks), & ! 1/earea - tarean (nx_block,ny_block,max_blocks), & ! area of NH T-cells - tareas (nx_block,ny_block,max_blocks), & ! area of SH T-cells - ULON (nx_block,ny_block,max_blocks), & ! longitude of U pts, NE corner (radians) - ULAT (nx_block,ny_block,max_blocks), & ! latitude of U pts, NE corner (radians) - TLON (nx_block,ny_block,max_blocks), & ! longitude of T pts (radians) - TLAT (nx_block,ny_block,max_blocks), & ! latitude of T pts (radians) - NLON (nx_block,ny_block,max_blocks), & ! longitude of N pts, N face (radians) - NLAT (nx_block,ny_block,max_blocks), & ! latitude of N pts, N face (radians) - ELON (nx_block,ny_block,max_blocks), & ! longitude of E pts, E face (radians) - ELAT (nx_block,ny_block,max_blocks), & ! latitude of E pts, E face (radians) - ANGLE (nx_block,ny_block,max_blocks), & ! for conversions between POP grid and lat/lon - ANGLET (nx_block,ny_block,max_blocks), & ! ANGLE converted to T-cells - bathymetry(nx_block,ny_block,max_blocks),& ! ocean depth, for grounding keels and bergs (m) - ocn_gridcell_frac(nx_block,ny_block,max_blocks),& ! only relevant for lat-lon grids - cyp (nx_block,ny_block,max_blocks), & ! 1.5*HTE - 0.5*HTW - cxp (nx_block,ny_block,max_blocks), & ! 1.5*HTN - 0.5*HTS - cym (nx_block,ny_block,max_blocks), & ! 0.5*HTE - 1.5*HTW - cxm (nx_block,ny_block,max_blocks), & ! 0.5*HTN - 1.5*HTS - dxhy (nx_block,ny_block,max_blocks), & ! 0.5*(HTE - HTW) - dyhx (nx_block,ny_block,max_blocks), & ! 0.5*(HTN - HTS) - xav (nx_block,ny_block,max_blocks), & ! mean T-cell value of x - yav (nx_block,ny_block,max_blocks), & ! 
mean T-cell value of y - xxav (nx_block,ny_block,max_blocks), & ! mean T-cell value of xx - yyav (nx_block,ny_block,max_blocks), & ! mean T-cell value of yy - hm (nx_block,ny_block,max_blocks), & ! land/boundary mask, thickness (T-cell) - bm (nx_block,ny_block,max_blocks), & ! task/block id - uvm (nx_block,ny_block,max_blocks), & ! land/boundary mask, velocity (U-cell) - water in case of all water point - npm (nx_block,ny_block,max_blocks), & ! land/boundary mask (N-cell) - epm (nx_block,ny_block,max_blocks), & ! land/boundary mask (E-cell) - kmt (nx_block,ny_block,max_blocks), & ! ocean topography mask for bathymetry (T-cell) - tmask (nx_block,ny_block,max_blocks), & ! land/boundary mask, thickness (T-cell) - umask (nx_block,ny_block,max_blocks), & ! land/boundary mask, velocity (U-cell) - umaskCD (nx_block,ny_block,max_blocks), & ! land/boundary mask, velocity (U-cell) - nmask (nx_block,ny_block,max_blocks), & ! land/boundary mask (N-cell) - emask (nx_block,ny_block,max_blocks), & ! land/boundary mask (E-cell) - lmask_n (nx_block,ny_block,max_blocks), & ! northern hemisphere mask - lmask_s (nx_block,ny_block,max_blocks), & ! southern hemisphere mask - rndex_global(nx_block,ny_block,max_blocks), & ! global index for local subdomain (dbl) - lont_bounds(4,nx_block,ny_block,max_blocks), & ! longitude of gridbox corners for T point - latt_bounds(4,nx_block,ny_block,max_blocks), & ! latitude of gridbox corners for T point - lonu_bounds(4,nx_block,ny_block,max_blocks), & ! longitude of gridbox corners for U point - latu_bounds(4,nx_block,ny_block,max_blocks), & ! latitude of gridbox corners for U point - lonn_bounds(4,nx_block,ny_block,max_blocks), & ! longitude of gridbox corners for N point - latn_bounds(4,nx_block,ny_block,max_blocks), & ! latitude of gridbox corners for N point - lone_bounds(4,nx_block,ny_block,max_blocks), & ! longitude of gridbox corners for E point - late_bounds(4,nx_block,ny_block,max_blocks), & ! 
latitude of gridbox corners for E point - mne (2,2,nx_block,ny_block,max_blocks), & ! matrices used for coordinate transformations in remapping - mnw (2,2,nx_block,ny_block,max_blocks), & ! ne = northeast corner, nw = northwest, etc. - mse (2,2,nx_block,ny_block,max_blocks), & - msw (2,2,nx_block,ny_block,max_blocks), & - stat=ierr) - if (ierr/=0) call abort_ice(subname//'ERROR: Out of memory') - - if (grid_ice == 'CD' .or. grid_ice == 'C') then - allocate( & - ratiodxN (nx_block,ny_block,max_blocks), & - ratiodyE (nx_block,ny_block,max_blocks), & - ratiodxNr(nx_block,ny_block,max_blocks), & - ratiodyEr(nx_block,ny_block,max_blocks), & - stat=ierr) - if (ierr/=0) call abort_ice(subname//'ERROR: Out of memory') - endif - - if (pgl_global_ext) then - allocate( & - G_HTE(nx_global+2*nghost, ny_global+2*nghost), & ! length of eastern edge of T-cell (global ext.) - G_HTN(nx_global+2*nghost, ny_global+2*nghost), & ! length of northern edge of T-cell (global ext.) - stat=ierr) - if (ierr/=0) call abort_ice(subname//'ERROR: Out of memory') - endif - - end subroutine alloc_grid - -!======================================================================= - -! Distribute blocks across processors. The distribution is optimized -! based on latitude and topography, contained in the ULAT and KMT arrays. -! -! authors: William Lipscomb and Phil Jones, LANL - - subroutine init_grid1 - - use ice_blocks, only: nx_block, ny_block - use ice_broadcast, only: broadcast_array - use ice_constants, only: c1 - - integer (kind=int_kind) :: & - fid_grid, & ! file id for netCDF grid file - fid_kmt ! file id for netCDF kmt file - - character (char_len) :: & - fieldname ! field name in netCDF file - - real (kind=dbl_kind), dimension(:,:), allocatable :: & - work_g1, work_g2 - - real (kind=dbl_kind) :: & - rad_to_deg - - character(len=*), parameter :: subname = '(init_grid1)' - - !----------------------------------------------------------------- - ! 
Get global ULAT and KMT arrays used for block decomposition. - !----------------------------------------------------------------- - - call icepack_query_parameters(rad_to_deg_out=rad_to_deg) - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) - - allocate(work_g1(nx_global,ny_global)) - allocate(work_g2(nx_global,ny_global)) - - ! check tripole flags here - ! can't check in init_data because ns_boundary_type is not yet read - ! can't check in init_domain_blocks because grid_type is not accessible due to circular logic - - if (grid_type == 'tripole' .and. ns_boundary_type /= 'tripole' .and. & - ns_boundary_type /= 'tripoleT') then - call abort_ice(subname//'ERROR: grid_type tripole needs tripole ns_boundary_type', & - file=__FILE__, line=__LINE__) - endif - - if (trim(grid_type) == 'displaced_pole' .or. & - trim(grid_type) == 'tripole' .or. & - trim(grid_type) == 'regional' ) then - - if (trim(grid_format) == 'nc') then - - call ice_open_nc(grid_file,fid_grid) - call ice_open_nc(kmt_file,fid_kmt) - - fieldname='ulat' - call ice_read_global_nc(fid_grid,1,fieldname,work_g1,.true.) - fieldname='kmt' - call ice_read_global_nc(fid_kmt,1,fieldname,work_g2,.true.) - - if (my_task == master_task) then - call ice_close_nc(fid_grid) - call ice_close_nc(fid_kmt) - endif - - else - - call ice_open(nu_grid,grid_file,64) ! ULAT - call ice_open(nu_kmt, kmt_file, 32) ! KMT - - call ice_read_global(nu_grid,1,work_g1,'rda8',.true.) ! ULAT - call ice_read_global(nu_kmt, 1,work_g2,'ida4',.true.) ! KMT - - if (my_task == master_task) then - close (nu_grid) - close (nu_kmt) - endif - - endif - - else ! rectangular grid - - work_g1(:,:) = 75._dbl_kind/rad_to_deg ! arbitrary polar latitude - work_g2(:,:) = c1 - - endif - - call broadcast_array(work_g1, master_task) ! ULAT - call broadcast_array(work_g2, master_task) ! KMT - - !----------------------------------------------------------------- - ! 
distribute blocks among processors - !----------------------------------------------------------------- - - call init_domain_distribution(work_g2, work_g1, grid_ice) ! KMT, ULAT - - deallocate(work_g1) - deallocate(work_g2) - - !----------------------------------------------------------------- - ! write additional domain information - !----------------------------------------------------------------- - - if (my_task == master_task) then - write(nu_diag,'(a26,i6)') ' Block size: nx_block = ',nx_block - write(nu_diag,'(a26,i6)') ' ny_block = ',ny_block - endif - - end subroutine init_grid1 - -!======================================================================= - -! Horizontal grid initialization: -! -! U{LAT,LONG} = true {latitude,longitude} of U points -! HT{N,E} = cell widths on {N,E} sides of T cell -! ANGLE = angle between local x direction and true east -! hm = land mask (c1 for ocean points, c0 for land points) -! D{X,Y}{T,U} = {x,y} spacing centered at {T,U} points -! T-grid and ghost cell values -! Various grid quantities needed for dynamics and transport -! -! author: Elizabeth C. Hunke, LANL - - subroutine init_grid2 - - use ice_blocks, only: get_block, block, nx_block, ny_block - use ice_constants, only: c0, c1, c2, p5, p25, c1p5, & - field_loc_center, field_loc_NEcorner, field_loc_Nface, field_loc_Eface, & - field_type_scalar, field_type_vector, field_type_angle - use ice_domain_size, only: max_blocks -#if defined (_OPENMP) - use OMP_LIB -#endif - - integer (kind=int_kind) :: & - i, j, iblk, & - ilo,ihi,jlo,jhi ! beginning and end of physical domain - - real (kind=dbl_kind) :: & - angle_0, angle_w, angle_s, angle_sw, & - pi, pi2, puny - - logical (kind=log_kind), dimension(nx_block,ny_block,max_blocks):: & - out_of_range - - real (kind=dbl_kind), dimension(:,:), allocatable :: & - work_g1 - - type (block) :: & - this_block ! block information for current block - -#if defined (_OPENMP) - integer(kind=omp_sched_kind) :: ompsk ! 
openmp schedule - integer(kind=int_kind) :: ompcs ! openmp schedule count -#endif - - character(len=*), parameter :: subname = '(init_grid2)' - - !----------------------------------------------------------------- - ! lat, lon, cell widths, angle, land mask - !----------------------------------------------------------------- - - l_readCenter = .false. - - call icepack_query_parameters(pi_out=pi, pi2_out=pi2, puny_out=puny) - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) - - if (trim(grid_type) == 'displaced_pole' .or. & - trim(grid_type) == 'tripole' .or. & - trim(grid_type) == 'regional' ) then - if (trim(grid_format) == 'nc') then - call popgrid_nc ! read POP grid lengths from nc file - else - call popgrid ! read POP grid lengths directly - endif -#ifdef CESMCOUPLED - elseif (trim(grid_type) == 'latlon') then - call latlongrid ! lat lon grid for sequential CESM (CAM mode) - return -#endif - elseif (trim(grid_type) == 'cpom_grid') then - call cpomgrid ! cpom model orca1 type grid - else - call rectgrid ! regular rectangular grid - endif - - !----------------------------------------------------------------- - ! Diagnose OpenMP thread schedule, force order in output - !----------------------------------------------------------------- - -#if defined (_OPENMP) - !$OMP PARALLEL DO ORDERED PRIVATE(iblk) SCHEDULE(runtime) - do iblk = 1, nblocks - if (my_task == master_task) then - !$OMP ORDERED - if (iblk == 1) then - call omp_get_schedule(ompsk,ompcs) - write(nu_diag,*) '' - write(nu_diag,*) subname,' OpenMP runtime thread schedule:' - write(nu_diag,*) subname,' omp schedule = ',ompsk,ompcs - endif - write(nu_diag,*) subname,' block, thread = ',iblk,OMP_GET_THREAD_NUM() - call flush_fileunit(nu_diag) - !$OMP END ORDERED - endif - enddo - !$OMP END PARALLEL DO -#endif - - !----------------------------------------------------------------- - ! 
T-grid cell and U-grid cell quantities - ! Fill halo data locally where possible to avoid missing - ! data associated with land block elimination - ! Note: HTN, HTE, dx*, dy* are all defined from global arrays - ! at halos. - !----------------------------------------------------------------- - - !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) - do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi - - do j = 1,ny_block - do i = 1,nx_block - tarea(i,j,iblk) = dxT(i,j,iblk)*dyT(i,j,iblk) - uarea(i,j,iblk) = dxU(i,j,iblk)*dyU(i,j,iblk) - narea(i,j,iblk) = dxN(i,j,iblk)*dyN(i,j,iblk) - earea(i,j,iblk) = dxE(i,j,iblk)*dyE(i,j,iblk) - - if (tarea(i,j,iblk) > c0) then - tarear(i,j,iblk) = c1/tarea(i,j,iblk) - else - tarear(i,j,iblk) = c0 ! possible on boundaries - endif - if (uarea(i,j,iblk) > c0) then - uarear(i,j,iblk) = c1/uarea(i,j,iblk) - else - uarear(i,j,iblk) = c0 ! possible on boundaries - endif - if (narea(i,j,iblk) > c0) then - narear(i,j,iblk) = c1/narea(i,j,iblk) - else - narear(i,j,iblk) = c0 ! possible on boundaries - endif - if (earea(i,j,iblk) > c0) then - earear(i,j,iblk) = c1/earea(i,j,iblk) - else - earear(i,j,iblk) = c0 ! possible on boundaries - endif - - enddo - enddo - - do j = jlo, jhi - do i = ilo, ihi - dxhy(i,j,iblk) = p5*(HTE(i,j,iblk) - HTE(i-1,j,iblk)) - dyhx(i,j,iblk) = p5*(HTN(i,j,iblk) - HTN(i,j-1,iblk)) - enddo - enddo - - do j = jlo, jhi+1 - do i = ilo, ihi+1 - cyp(i,j,iblk) = (c1p5*HTE(i,j,iblk) - p5*HTE(i-1,j,iblk)) - cxp(i,j,iblk) = (c1p5*HTN(i,j,iblk) - p5*HTN(i,j-1,iblk)) - ! match order of operations in cyp, cxp for tripole grids - cym(i,j,iblk) = -(c1p5*HTE(i-1,j,iblk) - p5*HTE(i,j,iblk)) - cxm(i,j,iblk) = -(c1p5*HTN(i,j-1,iblk) - p5*HTN(i,j,iblk)) - enddo - enddo - - if (grid_ice == 'CD' .or. 
grid_ice == 'C') then - do j = jlo, jhi - do i = ilo, ihi - ratiodxN (i,j,iblk) = - dxN(i+1,j ,iblk) / dxN(i,j,iblk) - ratiodyE (i,j,iblk) = - dyE(i ,j+1,iblk) / dyE(i,j,iblk) - ratiodxNr(i,j,iblk) = c1 / ratiodxN(i,j,iblk) - ratiodyEr(i,j,iblk) = c1 / ratiodyE(i,j,iblk) - enddo - enddo - endif - - enddo ! iblk - !$OMP END PARALLEL DO - - !----------------------------------------------------------------- - ! Ghost cell updates - ! On the tripole grid, one must be careful with updates of - ! quantities that involve a difference of cell lengths. - ! For example, dyhx and dxhy are cell-centered vector components. - ! Also note that on the tripole grid, cxp and cxm would swap places, - ! as would cyp and cym. These quantities are computed only - ! in north and east ghost cells (above), not south and west. - !----------------------------------------------------------------- - - call ice_timer_start(timer_bound) - - call ice_HaloUpdate (dxhy, halo_info, & - field_loc_center, field_type_vector, & - fillValue=c1) - call ice_HaloUpdate (dyhx, halo_info, & - field_loc_center, field_type_vector, & - fillValue=c1) - - ! Update just on the tripole seam to ensure bit-for-bit symmetry across seam - call ice_HaloUpdate (tarea, halo_info, & - field_loc_center, field_type_scalar, & - fillValue=c1, tripoleOnly=.true.) - call ice_HaloUpdate (uarea, halo_info, & - field_loc_NEcorner, field_type_scalar, & - fillValue=c1, tripoleOnly=.true.) - call ice_HaloUpdate (tarear, halo_info, & - field_loc_center, field_type_scalar, & - fillValue=c1, tripoleOnly=.true.) - call ice_HaloUpdate (uarear, halo_info, & - field_loc_NEcorner, field_type_scalar, & - fillValue=c1, tripoleOnly=.true.) - - call ice_timer_stop(timer_bound) - - !----------------------------------------------------------------- - ! Calculate ANGLET to be compatible with POP ocean model - ! First, ensure that -pi <= ANGLE <= pi - !----------------------------------------------------------------- - - out_of_range = .false. 
- where (ANGLE < -pi .or. ANGLE > pi) out_of_range = .true. - if (count(out_of_range) > 0) then - write(nu_diag,*) subname,' angle = ',minval(ANGLE),maxval(ANGLE),count(out_of_range) - call abort_ice (subname//' ANGLE out of expected range', & - file=__FILE__, line=__LINE__) - endif - - !----------------------------------------------------------------- - ! Compute ANGLE on T-grid - !----------------------------------------------------------------- - if (trim(grid_type) == 'cpom_grid') then - ANGLET(:,:,:) = ANGLE(:,:,:) - else if (.not. (l_readCenter)) then - ANGLET = c0 - - !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block, & - !$OMP angle_0,angle_w,angle_s,angle_sw) - do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi - - do j = jlo, jhi - do i = ilo, ihi - angle_0 = ANGLE(i ,j ,iblk) ! w----0 - angle_w = ANGLE(i-1,j ,iblk) ! | | - angle_s = ANGLE(i, j-1,iblk) ! | | - angle_sw = ANGLE(i-1,j-1,iblk) ! sw---s - ANGLET(i,j,iblk) = atan2(p25*(sin(angle_0)+ & - sin(angle_w)+ & - sin(angle_s)+ & - sin(angle_sw)),& - p25*(cos(angle_0)+ & - cos(angle_w)+ & - cos(angle_s)+ & - cos(angle_sw))) - enddo - enddo - enddo - !$OMP END PARALLEL DO - endif ! cpom_grid - if (trim(grid_type) == 'regional' .and. & - (.not. (l_readCenter))) then - ! for W boundary extrapolate from interior - !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) - do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi - - i = ilo - if (this_block%i_glob(i) == 1) then - do j = jlo, jhi - ANGLET(i,j,iblk) = c2*ANGLET(i+1,j,iblk)-ANGLET(i+2,j,iblk) - enddo - endif - enddo - !$OMP END PARALLEL DO - endif ! 
regional - - call ice_timer_start(timer_bound) - call ice_HaloUpdate (ANGLET, halo_info, & - field_loc_center, field_type_angle, & - fillValue=c1) - call ice_timer_stop(timer_bound) - - call makemask ! velocity mask, hemisphere masks - if (.not. (l_readCenter)) then - call Tlatlon ! get lat, lon on the T grid - endif - !----------------------------------------------------------------- - ! bathymetry - !----------------------------------------------------------------- - - if (trim(bathymetry_format) == 'default') then - call get_bathymetry - elseif (trim(bathymetry_format) == 'pop') then - call get_bathymetry_popfile - else - call abort_ice(subname//'ERROR: bathymetry_format value must be default or pop', & - file=__FILE__, line=__LINE__) - endif - - !---------------------------------------------------------------- - ! Corner coordinates for CF compliant history files - !---------------------------------------------------------------- - - call gridbox_corners - call gridbox_edges - - !----------------------------------------------------------------- - ! Compute global index (used for unpacking messages from coupler) - !----------------------------------------------------------------- - - if (my_task==master_task) then - allocate(work_g1(nx_global,ny_global)) - do j=1,ny_global - do i=1,nx_global - work_g1(i,j) = real((j-1)*nx_global + i,kind=dbl_kind) - enddo - enddo - else - allocate(work_g1(1,1)) ! to save memory - endif - - call scatter_global(rndex_global, work_g1, & - master_task, distrb_info, & - field_loc_center, field_type_scalar) - - deallocate(work_g1) - - end subroutine init_grid2 - -!======================================================================= - -! POP displaced pole grid and land mask (or tripole). -! Grid record number, field and units are: \\ -! (1) ULAT (radians) \\ -! (2) ULON (radians) \\ -! (3) HTN (cm) \\ -! (4) HTE (cm) \\ -! (5) HUS (cm) \\ -! (6) HUW (cm) \\ -! (7) ANGLE (radians) -! -! Land mask record number and field is (1) KMT. 
-! -! author: Elizabeth C. Hunke, LANL - - subroutine popgrid - - use ice_blocks, only: nx_block, ny_block - use ice_constants, only: c0, c1, p5, & - field_loc_center, field_loc_NEcorner, & - field_type_scalar, field_type_angle - use ice_domain_size, only: max_blocks - - integer (kind=int_kind) :: & - i, j, iblk, & - ilo,ihi,jlo,jhi ! beginning and end of physical domain - - logical (kind=log_kind) :: diag - - real (kind=dbl_kind), dimension(:,:), allocatable :: & - work_g1 - - real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & - work1 - - type (block) :: & - this_block ! block information for current block - - character(len=*), parameter :: subname = '(popgrid)' - - call ice_open(nu_grid,grid_file,64) - call ice_open(nu_kmt,kmt_file,32) - - diag = .true. ! write diagnostic info - - !----------------------------------------------------------------- - ! topography - !----------------------------------------------------------------- - - call ice_read(nu_kmt,1,work1,'ida4',diag, & - field_loc=field_loc_center, & - field_type=field_type_scalar) - - hm (:,:,:) = c0 - kmt(:,:,:) = c0 - !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) - do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi - - do j = jlo, jhi - do i = ilo, ihi - kmt(i,j,iblk) = work1(i,j,iblk) - if (kmt(i,j,iblk) >= p5) hm(i,j,iblk) = c1 - enddo - enddo - enddo - !$OMP END PARALLEL DO - - !----------------------------------------------------------------- - ! lat, lon, angle - !----------------------------------------------------------------- - - allocate(work_g1(nx_global,ny_global)) - - call ice_read_global(nu_grid,1,work_g1,'rda8',.true.) ! 
ULAT - call gridbox_verts(work_g1,latt_bounds) - call scatter_global(ULAT, work_g1, master_task, distrb_info, & - field_loc_NEcorner, field_type_scalar) - call ice_HaloExtrapolate(ULAT, distrb_info, & - ew_boundary_type, ns_boundary_type) - - call ice_read_global(nu_grid,2,work_g1,'rda8',.true.) ! ULON - call gridbox_verts(work_g1,lont_bounds) - call scatter_global(ULON, work_g1, master_task, distrb_info, & - field_loc_NEcorner, field_type_scalar) - call ice_HaloExtrapolate(ULON, distrb_info, & - ew_boundary_type, ns_boundary_type) - - call ice_read_global(nu_grid,7,work_g1,'rda8',.true.) ! ANGLE - call scatter_global(ANGLE, work_g1, master_task, distrb_info, & - field_loc_NEcorner, field_type_angle) - - !----------------------------------------------------------------- - ! cell dimensions - ! calculate derived quantities from global arrays to preserve - ! information on boundaries - !----------------------------------------------------------------- - - call ice_read_global(nu_grid,3,work_g1,'rda8',.true.) ! HTN - call primary_grid_lengths_HTN(work_g1) ! dxU, dxT, dxN, dxE - - call ice_read_global(nu_grid,4,work_g1,'rda8',.true.) ! HTE - call primary_grid_lengths_HTE(work_g1) ! dyU, dyT, dyN, dyE - - deallocate(work_g1) - - if (my_task == master_task) then - close (nu_grid) - close (nu_kmt) - endif - - end subroutine popgrid - -!======================================================================= - -! POP displaced pole grid and land mask. -! Grid record number, field and units are: \\ -! (1) ULAT (radians) \\ -! (2) ULON (radians) \\ -! (3) HTN (cm) \\ -! (4) HTE (cm) \\ -! (5) HUS (cm) \\ -! (6) HUW (cm) \\ -! (7) ANGLE (radians) -! -! Land mask record number and field is (1) KMT. -! -! author: Elizabeth C. Hunke, LANL -! 
Revised for netcdf input: Ann Keen, Met Office, May 2007 - - subroutine popgrid_nc - - use ice_blocks, only: nx_block, ny_block - use ice_constants, only: c0, c1, & - field_loc_center, field_loc_NEcorner, & - field_type_scalar, field_type_angle - use ice_domain_size, only: max_blocks -#ifdef USE_NETCDF - use netcdf -#endif - - integer (kind=int_kind) :: & - i, j, iblk, & - ilo,ihi,jlo,jhi, & ! beginning and end of physical domain - fid_grid, & ! file id for netCDF grid file - fid_kmt ! file id for netCDF kmt file - - logical (kind=log_kind) :: diag - - character (char_len) :: & - fieldname ! field name in netCDF file - - real (kind=dbl_kind) :: & - pi - - real (kind=dbl_kind), dimension(:,:), allocatable :: & - work_g1 - - real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & - work1 - - type (block) :: & - this_block ! block information for current block - - integer(kind=int_kind) :: & - varid - integer (kind=int_kind) :: & - status ! status flag - - - character(len=*), parameter :: subname = '(popgrid_nc)' - -#ifdef USE_NETCDF - call icepack_query_parameters(pi_out=pi) - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) - - call ice_open_nc(grid_file,fid_grid) - call ice_open_nc(kmt_file,fid_kmt) - - diag = .true. ! write diagnostic info - !----------------------------------------------------------------- - ! 
topography - !----------------------------------------------------------------- - - fieldname='kmt' - call ice_read_nc(fid_kmt,1,fieldname,work1,diag, & - field_loc=field_loc_center, & - field_type=field_type_scalar) - - hm (:,:,:) = c0 - kmt(:,:,:) = c0 - !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) - do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi - - do j = jlo, jhi - do i = ilo, ihi - kmt(i,j,iblk) = work1(i,j,iblk) - if (kmt(i,j,iblk) >= c1) hm(i,j,iblk) = c1 - enddo - enddo - enddo - !$OMP END PARALLEL DO - - !----------------------------------------------------------------- - ! lat, lon, angle - !----------------------------------------------------------------- - - allocate(work_g1(nx_global,ny_global)) - - fieldname='ulat' - call ice_read_global_nc(fid_grid,1,fieldname,work_g1,diag) ! ULAT - call gridbox_verts(work_g1,latt_bounds) - call scatter_global(ULAT, work_g1, master_task, distrb_info, & - field_loc_NEcorner, field_type_scalar) - call ice_HaloExtrapolate(ULAT, distrb_info, & - ew_boundary_type, ns_boundary_type) - - fieldname='ulon' - call ice_read_global_nc(fid_grid,1,fieldname,work_g1,diag) ! ULON - call gridbox_verts(work_g1,lont_bounds) - call scatter_global(ULON, work_g1, master_task, distrb_info, & - field_loc_NEcorner, field_type_scalar) - call ice_HaloExtrapolate(ULON, distrb_info, & - ew_boundary_type, ns_boundary_type) - - fieldname='angle' - call ice_read_global_nc(fid_grid,1,fieldname,work_g1,diag) ! ANGLE - call scatter_global(ANGLE, work_g1, master_task, distrb_info, & - field_loc_NEcorner, field_type_angle) - ! fix ANGLE: roundoff error due to single precision - where (ANGLE > pi) ANGLE = pi - where (ANGLE < -pi) ANGLE = -pi - - ! 
if grid file includes anglet then read instead - fieldname='anglet' - if (my_task == master_task) then - status = nf90_inq_varid(fid_grid, trim(fieldname) , varid) - if (status /= nf90_noerr) then - write(nu_diag,*) subname//' CICE will calculate angleT, TLON and TLAT' - else - write(nu_diag,*) subname//' angleT, TLON and TLAT is read from grid file' - l_readCenter = .true. - endif - endif - call broadcast_scalar(l_readCenter,master_task) - if (l_readCenter) then - call ice_read_global_nc(fid_grid,1,fieldname,work_g1,diag) - call scatter_global(ANGLET, work_g1, master_task, distrb_info, & - field_loc_center, field_type_angle) - where (ANGLET > pi) ANGLET = pi - where (ANGLET < -pi) ANGLET = -pi - fieldname="tlon" - call ice_read_global_nc(fid_grid,1,fieldname,work_g1,diag) - call scatter_global(TLON, work_g1, master_task, distrb_info, & - field_loc_center, field_type_scalar) - fieldname="tlat" - call ice_read_global_nc(fid_grid,1,fieldname,work_g1,diag) - call scatter_global(TLAT, work_g1, master_task, distrb_info, & - field_loc_center, field_type_scalar) - endif - !----------------------------------------------------------------- - ! cell dimensions - ! calculate derived quantities from global arrays to preserve - ! information on boundaries - !----------------------------------------------------------------- - - fieldname='htn' - call ice_read_global_nc(fid_grid,1,fieldname,work_g1,diag) ! HTN - call primary_grid_lengths_HTN(work_g1) ! dxU, dxT, dxN, dxE - fieldname='hte' - call ice_read_global_nc(fid_grid,1,fieldname,work_g1,diag) ! HTE - call primary_grid_lengths_HTE(work_g1) ! dyU, dyT, dyN, dyE - - deallocate(work_g1) - - if (my_task == master_task) then - call ice_close_nc(fid_grid) - call ice_close_nc(fid_kmt) - endif -#else - call abort_ice(subname//'ERROR: USE_NETCDF cpp not defined', & - file=__FILE__, line=__LINE__) -#endif - - end subroutine popgrid_nc - -#ifdef CESMCOUPLED -!======================================================================= - -! 
Read in kmt file that matches CAM lat-lon grid and has single column -! functionality -! author: Mariana Vertenstein -! 2007: Elizabeth Hunke upgraded to netcdf90 and cice ncdf calls - - subroutine latlongrid - -! use ice_boundary - use ice_domain_size - use ice_scam, only : scmlat, scmlon, single_column - use ice_constants, only: c0, c1, p5, p25, & - field_loc_center, field_type_scalar, radius -#ifdef USE_NETCDF - use netcdf -#endif - - integer (kind=int_kind) :: & - i, j, iblk - - integer (kind=int_kind) :: & - ni, nj, ncid, dimid, varid, ier - - type (block) :: & - this_block ! block information for current block - - integer (kind=int_kind) :: & - ilo,ihi,jlo,jhi ! beginning and end of physical domain - - real (kind=dbl_kind) :: & - closelat, & ! Single-column latitude value - closelon, & ! Single-column longitude value - closelatidx, & ! Single-column latitude index to retrieve - closelonidx ! Single-column longitude index to retrieve - - integer (kind=int_kind) :: & - start(2), & ! Start index to read in - count(2) ! Number of points to read in - - integer (kind=int_kind) :: & - start3(3), & ! Start index to read in - count3(3) ! Number of points to read in - - integer (kind=int_kind) :: & - status ! status flag - - real (kind=dbl_kind), allocatable :: & - lats(:),lons(:),pos_lons(:), glob_grid(:,:) ! temporaries - - real (kind=dbl_kind) :: & - pos_scmlon,& ! temporary - pi, & - puny, & - scamdata ! temporary - - character(len=*), parameter :: subname = '(lonlatgrid)' - -#ifdef USE_NETCDF - !----------------------------------------------------------------- - ! - kmt file is actually clm fractional land file - ! - Determine consistency of dimensions - ! - Read in lon/lat centers in degrees from kmt file - ! 
- Read in ocean from "kmt" file (1 for ocean, 0 for land) - !----------------------------------------------------------------- - - call icepack_query_parameters(pi_out=pi, puny_out=puny) - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) - - ! Determine dimension of domain file and check for consistency - - if (my_task == master_task) then - call ice_open_nc(kmt_file, ncid) - - status = nf90_inq_dimid (ncid, 'ni', dimid) - status = nf90_inquire_dimension(ncid, dimid, len=ni) - status = nf90_inq_dimid (ncid, 'nj', dimid) - status = nf90_inquire_dimension(ncid, dimid, len=nj) - end if - - ! Determine start/count to read in for either single column or global lat-lon grid - ! If single_column, then assume that only master_task is used since there is only one task - - if (single_column) then - ! Check for consistency - if (my_task == master_task) then - if ((nx_global /= 1).or. (ny_global /= 1)) then - write(nu_diag,*) 'Because you have selected the column model flag' - write(nu_diag,*) 'Please set nx_global=ny_global=1 in file' - write(nu_diag,*) 'ice_domain_size.F and recompile' - call abort_ice (subname//'ERROR: check nx_global, ny_global') - endif - end if - - ! 
Read in domain file for single column - allocate(lats(nj)) - allocate(lons(ni)) - allocate(pos_lons(ni)) - allocate(glob_grid(ni,nj)) - - start3=(/1,1,1/) - count3=(/ni,nj,1/) - status = nf90_inq_varid(ncid, 'xc' , varid) - if (status /= nf90_noerr) call abort_ice (subname//' inq_varid xc') - status = nf90_get_var(ncid, varid, glob_grid, start3, count3) - if (status /= nf90_noerr) call abort_ice (subname//' get_var xc') - do i = 1,ni - lons(i) = glob_grid(i,1) - end do - - status = nf90_inq_varid(ncid, 'yc' , varid) - if (status /= nf90_noerr) call abort_ice (subname//' inq_varid yc') - status = nf90_get_var(ncid, varid, glob_grid, start3, count3) - if (status /= nf90_noerr) call abort_ice (subname//' get_var yc') - do j = 1,nj - lats(j) = glob_grid(1,j) - end do - - ! convert lons array and scmlon to 0,360 and find index of value closest to 0 - ! and obtain single-column longitude/latitude indices to retrieve - - pos_lons(:)= mod(lons(:) + 360._dbl_kind,360._dbl_kind) - pos_scmlon = mod(scmlon + 360._dbl_kind,360._dbl_kind) - start(1) = (MINLOC(abs(pos_lons-pos_scmlon),dim=1)) - start(2) = (MINLOC(abs(lats -scmlat ),dim=1)) - - deallocate(lats) - deallocate(lons) - deallocate(pos_lons) - deallocate(glob_grid) - - status = nf90_inq_varid(ncid, 'xc' , varid) - if (status /= nf90_noerr) call abort_ice (subname//' inq_varid xc') - status = nf90_get_var(ncid, varid, scamdata, start) - if (status /= nf90_noerr) call abort_ice (subname//' get_var xc') - TLON = scamdata - status = nf90_inq_varid(ncid, 'yc' , varid) - if (status /= nf90_noerr) call abort_ice (subname//' inq_varid yc') - status = nf90_get_var(ncid, varid, scamdata, start) - if (status /= nf90_noerr) call abort_ice (subname//' get_var yc') - TLAT = scamdata - status = nf90_inq_varid(ncid, 'area' , varid) - if (status /= nf90_noerr) call abort_ice (subname//' inq_varid area') - status = nf90_get_var(ncid, varid, scamdata, start) - if (status /= nf90_noerr) call abort_ice (subname//' get_var are') - tarea = 
scamdata - status = nf90_inq_varid(ncid, 'mask' , varid) - if (status /= nf90_noerr) call abort_ice (subname//' inq_varid mask') - status = nf90_get_var(ncid, varid, scamdata, start) - if (status /= nf90_noerr) call abort_ice (subname//' get_var mask') - hm = scamdata - status = nf90_inq_varid(ncid, 'frac' , varid) - if (status /= nf90_noerr) call abort_ice (subname//' inq_varid frac') - status = nf90_get_var(ncid, varid, scamdata, start) - if (status /= nf90_noerr) call abort_ice (subname//' get_var frac') - ocn_gridcell_frac = scamdata - else - ! Check for consistency - if (my_task == master_task) then - if (nx_global /= ni .and. ny_global /= nj) then - write(nu_diag,*) 'latlongrid: ni,nj = ',ni,nj - write(nu_diag,*) 'latlongrid: nx_g,ny_g = ',nx_global, ny_global - call abort_ice (subname//'ERROR: ni,nj not equal to nx_global,ny_global') - end if - end if - - ! Read in domain file for global lat-lon grid - call ice_read_nc(ncid, 1, 'xc' , TLON , diag=.true.) - call ice_read_nc(ncid, 1, 'yc' , TLAT , diag=.true.) - call ice_read_nc(ncid, 1, 'area', tarea , diag=.true., & - field_loc=field_loc_center,field_type=field_type_scalar) - call ice_read_nc(ncid, 1, 'mask', hm , diag=.true.) - call ice_read_nc(ncid, 1, 'frac', ocn_gridcell_frac, diag=.true.) - end if - - if (my_task == master_task) then - call ice_close_nc(ncid) - end if - - !$OMP PARALLEL DO PRIVATE(iblk,this_block,ilo,ihi,jlo,jhi,i,j) - do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi - - do j = jlo, jhi - do i = ilo, ihi - ! Convert from degrees to radians - TLON(i,j,iblk) = pi*TLON(i,j,iblk)/180._dbl_kind - - ! Convert from degrees to radians - TLAT(i,j,iblk) = pi*TLAT(i,j,iblk)/180._dbl_kind - - ! Convert from radians^2 to m^2 - ! 
(area in domain file is in radians^2 and tarea is in m^2) - tarea(i,j,iblk) = tarea(i,j,iblk) * (radius*radius) - end do - end do - end do - !$OMP END PARALLEL DO - - !----------------------------------------------------------------- - ! Calculate various geometric 2d arrays - ! The U grid (velocity) is not used when run with sequential CAM - ! because we only use thermodynamic sea ice. However, ULAT is used - ! in the default initialization of CICE so we calculate it here as - ! a "dummy" so that CICE will initialize with ice. If a no ice - ! initialization is OK (or desired) this can be commented out and - ! ULAT will remain 0 as specified above. ULAT is located at the - ! NE corner of the grid cell, TLAT at the center, so here ULAT is - ! hacked by adding half the latitudinal spacing (in radians) to - ! TLAT. - !----------------------------------------------------------------- - - !$OMP PARALLEL DO PRIVATE(iblk,this_block,ilo,ihi,jlo,jhi,i,j) - do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi - - do j = jlo, jhi - do i = ilo, ihi - - if (ny_global == 1) then - uarea(i,j,iblk) = tarea(i,j, iblk) - else - uarea(i,j,iblk) = p25* & - (tarea(i,j, iblk) + tarea(i+1,j, iblk) & - + tarea(i,j+1,iblk) + tarea(i+1,j+1,iblk)) - endif - tarear(i,j,iblk) = c1/tarea(i,j,iblk) - uarear(i,j,iblk) = c1/uarea(i,j,iblk) - - if (single_column) then - ULAT (i,j,iblk) = TLAT(i,j,iblk)+(pi/nj) - else - if (ny_global == 1) then - ULAT (i,j,iblk) = TLAT(i,j,iblk) - else - ULAT (i,j,iblk) = TLAT(i,j,iblk)+(pi/ny_global) - endif - endif - ULON (i,j,iblk) = c0 - NLON (i,j,iblk) = c0 - NLAT (i,j,iblk) = c0 - ELON (i,j,iblk) = c0 - ELAT (i,j,iblk) = c0 - ANGLE (i,j,iblk) = c0 - - ANGLET(i,j,iblk) = c0 - HTN (i,j,iblk) = 1.e36_dbl_kind - HTE (i,j,iblk) = 1.e36_dbl_kind - dxT (i,j,iblk) = 1.e36_dbl_kind - dyT (i,j,iblk) = 1.e36_dbl_kind - dxU (i,j,iblk) = 1.e36_dbl_kind - dyU 
(i,j,iblk) = 1.e36_dbl_kind - dxN (i,j,iblk) = 1.e36_dbl_kind - dyN (i,j,iblk) = 1.e36_dbl_kind - dxE (i,j,iblk) = 1.e36_dbl_kind - dyE (i,j,iblk) = 1.e36_dbl_kind - dxhy (i,j,iblk) = 1.e36_dbl_kind - dyhx (i,j,iblk) = 1.e36_dbl_kind - cyp (i,j,iblk) = 1.e36_dbl_kind - cxp (i,j,iblk) = 1.e36_dbl_kind - cym (i,j,iblk) = 1.e36_dbl_kind - cxm (i,j,iblk) = 1.e36_dbl_kind - enddo - enddo - enddo - !$OMP END PARALLEL DO - - call makemask -#else - call abort_ice(subname//'ERROR: USE_NETCDF cpp not defined', & - file=__FILE__, line=__LINE__) -#endif - - end subroutine latlongrid -#endif -!======================================================================= - -! Regular rectangular grid and mask -! -! author: Elizabeth C. Hunke, LANL - - subroutine rectgrid - - use ice_constants, only: c0, c1, c2, radius, cm_to_m, & - field_loc_center, field_loc_NEcorner, field_type_scalar - use ice_domain, only: close_boundaries - - integer (kind=int_kind) :: & - i, j, & - imid, jmid - - real (kind=dbl_kind) :: & - length, & - rad_to_deg - - real (kind=dbl_kind), dimension(:,:), allocatable :: & - work_g1 - - character(len=*), parameter :: subname = '(rectgrid)' - - !----------------------------------------------------------------- - ! Calculate various geometric 2d arrays - !----------------------------------------------------------------- - - call icepack_query_parameters(rad_to_deg_out=rad_to_deg) - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) - - hm (:,:,:) = c0 - kmt(:,:,:) = c0 - angle(:,:,:) = c0 ! "square with the world" - - allocate(work_g1(nx_global,ny_global)) - - if (scale_dxdy) then - ! scale grid spacing from center outward. - ! this different than original method in it - ! needs to define grid spacing before lat/lon. - ! original rectgrid defines latlon first - call rectgrid_scale_dxdy - else - ! rectgrid no grid spacing. - ! 
original method with addition to use namelist lat/lon reference - - if (my_task == master_task) then - work_g1 = c0 - length = dxrect*cm_to_m/radius*rad_to_deg - - work_g1(1,:) = lonrefrect ! reference lon from namelist - - do j = 1, ny_global - do i = 2, nx_global - work_g1(i,j) = work_g1(i-1,j) + length ! ULON - enddo - enddo - work_g1(:,:) = work_g1(:,:) / rad_to_deg - endif - call scatter_global(ULON, work_g1, master_task, distrb_info, & - field_loc_NEcorner, field_type_scalar) - call ice_HaloExtrapolate(ULON, distrb_info, & - ew_boundary_type, ns_boundary_type) - - if (my_task == master_task) then - work_g1 = c0 - length = dyrect*cm_to_m/radius*rad_to_deg - - work_g1(:,1) = latrefrect ! reference latitude from namelist - - do i = 1, nx_global - do j = 2, ny_global - work_g1(i,j) = work_g1(i,j-1) + length ! ULAT - enddo - enddo - work_g1(:,:) = work_g1(:,:) / rad_to_deg - endif - call scatter_global(ULAT, work_g1, master_task, distrb_info, & - field_loc_NEcorner, field_type_scalar) - call ice_HaloExtrapolate(ULAT, distrb_info, & - ew_boundary_type, ns_boundary_type) - - if (my_task == master_task) then - do j = 1, ny_global - do i = 1, nx_global - work_g1(i,j) = dxrect ! HTN - enddo - enddo - endif - call primary_grid_lengths_HTN(work_g1) ! dxU, dxT, dxN, dxE - - if (my_task == master_task) then - do j = 1, ny_global - do i = 1, nx_global - work_g1(i,j) = dyrect ! HTE - enddo - enddo - endif - call primary_grid_lengths_HTE(work_g1) ! dyU, dyT, dyN, dyE - - endif ! scale_dxdy - - !----------------------------------------------------------------- - ! Construct T-cell land mask - ! Keyed on ew_boundary_type; ns_boundary_type should be 'open'. - !----------------------------------------------------------------- - - if (my_task == master_task) then - work_g1(:,:) = c0 ! initialize hm as land - - if (trim(kmt_type) == 'boxislands') then - - call grid_boxislands_kmt(work_g1) - - elseif (trim(kmt_type) == 'channel') then - - do j = 3,ny_global-2 ! 
closed top and bottom - do i = 1,nx_global ! open sides - work_g1(i,j) = c1 ! NOTE nx_global > 5 - enddo - enddo - - elseif (trim(kmt_type) == 'wall') then - - do j = 1,ny_global ! open except - do i = 1,nx_global-2 ! closed east edge - work_g1(i,j) = c1 - enddo - enddo - - elseif (trim(kmt_type) == 'default') then - - ! land in the upper left and lower right corners, - ! otherwise open boundaries - imid = nint(aint(real(nx_global)/c2)) - jmid = nint(aint(real(ny_global)/c2)) - - do j = 3,ny_global-2 - do i = 3,nx_global-2 - work_g1(i,j) = c1 ! open central domain - enddo - enddo - - if (nx_global > 5 .and. ny_global > 5) then - - do j = 1, jmid+2 - do i = 1, imid+2 - work_g1(i,j) = c1 ! open lower left corner - enddo - enddo - - do j = max(jmid-2,1), ny_global - do i = max(imid-2,1), nx_global - work_g1(i,j) = c1 ! open upper right corner - enddo - enddo - - endif ! > 5x5 grid - - else - - call abort_ice(subname//'ERROR: unknown kmt_type '//trim(kmt_type)) - - endif ! kmt_type - - if (close_boundaries) then - work_g1(:, 1:2) = c0 - work_g1(:, ny_global-1:ny_global) = c0 - work_g1(1:2, :) = c0 - work_g1(nx_global-1:nx_global, :) = c0 - endif - - endif - - call scatter_global(hm, work_g1, master_task, distrb_info, & - field_loc_center, field_type_scalar) - - deallocate(work_g1) - - end subroutine rectgrid - -!======================================================================= - - subroutine rectgrid_scale_dxdy - - ! generate a variable spaced rectangluar grid. - ! extend spacing from center of grid outward. - use ice_constants, only: c0, c1, c2, radius, cm_to_m, & - field_loc_center, field_loc_NEcorner, field_type_scalar - - integer (kind=int_kind) :: & - i, j, iblk, & - imid, jmid, & - center1, center2 ! 
array centers for expanding dx, dy - - real (kind=dbl_kind) :: & - length, & - rad_to_deg - - real (kind=dbl_kind), dimension(:,:), allocatable :: & - work_g1 - - character(len=*), parameter :: subname = '(rectgrid_scale_dxdy)' - - call icepack_query_parameters(rad_to_deg_out=rad_to_deg) - - allocate(work_g1(nx_global,ny_global)) - - ! determine dx spacing - ! strategy: initialize with dxrect. - ! if want to scale the grid, work from center outwards, - ! multplying neighbor cell by scale factor. - ! this assumes dx varies in x direction only. - ! (i.e, dx is the same across same y location) - if (my_task == master_task) then - - ! initialize with initial dxrect - work_g1(:,:) = dxrect - - ! check if nx is even or odd - ! if even, middle 2 columns are center - ! of odd, middle 1 column is center - if (mod(nx_global,2) == 0) then ! nx_global is even - - ! with even number of x locatons, - ! the center two y columns are center - center1 = nx_global/2 ! integer math - center2 = center1 + 1 ! integer math - - else ! nx_global = odd - ! only one center index. set center2=center1 - center1 = ceiling(real(nx_global/2),int_kind) - center2 = center1 - endif - - ! note loop over only half the x grid points (center1)-1 - ! working from the center outward. - do j = 1, ny_global - do i = 1, center1-1 - ! work from center1 to left - work_g1(center1-i,j) = dxscale*work_g1(center1-i+1,j) - - ! work from center2 to right - work_g1(center2+i,j) = dxscale*work_g1(center2+i-1,j) - enddo ! i - enddo ! j - - endif ! my_task == master_task - - - ! note work_g1 is converted to meters in primary_grid_lengths_HTN - call primary_grid_lengths_HTN(work_g1) ! dxU, dxT, dxN, dxE - - ! make ULON array - if (my_task == master_task) then - - ! make first column reference lon in radians. - ! the remaining work_g1 is still dx in meters - work_g1(1,:) = lonrefrect/rad_to_deg ! radians - - ! loop over remaining points and add spacing to successive - ! 
x locations - do j = 1, ny_global - do i = 2, nx_global ! start from i=2. i=1 is lonrefrect - length = work_g1(i,j)/radius ! grid spacing in radians - work_g1(i,j) = work_g1(i-1,j) + length ! ULON - enddo ! i - enddo ! j - endif ! mytask == master_task - call scatter_global(ULON, work_g1, master_task, distrb_info, & - field_loc_NEcorner, field_type_scalar) - call ice_HaloExtrapolate(ULON, distrb_info, & - ew_boundary_type, ns_boundary_type) - - ! determine dy spacing - ! strategy: initialize with dyrect. - ! if want to scale the grid, work from center outwards, - ! multplying neighbor cell by scale factor. - ! this assumes dy varies in y direction only. - ! (i.e, dy is the same across same x location) - if (my_task == master_task) then - - ! initialize with initial dxrect - work_g1(:,:) = dyrect - - ! check if ny is even or odd - ! if even, middle 2 rows are center - ! of odd, middle 1 row is center - if (mod(ny_global,2) == 0) then ! ny_global is even - - ! with even number of x locatons, - ! the center two y columns are center - center1 = ny_global/2 ! integer math - center2 = center1 + 1 ! integer math - - else ! ny_global = odd - ! only one center index. set center2=center1 - center1 = ceiling(real(ny_global/2),int_kind) - center2 = center1 - endif - - ! note loop over only half the y grid points (center1)-1 - ! working from the center outward. - do i = 1, nx_global - do j = 1, center1-1 - ! work from center1 to bottom - work_g1(i,center1-j) = dyscale*work_g1(i,center1-j+1) - - ! work from center2 to top - work_g1(i,center2+j) = dyscale*work_g1(i,center2+j-1) - enddo ! i - enddo ! j - endif ! mytask == master_task - ! note work_g1 is converted to meters primary_grid_lengths_HTE - call primary_grid_lengths_HTE(work_g1) ! dyU, dyT, dyN, dyE - - ! make ULAT array - if (my_task == master_task) then - - ! make first row reference lat in radians. - ! the remaining work_g1 is still dy in meters - work_g1(:,1) = latrefrect/rad_to_deg ! radians - - - ! 
loop over remaining points and add spacing to successive - ! x locations - do j = 2, ny_global ! start from j=2. j=1 is latrefrect - do i = 1, nx_global - length = work_g1(i,j)/radius ! grid spacing in radians - work_g1(i,j) = work_g1(i,j-1) + length ! ULAT - enddo ! i - enddo ! j - endif ! mytask == master_task - call scatter_global(ULAT, work_g1, master_task, distrb_info, & - field_loc_NEcorner, field_type_scalar) - call ice_HaloExtrapolate(ULAT, distrb_info, & - ew_boundary_type, ns_boundary_type) - - - deallocate(work_g1) - - end subroutine rectgrid_scale_dxdy - -!======================================================================= - - ! Complex land mask for testing box cases - ! Requires nx_global, ny_global > 20 - ! Assumes work array has been initialized to 1 (ocean) and north and - ! south land boundaries have been applied (ew_boundary_type='cyclic') - - subroutine grid_boxislands_kmt (work) - - use ice_constants, only: c0, c1, c20 - - real (kind=dbl_kind), dimension(:,:), intent(inout) :: work - - integer (kind=int_kind) :: & - i, j, k, & ! indices - nxb, nyb ! convenient cell-block sizes for building the mask - - character(len=*), parameter :: subname = '(grid_boxislands_kmt)' - - ! number of cells in 5% of global grid x and y lengths - nxb = int(real(nx_global, dbl_kind) / c20, int_kind) - nyb = int(real(ny_global, dbl_kind) / c20, int_kind) - - if (nxb < 1 .or. nyb < 1) & - call abort_ice(subname//'ERROR: requires larger grid size') - - ! initialize work area as all ocean (c1). - work(:,:) = c1 - - ! now add land points (c0) - ! northeast triangle - k = 0 - do j = ny_global, ny_global-3*nyb, -1 - k = k+1 - do i = nx_global-3*nxb+k, nx_global - work(i,j) = c0 - enddo - enddo - - ! northwest docks - do j = ny_global-3*nyb, ny_global - do i = 1, 1 - work(i,j) = c0 - enddo - enddo - do i = 1, 2*nxb - do j = ny_global-3*nyb, ny_global-nyb-2 - work(i,j) = c0 - enddo - do j = ny_global-nyb, ny_global-nyb+1 - work(i,j) = c0 - enddo - enddo - - ! 
southwest docks - do j = 2*nyb, 3*nyb - do i = 1, 1 - work(i,j) = c0 - enddo - enddo - do j = 1, 2*nyb - do i = 2, nxb - work(i,j) = c0 - enddo - do i = 2*nxb-1, 2*nxb - work(i,j) = c0 - enddo - do i = 2*nxb+2,4*nxb - work(i,j) = c0 - enddo - enddo - - ! tiny island - do j = 14*nyb, 14*nyb+1 - do i = 14*nxb, 14*nxb+1 - work(i,j) = c0 - enddo - enddo - - ! X islands - ! left triangle - k = 0 - do i = 2*nxb, 4*nxb - k=k+1 - do j = 10*nyb+k, 14*nyb-k - work(i,j) = c0 - enddo - enddo - ! upper triangle - k = 0 - do j = 14*nyb, 12*nyb, -1 - k=k+1 - do i = 2*nxb+2+k, 6*nxb-2-k - work(i,j) = c0 - enddo - enddo - ! diagonal - k = 0 - do j = 10*nyb, 14*nyb - k=k+1 - do i = 2*nxb+4+k, 2*nxb+6+k - work(i,j) = c0 - enddo - enddo - ! lower right triangle - k = 0 - do j = 12*nyb, 10*nyb, -1 - k=k+1 - do i = 5*nxb+k, 8*nxb - work(i,j) = c0 - enddo - enddo - - ! bar islands - do i = 10*nxb, 16*nxb - do j = 4*nyb, 5*nyb - work(i,j) = c0 - enddo - do j = 6*nyb+2, 8*nyb - work(i,j) = c0 - enddo - do j = 8*nyb+2, 8*nyb+3 - work(i,j) = c0 - enddo - enddo - - end subroutine grid_boxislands_kmt - -!======================================================================= - -! CPOM displaced pole grid and land mask. \\ -! Grid record number, field and units are: \\ -! (1) ULAT (degrees) \\ -! (2) ULON (degrees) \\ -! (3) HTN (m) \\ -! (4) HTE (m) \\ -! (7) ANGLE (radians) \\ -! -! Land mask record number and field is (1) KMT. -! -! author: Adrian K. Turner, CPOM, UCL, 09/08/06 - - subroutine cpomgrid - - use ice_blocks, only: nx_block, ny_block - use ice_constants, only: c0, c1, m_to_cm, & - field_loc_NEcorner, field_type_scalar - use ice_domain_size, only: max_blocks - - integer (kind=int_kind) :: & - i, j, iblk, & - ilo,ihi,jlo,jhi ! 
beginning and end of physical domain - - logical (kind=log_kind) :: diag - - real (kind=dbl_kind), dimension(:,:), allocatable :: & - work_g1 - - real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & - work1 - - real (kind=dbl_kind) :: & - rad_to_deg - - type (block) :: & - this_block ! block information for current block - - character(len=*), parameter :: subname = '(cpomgrid)' - - call icepack_query_parameters(rad_to_deg_out=rad_to_deg) - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) - - call ice_open(nu_grid,grid_file,64) - call ice_open(nu_kmt,kmt_file,32) - - diag = .true. ! write diagnostic info - - ! topography - call ice_read(nu_kmt,1,work1,'ida4',diag) - - hm (:,:,:) = c0 - kmt(:,:,:) = c0 - !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) - do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi - - do j = jlo, jhi - do i = ilo, ihi - kmt(i,j,iblk) = work1(i,j,iblk) - if (kmt(i,j,iblk) >= c1) hm(i,j,iblk) = c1 - enddo - enddo - enddo - !$OMP END PARALLEL DO - - allocate(work_g1(nx_global,ny_global)) - - ! lat, lon, cell dimensions, angles - call ice_read_global(nu_grid,1,work_g1, 'rda8',diag) - call scatter_global(ULAT, work_g1, master_task, distrb_info, & - field_loc_NEcorner, field_type_scalar) - - call ice_read_global(nu_grid,2,work_g1, 'rda8',diag) - call scatter_global(ULON, work_g1, master_task, distrb_info, & - field_loc_NEcorner, field_type_scalar) - - call ice_read_global(nu_grid,3,work_g1, 'rda8',diag) - work_g1 = work_g1 * m_to_cm - call primary_grid_lengths_HTN(work_g1) ! dxU, dxT, dxN, dxE - - call ice_read_global(nu_grid,4,work_g1, 'rda8',diag) - work_g1 = work_g1 * m_to_cm - call primary_grid_lengths_HTE(work_g1) ! 
dyU, dyT, dyN, dyE

      call ice_read_global(nu_grid,7,work_g1,'rda8',diag)
      call scatter_global(ANGLE, work_g1, master_task, distrb_info, &
                          field_loc_NEcorner, field_type_scalar)

      ! fix units
      ULAT = ULAT / rad_to_deg
      ULON = ULON / rad_to_deg

      deallocate(work_g1)

      if (my_task == master_task) then
         close (nu_grid)
         close (nu_kmt)
      endif

      write(nu_diag,*) "min/max HTN: ", minval(HTN), maxval(HTN)
      write(nu_diag,*) "min/max HTE: ", minval(HTE), maxval(HTE)

      end subroutine cpomgrid

!=======================================================================

! Calculate dxU and dxT from HTN on the global grid, to preserve
! ghost cell and/or land values that might otherwise be lost. Scatter
! dxU, dxT and HTN to all processors.
!
! author: Elizabeth C. Hunke, LANL

      subroutine primary_grid_lengths_HTN(work_g)

      use ice_constants, only: p25, p5, c2, cm_to_m, &
          field_loc_center, field_loc_NEcorner, &
          field_loc_Nface, field_type_scalar

      ! NOTE: work_g is modified in place (converted from cm to m below),
      ! so the caller's array holds HTN in m on return.
      real (kind=dbl_kind), dimension(:,:) :: work_g ! global array holding HTN

      ! local variables

      integer (kind=int_kind) :: &
         i, j, &
         ip1    ! i+1

      real (kind=dbl_kind), dimension(:,:), allocatable :: &
         work_g2

      character(len=*), parameter :: subname = '(primary_grid_lengths_HTN)'

      ! all global-grid arithmetic happens on master_task only; results
      ! reach the other tasks via scatter_global
      if (my_task == master_task) then
         allocate(work_g2(nx_global,ny_global))
      else
         allocate(work_g2(1,1))
      endif

      ! HTN, dxU = average of 2 neighbor HTNs in i

      if (my_task == master_task) then
         do j = 1, ny_global
         do i = 1, nx_global
            work_g(i,j) = work_g(i,j) * cm_to_m ! HTN
         enddo
         enddo
         do j = 1, ny_global
         do i = 1, nx_global
            ! assume cyclic; noncyclic will be handled during scatter
            ip1 = i+1
            if (i == nx_global) ip1 = 1
            work_g2(i,j) = p5*(work_g(i,j) + work_g(ip1,j)) ! dxU
         enddo
         enddo
      endif
      ! NOTE(review): presumably saves HTN with extended global halo when
      ! the extended-grid option is on — confirm against
      ! primary_grid_lengths_global_ext
      if (pgl_global_ext) then
         call primary_grid_lengths_global_ext( &
            G_HTN, work_g, ew_boundary_type, ns_boundary_type)
      endif
      call scatter_global(HTN, work_g, master_task, distrb_info, &
                          field_loc_Nface, field_type_scalar)
      call scatter_global(dxU, work_g2, master_task, distrb_info, &
                          field_loc_NEcorner, field_type_scalar)

      ! dxT = average of 2 neighbor HTNs in j

      if (my_task == master_task) then
         do j = 2, ny_global
         do i = 1, nx_global
            work_g2(i,j) = p5*(work_g(i,j) + work_g(i,j-1)) ! dxT
         enddo
         enddo
         ! extrapolate to obtain dxT along j=1
         do i = 1, nx_global
            work_g2(i,1) = c2*work_g(i,2) - work_g(i,3) ! dxT
         enddo
      endif
      call scatter_global(dxT, work_g2, master_task, distrb_info, &
                          field_loc_center, field_type_scalar)

      ! dxN = HTN

      dxN(:,:,:) = HTN(:,:,:) ! dxN

      ! dxE = average of 4 surrounding HTNs

      if (my_task == master_task) then
         do j = 2, ny_global
         do i = 1, nx_global
            ! assume cyclic; noncyclic will be handled during scatter
            ip1 = i+1
            if (i == nx_global) ip1 = 1
            work_g2(i,j) = p25*(work_g(i,j)+work_g(ip1,j)+work_g(i,j-1)+work_g(ip1,j-1)) ! dxE
         enddo
         enddo
         ! extrapolate to obtain dxE along j=1
         do i = 1, nx_global
            ! assume cyclic; noncyclic will be handled during scatter
            ip1 = i+1
            if (i == nx_global) ip1 = 1
            work_g2(i,1) = p5*(c2*work_g(i  ,2) - work_g(i  ,3) + &
                               c2*work_g(ip1,2) - work_g(ip1,3)) ! dxE
         enddo
      endif
      call scatter_global(dxE, work_g2, master_task, distrb_info, &
                          field_loc_center, field_type_scalar)

      deallocate(work_g2)

      end subroutine primary_grid_lengths_HTN

!=======================================================================
! Calculate dyU and dyT from HTE on the global grid, to preserve
! ghost cell and/or land values that might otherwise be lost. Scatter
! dyU, dyT and HTE to all processors.
!
! author: Elizabeth C. 
Hunke, LANL - - subroutine primary_grid_lengths_HTE(work_g) - - use ice_constants, only: p25, p5, c2, cm_to_m, & - field_loc_center, field_loc_NEcorner, & - field_loc_Eface, field_type_scalar - - real (kind=dbl_kind), dimension(:,:) :: work_g ! global array holding HTE - - ! local variables - - integer (kind=int_kind) :: & - i, j, & - im1 ! i-1 - - real (kind=dbl_kind), dimension(:,:), allocatable :: & - work_g2 - - character(len=*), parameter :: subname = '(primary_grid_lengths_HTE)' - - if (my_task == master_task) then - allocate(work_g2(nx_global,ny_global)) - else - allocate(work_g2(1,1)) - endif - - ! HTE, dyU = average of 2 neighbor HTE in j - - if (my_task == master_task) then - do j = 1, ny_global - do i = 1, nx_global - work_g(i,j) = work_g(i,j) * cm_to_m ! HTE - enddo - enddo - do j = 1, ny_global-1 - do i = 1, nx_global - work_g2(i,j) = p5*(work_g(i,j) + work_g(i,j+1)) ! dyU - enddo - enddo - ! extrapolate to obtain dyU along j=ny_global - if (ny_global > 1) then - do i = 1, nx_global - work_g2(i,ny_global) = c2*work_g(i,ny_global-1) - work_g(i,ny_global-2) ! dyU - enddo - endif - endif - if (pgl_global_ext) then - call primary_grid_lengths_global_ext( & - G_HTE, work_g, ew_boundary_type, ns_boundary_type) - endif - call scatter_global(HTE, work_g, master_task, distrb_info, & - field_loc_Eface, field_type_scalar) - call scatter_global(dyU, work_g2, master_task, distrb_info, & - field_loc_NEcorner, field_type_scalar) - - ! dyT = average of 2 neighbor HTE in i - - if (my_task == master_task) then - do j = 1, ny_global - do i = 1, nx_global - ! assume cyclic; noncyclic will be handled during scatter - im1 = i-1 - if (i == 1) im1 = nx_global - work_g2(i,j) = p5*(work_g(i,j) + work_g(im1,j)) ! dyT - enddo - enddo - endif - call scatter_global(dyT, work_g2, master_task, distrb_info, & - field_loc_center, field_type_scalar) - - ! dyN = average of 4 neighbor HTEs - - if (my_task == master_task) then - do j = 1, ny_global-1 - do i = 1, nx_global - ! 
assume cyclic; noncyclic will be handled during scatter - im1 = i-1 - if (i == 1) im1 = nx_global - work_g2(i,j) = p25*(work_g(i,j) + work_g(im1,j) + work_g(i,j+1) + work_g(im1,j+1)) ! dyN - enddo - enddo - ! extrapolate to obtain dyN along j=ny_global - if (ny_global > 1) then - do i = 1, nx_global - ! assume cyclic; noncyclic will be handled during scatter - im1 = i-1 - if (i == 1) im1 = nx_global - work_g2(i,ny_global) = p5*(c2*work_g(i ,ny_global-1) - work_g(i ,ny_global-2) + & - c2*work_g(im1,ny_global-1) - work_g(im1,ny_global-2)) ! dyN - enddo - endif - endif - call scatter_global(dyN, work_g2, master_task, distrb_info, & - field_loc_center, field_type_scalar) - - ! dyE = HTE - - dyE(:,:,:) = HTE(:,:,:) - - deallocate(work_g2) - - end subroutine primary_grid_lengths_HTE - -!======================================================================= - -! Sets the boundary values for the T cell land mask (hm) and -! makes the logical land masks for T and U cells (tmask, umask) -! and N and E cells (nmask, emask). -! Also creates hemisphere masks (mask-n northern, mask-s southern) -! -! author: Elizabeth C. Hunke, LANL - - subroutine makemask - - use ice_constants, only: c0, p5, c1p5, & - field_loc_center, field_loc_NEcorner, field_type_scalar, & - field_loc_Nface, field_loc_Eface - - integer (kind=int_kind) :: & - i, j, iblk, & - ilo,ihi,jlo,jhi ! beginning and end of physical domain - - real (kind=dbl_kind) :: & - puny - - real (kind=dbl_kind), dimension(:,:,:), allocatable :: & - uvmCD - - type (block) :: & - this_block ! 
block information for current block - - character(len=*), parameter :: subname = '(makemask)' - - call icepack_query_parameters(puny_out=puny) - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) - - call ice_timer_start(timer_bound) - call ice_HaloUpdate (kmt, halo_info, & - field_loc_center, field_type_scalar) - call ice_HaloUpdate (hm, halo_info, & - field_loc_center, field_type_scalar) - call ice_timer_stop(timer_bound) - - !----------------------------------------------------------------- - ! construct T-cell and U-cell masks - !----------------------------------------------------------------- - - bm = c0 - allocate(uvmCD(nx_block,ny_block,max_blocks)) - - !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) - do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi - - do j = jlo, jhi - do i = ilo, ihi - uvm(i,j,iblk) = min (hm(i,j, iblk), hm(i+1,j, iblk), & - hm(i,j+1,iblk), hm(i+1,j+1,iblk)) - npm(i,j,iblk) = min (hm(i,j, iblk), hm(i,j+1,iblk)) - epm(i,j,iblk) = min (hm(i,j, iblk), hm(i+1,j,iblk)) - bm(i,j,iblk) = my_task + iblk/100.0_dbl_kind - uvmCD(i,j,iblk) = (hm(i,j, iblk)+hm(i+1,j, iblk) & - + hm(i,j+1,iblk)+hm(i+1,j+1,iblk)) - enddo - enddo - enddo - !$OMP END PARALLEL DO - - call ice_timer_start(timer_bound) - call ice_HaloUpdate (uvm, halo_info, & - field_loc_NEcorner, field_type_scalar) - call ice_HaloUpdate (uvmCD, halo_info, & - field_loc_NEcorner, field_type_scalar) - call ice_HaloUpdate (npm, halo_info, & - field_loc_Nface, field_type_scalar) - call ice_HaloUpdate (epm, halo_info, & - field_loc_Eface, field_type_scalar) - call ice_HaloUpdate (bm, halo_info, & - field_loc_center, field_type_scalar) - call ice_timer_stop(timer_bound) - - !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) - do iblk = 1, nblocks - this_block = 
get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi - - ! needs to cover halo (no halo update for logicals) - tmask(:,:,iblk) = .false. - umask(:,:,iblk) = .false. - umaskCD(:,:,iblk) = .false. - nmask(:,:,iblk) = .false. - emask(:,:,iblk) = .false. - do j = jlo-nghost, jhi+nghost - do i = ilo-nghost, ihi+nghost - if ( hm(i,j,iblk) > p5 ) tmask (i,j,iblk) = .true. - if (uvm(i,j,iblk) > p5 ) umask (i,j,iblk) = .true. - if (uvmCD(i,j,iblk) > c1p5) umaskCD(i,j,iblk) = .true. - if (npm(i,j,iblk) > p5 ) nmask (i,j,iblk) = .true. - if (epm(i,j,iblk) > p5 ) emask (i,j,iblk) = .true. - enddo - enddo - - enddo - !$OMP END PARALLEL DO - - !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) - do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi - - !----------------------------------------------------------------- - ! create hemisphere masks - !----------------------------------------------------------------- - - lmask_n(:,:,iblk) = .false. - lmask_s(:,:,iblk) = .false. - - tarean(:,:,iblk) = c0 - tareas(:,:,iblk) = c0 - - do j = jlo,jhi - do i = ilo,ihi - - if (ULAT(i,j,iblk) >= -puny) then - lmask_n(i,j,iblk) = .true. ! N. Hem. - else - lmask_s(i,j,iblk) = .true. ! S. Hem. - endif - - ! N hemisphere area mask (m^2) - if (lmask_n(i,j,iblk)) tarean(i,j,iblk) = tarea(i,j,iblk) & - * hm(i,j,iblk) - - ! S hemisphere area mask (m^2) - if (lmask_s(i,j,iblk)) tareas(i,j,iblk) = tarea(i,j,iblk) & - * hm(i,j,iblk) - - enddo - enddo - - enddo ! iblk - !$OMP END PARALLEL DO - - deallocate(uvmCD) - - end subroutine makemask - -!======================================================================= - -! Initializes latitude and longitude on T grid -! -! author: Elizabeth C. Hunke, LANL; code originally based on POP grid -! 
generation routine - - subroutine Tlatlon - - use ice_constants, only: c0, c1, c1p5, c2, c4, p5, & - field_loc_center, field_loc_Nface, field_loc_Eface, & - field_type_scalar - - integer (kind=int_kind) :: & - i, j, iblk , & ! horizontal indices - ilo,ihi,jlo,jhi ! beginning and end of physical domain - - real (kind=dbl_kind) :: & - z1,x1,y1,z2,x2,y2,z3,x3,y3,z4,x4,y4,tx,ty,tz,da, & - rad_to_deg - - type (block) :: & - this_block ! block information for current block - - character(len=*), parameter :: subname = '(Tlatlon)' - - call icepack_query_parameters(rad_to_deg_out=rad_to_deg) - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) - - TLAT(:,:,:) = c0 - TLON(:,:,:) = c0 - NLAT(:,:,:) = c0 - NLON(:,:,:) = c0 - ELAT(:,:,:) = c0 - ELON(:,:,:) = c0 - - !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block, & - !$OMP x1,y1,z1,x2,y2,z2,x3,y3,z3,x4,y4,z4, & - !$OMP tx,ty,tz,da) - do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi - - do j = jlo, jhi - do i = ilo, ihi - - z1 = cos(ULAT(i-1,j-1,iblk)) - x1 = cos(ULON(i-1,j-1,iblk))*z1 - y1 = sin(ULON(i-1,j-1,iblk))*z1 - z1 = sin(ULAT(i-1,j-1,iblk)) - - z2 = cos(ULAT(i,j-1,iblk)) - x2 = cos(ULON(i,j-1,iblk))*z2 - y2 = sin(ULON(i,j-1,iblk))*z2 - z2 = sin(ULAT(i,j-1,iblk)) - - z3 = cos(ULAT(i-1,j,iblk)) - x3 = cos(ULON(i-1,j,iblk))*z3 - y3 = sin(ULON(i-1,j,iblk))*z3 - z3 = sin(ULAT(i-1,j,iblk)) - - z4 = cos(ULAT(i,j,iblk)) - x4 = cos(ULON(i,j,iblk))*z4 - y4 = sin(ULON(i,j,iblk))*z4 - z4 = sin(ULAT(i,j,iblk)) - - ! --------- - ! TLON/TLAT 4 pt computation (pts 1, 2, 3, 4) - ! --------- - - tx = (x1+x2+x3+x4)/c4 - ty = (y1+y2+y3+y4)/c4 - tz = (z1+z2+z3+z4)/c4 - da = sqrt(tx**2+ty**2+tz**2) - - tz = tz/da - - ! TLON in radians East - TLON(i,j,iblk) = c0 - if (tx /= c0 .or. ty /= c0) TLON(i,j,iblk) = atan2(ty,tx) - - ! 
TLAT in radians North - TLAT(i,j,iblk) = asin(tz) - -! these two loops should be merged to save cos/sin calculations, -! but atan2 is not bit-for-bit. This suggests the result for atan2 depends on -! the prior atan2 call ??? not sure what's going on. -#if (1 == 1) - enddo ! i - enddo ! j - enddo ! iblk - !$OMP END PARALLEL DO - - !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block, & - !$OMP x1,y1,z1,x2,y2,z2,x3,y3,z3,x4,y4,z4, & - !$OMP tx,ty,tz,da) - do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi - - do j = jlo, jhi - do i = ilo, ihi - - z1 = cos(ULAT(i-1,j-1,iblk)) - x1 = cos(ULON(i-1,j-1,iblk))*z1 - y1 = sin(ULON(i-1,j-1,iblk))*z1 - z1 = sin(ULAT(i-1,j-1,iblk)) - - z2 = cos(ULAT(i,j-1,iblk)) - x2 = cos(ULON(i,j-1,iblk))*z2 - y2 = sin(ULON(i,j-1,iblk))*z2 - z2 = sin(ULAT(i,j-1,iblk)) - - z3 = cos(ULAT(i-1,j,iblk)) - x3 = cos(ULON(i-1,j,iblk))*z3 - y3 = sin(ULON(i-1,j,iblk))*z3 - z3 = sin(ULAT(i-1,j,iblk)) - - z4 = cos(ULAT(i,j,iblk)) - x4 = cos(ULON(i,j,iblk))*z4 - y4 = sin(ULON(i,j,iblk))*z4 - z4 = sin(ULAT(i,j,iblk)) -#endif - ! --------- - ! NLON/NLAT 2 pt computation (pts 3, 4) - ! --------- - - tx = (x3+x4)/c2 - ty = (y3+y4)/c2 - tz = (z3+z4)/c2 - da = sqrt(tx**2+ty**2+tz**2) - - tz = tz/da - - ! NLON in radians East - NLON(i,j,iblk) = c0 - if (tx /= c0 .or. ty /= c0) NLON(i,j,iblk) = atan2(ty,tx) - - ! NLAT in radians North - NLAT(i,j,iblk) = asin(tz) - - ! --------- - ! ELON/ELAT 2 pt computation (pts 2, 4) - ! --------- - - tx = (x2+x4)/c2 - ty = (y2+y4)/c2 - tz = (z2+z4)/c2 - da = sqrt(tx**2+ty**2+tz**2) - - tz = tz/da - - ! ELON in radians East - ELON(i,j,iblk) = c0 - if (tx /= c0 .or. ty /= c0) ELON(i,j,iblk) = atan2(ty,tx) - - ! ELAT in radians North - ELAT(i,j,iblk) = asin(tz) - - enddo ! i - enddo ! j - enddo ! iblk - !$OMP END PARALLEL DO - - if (trim(grid_type) == 'regional') then - ! 
for W boundary extrapolate from interior - !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) - do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi - - i = ilo - if (this_block%i_glob(i) == 1) then - do j = jlo, jhi - TLON(i,j,iblk) = c2*TLON(i+1,j,iblk) - & - TLON(i+2,j,iblk) - TLAT(i,j,iblk) = c2*TLAT(i+1,j,iblk) - & - TLAT(i+2,j,iblk) - NLON(i,j,iblk) = c1p5*TLON(i+1,j,iblk) - & - p5*TLON(i+2,j,iblk) - NLAT(i,j,iblk) = c1p5*TLAT(i+1,j,iblk) - & - p5*TLAT(i+2,j,iblk) - enddo - endif - enddo - !$OMP END PARALLEL DO - endif ! regional - - call ice_timer_start(timer_bound) - call ice_HaloUpdate (TLON, halo_info, & - field_loc_center, field_type_scalar, & - fillValue=c1) - call ice_HaloUpdate (TLAT, halo_info, & - field_loc_center, field_type_scalar, & - fillValue=c1) - call ice_HaloUpdate (NLON, halo_info, & - field_loc_Nface, field_type_scalar, & - fillValue=c1) - call ice_HaloUpdate (NLAT, halo_info, & - field_loc_Nface, field_type_scalar, & - fillValue=c1) - call ice_HaloUpdate (ELON, halo_info, & - field_loc_Eface, field_type_scalar, & - fillValue=c1) - call ice_HaloUpdate (ELAT, halo_info, & - field_loc_Eface, field_type_scalar, & - fillValue=c1) - call ice_HaloExtrapolate(TLON, distrb_info, & - ew_boundary_type, ns_boundary_type) - call ice_HaloExtrapolate(TLAT, distrb_info, & - ew_boundary_type, ns_boundary_type) - call ice_HaloExtrapolate(NLON, distrb_info, & - ew_boundary_type, ns_boundary_type) - call ice_HaloExtrapolate(NLAT, distrb_info, & - ew_boundary_type, ns_boundary_type) - call ice_HaloExtrapolate(ELON, distrb_info, & - ew_boundary_type, ns_boundary_type) - call ice_HaloExtrapolate(ELAT, distrb_info, & - ew_boundary_type, ns_boundary_type) - call ice_timer_stop(timer_bound) - - x1 = global_minval(TLON, distrb_info, tmask) - x2 = global_maxval(TLON, distrb_info, tmask) - x3 = global_minval(TLAT, distrb_info, tmask) - x4 = 
global_maxval(TLAT, distrb_info, tmask)

      y1 = global_minval(ULON, distrb_info, umask)
      y2 = global_maxval(ULON, distrb_info, umask)
      y3 = global_minval(ULAT, distrb_info, umask)
      y4 = global_maxval(ULAT, distrb_info, umask)

      if (my_task==master_task) then
         write(nu_diag,*) ' '
!         if (nx_block > 5+2*nghost .and. ny_block > 5+2*nghost) then
         write(nu_diag,*) 'min/max ULON:', y1*rad_to_deg, y2*rad_to_deg
         write(nu_diag,*) 'min/max ULAT:', y3*rad_to_deg, y4*rad_to_deg
!         endif
         write(nu_diag,*) 'min/max TLON:', x1*rad_to_deg, x2*rad_to_deg
         write(nu_diag,*) 'min/max TLAT:', x3*rad_to_deg, x4*rad_to_deg
      endif                     ! my_task

      x1 = global_minval(NLON, distrb_info, nmask)
      x2 = global_maxval(NLON, distrb_info, nmask)
      x3 = global_minval(NLAT, distrb_info, nmask)
      x4 = global_maxval(NLAT, distrb_info, nmask)

      y1 = global_minval(ELON, distrb_info, emask)
      y2 = global_maxval(ELON, distrb_info, emask)
      y3 = global_minval(ELAT, distrb_info, emask)
      y4 = global_maxval(ELAT, distrb_info, emask)

      if (my_task==master_task) then
         write(nu_diag,*) ' '
!         if (nx_block > 5+2*nghost .and. ny_block > 5+2*nghost) then
         write(nu_diag,*) 'min/max NLON:', x1*rad_to_deg, x2*rad_to_deg
         write(nu_diag,*) 'min/max NLAT:', x3*rad_to_deg, x4*rad_to_deg
         write(nu_diag,*) 'min/max ELON:', y1*rad_to_deg, y2*rad_to_deg
         write(nu_diag,*) 'min/max ELAT:', y3*rad_to_deg, y4*rad_to_deg
!         endif
      endif                     ! my_task

      end subroutine Tlatlon

!=======================================================================

! Shifts quantities from one grid to another
! Constructs the shift based on the grid
! NOTE: Input array includes ghost cells that must be updated before
! calling this routine.
!
! author: T. Craig

      subroutine grid_average_X2Y_base(type,work1,grid1,work2,grid2)

      character(len=*) , intent(in) :: &
         type, grid1, grid2

      real (kind=dbl_kind), intent(in) :: &
         work1(:,:,:)

      real (kind=dbl_kind), intent(out) :: &
         work2(:,:,:)

      ! local variables

      character(len=16) :: mapid   ! encoded mapping, e.g. 'T2US'

      character(len=*), parameter :: subname = '(grid_average_X2Y_base)'

      ! identical source and destination grids need no averaging;
      ! otherwise encode src/dst/type into a mapping string and hand
      ! off to the generic averager
      if (trim(grid1) /= trim(grid2)) then
         mapid = trim(grid1)//'2'//trim(grid2)//trim(type)
         call grid_average_X2Y_1(mapid,work1,work2)
      else
         work2 = work1
      endif

      end subroutine grid_average_X2Y_base

!=======================================================================

! Shifts quantities from one grid to another
! NOTE: Input array includes ghost cells that must be updated before
! calling this routine.
!
! author: T. Craig

      subroutine grid_average_X2Y_userwghts(type,work1,grid1,wght1,mask1,work2,grid2)

      character(len=*) , intent(in) :: &
         type, grid1, grid2

      real (kind=dbl_kind), intent(in) :: &
         work1(:,:,:), &
         wght1(:,:,:), &
         mask1(:,:,:)

      real (kind=dbl_kind), intent(out) :: &
         work2(:,:,:)

      ! local variables

      character(len=16) :: mapid   ! encoded mapping, e.g. 'T2US'

      character(len=*), parameter :: subname = '(grid_average_X2Y_userwghts)'

      ! identical source and destination grids need no averaging;
      ! otherwise encode src/dst/type into a mapping string and hand
      ! off to the user-supplied-weight averager
      if (trim(grid1) /= trim(grid2)) then
         mapid = trim(grid1)//'2'//trim(grid2)//trim(type)
         call grid_average_X2Y_1f(mapid,work1,wght1,mask1,work2)
      else
         work2 = work1
      endif

      end subroutine grid_average_X2Y_userwghts

!=======================================================================

! Shifts quantities from one grid to another
! NOTE: Input array includes ghost cells that must be updated before
! calling this routine.
!
! author: T. Craig

      subroutine grid_average_X2Y_NEversion(type,work1a,grid1a,work1b,grid1b,work2,grid2)

      character(len=*) , intent(in) :: &
         type, grid1a, grid1b, grid2

      real (kind=dbl_kind), intent(in) :: &
         work1a(:,:,:), work1b(:,:,:)

      real (kind=dbl_kind), intent(out) :: &
         work2(:,:,:)

      ! local variables

      character(len=16) :: X2Y

      character(len=*), parameter :: subname = '(grid_average_X2Y_NEversion)'

      X2Y = trim(grid1a)//trim(grid1b)//'2'//trim(grid2)//trim(type)

      select case (trim(X2Y))

      ! 
state masked - case('NE2US') - call grid_average_X2Y_2('NE2US',work1a,narea,npm,work1b,earea,epm,work2) - case('EN2US') - call grid_average_X2Y_2('NE2US',work1b,narea,npm,work1a,earea,epm,work2) - case('NE2TS') - call grid_average_X2Y_2('NE2TS',work1a,narea,npm,work1b,earea,epm,work2) - case('EN2TS') - call grid_average_X2Y_2('NE2TS',work1b,narea,npm,work1a,earea,epm,work2) - - ! state unmasked - case('NE2UA') - call grid_average_X2Y_2('NE2UA',work1a,narea,npm,work1b,earea,epm,work2) - case('EN2UA') - call grid_average_X2Y_2('NE2UA',work1b,narea,npm,work1a,earea,epm,work2) - case('NE2TA') - call grid_average_X2Y_2('NE2TA',work1a,narea,npm,work1b,earea,epm,work2) - case('EN2TA') - call grid_average_X2Y_2('NE2TA',work1b,narea,npm,work1a,earea,epm,work2) - - case default - call abort_ice(subname//'ERROR: unknown X2Y '//trim(X2Y)) - end select - - end subroutine grid_average_X2Y_NEversion - -!======================================================================= - -! Shifts quantities from one grid to another -! NOTE: Input array includes ghost cells that must be updated before -! calling this routine. -! -! author: T. Craig - - subroutine grid_average_X2Y_1(X2Y,work1,work2) - - character(len=*) , intent(in) :: & - X2Y - - real (kind=dbl_kind), intent(in) :: & - work1(:,:,:) - - real (kind=dbl_kind), intent(out) :: & - work2(:,:,:) - - ! local variables - - character(len=*), parameter :: subname = '(grid_average_X2Y_1)' - - select case (trim(X2Y)) - - ! 
flux unmasked - case('T2UF') - call grid_average_X2YF('NE',work1,tarea,work2,uarea) - case('T2EF') - call grid_average_X2YF('E' ,work1,tarea,work2,earea) - case('T2NF') - call grid_average_X2YF('N' ,work1,tarea,work2,narea) - case('U2TF') - call grid_average_X2YF('SW',work1,uarea,work2,tarea) - case('U2EF') - call grid_average_X2YF('S' ,work1,uarea,work2,earea) - case('U2NF') - call grid_average_X2YF('W' ,work1,uarea,work2,narea) - case('E2TF') - call grid_average_X2YF('W' ,work1,earea,work2,tarea) - case('E2UF') - call grid_average_X2YF('N' ,work1,earea,work2,uarea) - case('E2NF') - call grid_average_X2YF('NW',work1,earea,work2,narea) - case('N2TF') - call grid_average_X2YF('S' ,work1,narea,work2,tarea) - case('N2UF') - call grid_average_X2YF('E' ,work1,narea,work2,uarea) - case('N2EF') - call grid_average_X2YF('SE',work1,narea,work2,earea) - - ! state masked - case('T2US') - call grid_average_X2YS('NE',work1,tarea,hm ,work2) - case('T2ES') - call grid_average_X2YS('E' ,work1,tarea,hm ,work2) - case('T2NS') - call grid_average_X2YS('N' ,work1,tarea,hm ,work2) - case('U2TS') - call grid_average_X2YS('SW',work1,uarea,uvm,work2) - case('U2ES') - call grid_average_X2YS('S' ,work1,uarea,uvm,work2) - case('U2NS') - call grid_average_X2YS('W' ,work1,uarea,uvm,work2) - case('E2TS') - call grid_average_X2YS('W' ,work1,earea,epm,work2) - case('E2US') - call grid_average_X2YS('N' ,work1,earea,epm,work2) - case('E2NS') - call grid_average_X2YS('NW',work1,earea,epm,work2) - case('N2TS') - call grid_average_X2YS('S' ,work1,narea,npm,work2) - case('N2US') - call grid_average_X2YS('E' ,work1,narea,npm,work2) - case('N2ES') - call grid_average_X2YS('SE',work1,narea,npm,work2) - - ! 
state unmasked - case('T2UA') - call grid_average_X2YA('NE',work1,tarea,work2) - case('T2EA') - call grid_average_X2YA('E' ,work1,tarea,work2) - case('T2NA') - call grid_average_X2YA('N' ,work1,tarea,work2) - case('U2TA') - call grid_average_X2YA('SW',work1,uarea,work2) - case('U2EA') - call grid_average_X2YA('S' ,work1,uarea,work2) - case('U2NA') - call grid_average_X2YA('W' ,work1,uarea,work2) - case('E2TA') - call grid_average_X2YA('W' ,work1,earea,work2) - case('E2UA') - call grid_average_X2YA('N' ,work1,earea,work2) - case('E2NA') - call grid_average_X2YA('NW',work1,earea,work2) - case('N2TA') - call grid_average_X2YA('S' ,work1,narea,work2) - case('N2UA') - call grid_average_X2YA('E' ,work1,narea,work2) - case('N2EA') - call grid_average_X2YA('SE',work1,narea,work2) - - case default - call abort_ice(subname//'ERROR: unknown X2Y '//trim(X2Y)) - end select - - end subroutine grid_average_X2Y_1 - -!======================================================================= - -! Shifts quantities from one grid to another -! NOTE: Input array includes ghost cells that must be updated before -! calling this routine. -! -! author: T. Craig - - subroutine grid_average_X2Y_1f(X2Y,work1,wght1,mask1,work2) - - character(len=*) , intent(in) :: & - X2Y - - real (kind=dbl_kind), intent(in) :: & - work1(:,:,:), & - wght1(:,:,:), & - mask1(:,:,:) - - real (kind=dbl_kind), intent(out) :: & - work2(:,:,:) - - ! local variables - - character(len=*), parameter :: subname = '(grid_average_X2Y_1f)' - - select case (trim(X2Y)) - -! don't support these for now, requires extra destination wght -! ! flux unmasked -! case('T2UF') -! call grid_average_X2YF('NE',work1,tarea,work2,uarea) -! case('T2EF') -! call grid_average_X2YF('E' ,work1,tarea,work2,earea) -! case('T2NF') -! call grid_average_X2YF('N' ,work1,tarea,work2,narea) -! case('U2TF') -! call grid_average_X2YF('SW',work1,uarea,work2,tarea) -! case('U2EF') -! call grid_average_X2YF('S' ,work1,uarea,work2,earea) -! case('U2NF') -! 
call grid_average_X2YF('W' ,work1,uarea,work2,narea) -! case('E2TF') -! call grid_average_X2YF('W' ,work1,earea,work2,tarea) -! case('E2UF') -! call grid_average_X2YF('N' ,work1,earea,work2,uarea) -! case('E2NF') -! call grid_average_X2YF('NW',work1,earea,work2,narea) -! case('N2TF') -! call grid_average_X2YF('S' ,work1,narea,work2,tarea) -! case('N2UF') -! call grid_average_X2YF('E' ,work1,narea,work2,uarea) -! case('N2EF') -! call grid_average_X2YF('SE',work1,narea,work2,earea) - - ! state masked - case('T2US') - call grid_average_X2YS('NE',work1,wght1,mask1,work2) - case('T2ES') - call grid_average_X2YS('E' ,work1,wght1,mask1,work2) - case('T2NS') - call grid_average_X2YS('N' ,work1,wght1,mask1,work2) - case('U2TS') - call grid_average_X2YS('SW',work1,wght1,mask1,work2) - case('U2ES') - call grid_average_X2YS('S' ,work1,wght1,mask1,work2) - case('U2NS') - call grid_average_X2YS('W' ,work1,wght1,mask1,work2) - case('E2TS') - call grid_average_X2YS('W' ,work1,wght1,mask1,work2) - case('E2US') - call grid_average_X2YS('N' ,work1,wght1,mask1,work2) - case('E2NS') - call grid_average_X2YS('NW',work1,wght1,mask1,work2) - case('N2TS') - call grid_average_X2YS('S' ,work1,wght1,mask1,work2) - case('N2US') - call grid_average_X2YS('E' ,work1,wght1,mask1,work2) - case('N2ES') - call grid_average_X2YS('SE',work1,wght1,mask1,work2) - - ! 
state unmasked
      case('T2UA')
         call grid_average_X2YA('NE',work1,wght1,work2)
      case('T2EA')
         call grid_average_X2YA('E' ,work1,wght1,work2)
      case('T2NA')
         call grid_average_X2YA('N' ,work1,wght1,work2)
      case('U2TA')
         call grid_average_X2YA('SW',work1,wght1,work2)
      case('U2EA')
         call grid_average_X2YA('S' ,work1,wght1,work2)
      case('U2NA')
         call grid_average_X2YA('W' ,work1,wght1,work2)
      case('E2TA')
         call grid_average_X2YA('W' ,work1,wght1,work2)
      case('E2UA')
         call grid_average_X2YA('N' ,work1,wght1,work2)
      case('E2NA')
         call grid_average_X2YA('NW',work1,wght1,work2)
      case('N2TA')
         call grid_average_X2YA('S' ,work1,wght1,work2)
      case('N2UA')
         call grid_average_X2YA('E' ,work1,wght1,work2)
      case('N2EA')
         call grid_average_X2YA('SE',work1,wght1,work2)

      case default
         call abort_ice(subname//'ERROR: unknown X2Y '//trim(X2Y))
      end select

      end subroutine grid_average_X2Y_1f

!=======================================================================
! Shifts quantities from one grid to another
! State masked version, simple area weighted averager
! NOTE: Input array includes ghost cells that must be updated before
! calling this routine.
!
! author: T. Craig

      subroutine grid_average_X2YS(dir,work1,wght1,mask1,work2)

      use ice_constants, only: c0

      character(len=*) , intent(in) :: &
         dir

      real (kind=dbl_kind), intent(in) :: &
         work1(:,:,:), &
         wght1(:,:,:), &
         mask1(:,:,:)

      real (kind=dbl_kind), intent(out) :: &
         work2(:,:,:)

      ! local variables

      integer (kind=int_kind) :: &
         i, j, n, iblk, &
         npts, &            ! number of stencil points for this direction
         ilo,ihi,jlo,jhi    ! beginning and end of physical domain

      integer (kind=int_kind), dimension(4) :: &
         ioff, joff         ! stencil offsets relative to destination point

      real (kind=dbl_kind) :: &
         wtmp, wsum

      type (block) :: &
         this_block         ! block information for current block

      character(len=*), parameter :: subname = '(grid_average_X2YS)'

      work2(:,:,:) = c0

      ! translate the direction string into a stencil of (i,j) offsets;
      ! the offset order matches the per-direction expressions this
      ! replaces, so the floating point sums accumulate in the same
      ! order (bit-for-bit identical results)
      ioff(:) = 0
      joff(:) = 0
      select case (trim(dir))
      case('NE')
         npts = 4
         ioff(1:4) = (/ 0, 1, 0, 1 /)
         joff(1:4) = (/ 0, 0, 1, 1 /)
      case('SW')
         npts = 4
         ioff(1:4) = (/ 0, -1, 0, -1 /)
         joff(1:4) = (/ 0, 0, -1, -1 /)
      case('NW')
         npts = 4
         ioff(1:4) = (/ -1, 0, -1, 0 /)
         joff(1:4) = (/ 0, 0, 1, 1 /)
      case('SE')
         npts = 4
         ioff(1:4) = (/ 0, 1, 0, 1 /)
         joff(1:4) = (/ -1, -1, 0, 0 /)
      case('E')
         npts = 2
         ioff(1:2) = (/ 0, 1 /)
         joff(1:2) = (/ 0, 0 /)
      case('W')
         npts = 2
         ioff(1:2) = (/ -1, 0 /)
         joff(1:2) = (/ 0, 0 /)
      case('N')
         npts = 2
         ioff(1:2) = (/ 0, 0 /)
         joff(1:2) = (/ 0, 1 /)
      case('S')
         npts = 2
         ioff(1:2) = (/ 0, 0 /)
         joff(1:2) = (/ -1, 0 /)
      case default
         call abort_ice(subname//'ERROR: unknown option '//trim(dir))
      end select

      !$OMP PARALLEL DO PRIVATE(iblk,i,j,n,ilo,ihi,jlo,jhi,this_block,wtmp,wsum)
      do iblk = 1, nblocks
         this_block = get_block(blocks_ice(iblk),iblk)
         ilo = this_block%ilo
         ihi = this_block%ihi
         jlo = this_block%jlo
         jhi = this_block%jhi
         do j = jlo, jhi
         do i = ilo, ihi
            ! accumulate masked weights and the masked, weighted field
            wtmp = c0
            wsum = c0
            do n = 1, npts
               wtmp = wtmp + mask1(i+ioff(n),j+joff(n),iblk) &
                           * wght1(i+ioff(n),j+joff(n),iblk)
               wsum = wsum + mask1(i+ioff(n),j+joff(n),iblk) &
                           * work1(i+ioff(n),j+joff(n),iblk) &
                           * wght1(i+ioff(n),j+joff(n),iblk)
            enddo
            ! destination set only where some unmasked source area exists;
            ! fully masked points keep the c0 initialization
            if (wtmp /= c0) work2(i,j,iblk) = wsum / wtmp
         enddo
         enddo
      enddo
      !$OMP END PARALLEL DO

      end subroutine grid_average_X2YS

!=======================================================================
! Shifts quantities from one grid to another
! 
State unmasked version, simple weighted averager -! NOTE: Input array includes ghost cells that must be updated before -! calling this routine. -! -! author: T. Craig - - subroutine grid_average_X2YA(dir,work1,wght1,work2) - - use ice_constants, only: c0 - - character(len=*) , intent(in) :: & - dir - - real (kind=dbl_kind), intent(in) :: & - work1(:,:,:), & - wght1(:,:,:) - - real (kind=dbl_kind), intent(out) :: & - work2(:,:,:) - - ! local variables - - integer (kind=int_kind) :: & - i, j, iblk, & - ilo,ihi,jlo,jhi ! beginning and end of physical domain - - real (kind=dbl_kind) :: & - wtmp - - type (block) :: & - this_block ! block information for current block - - character(len=*), parameter :: subname = '(grid_average_X2YA)' - - work2(:,:,:) = c0 - - select case (trim(dir)) - - case('NE') - !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp) - do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi - do j = jlo, jhi - do i = ilo, ihi - wtmp = (wght1(i ,j ,iblk) & - + wght1(i+1,j ,iblk) & - + wght1(i ,j+1,iblk) & - + wght1(i+1,j+1,iblk)) - if (wtmp /= c0) & - work2(i,j,iblk) = (work1(i ,j ,iblk)*wght1(i ,j ,iblk) & - + work1(i+1,j ,iblk)*wght1(i+1,j ,iblk) & - + work1(i ,j+1,iblk)*wght1(i ,j+1,iblk) & - + work1(i+1,j+1,iblk)*wght1(i+1,j+1,iblk)) & - / wtmp - enddo - enddo - enddo - !$OMP END PARALLEL DO - - case('SW') - !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp) - do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi - do j = jlo, jhi - do i = ilo, ihi - wtmp = (wght1(i ,j ,iblk) & - + wght1(i-1,j ,iblk) & - + wght1(i ,j-1,iblk) & - + wght1(i-1,j-1,iblk)) - if (wtmp /= c0) & - work2(i,j,iblk) = (work1(i ,j ,iblk)*wght1(i ,j ,iblk) & - + work1(i-1,j ,iblk)*wght1(i-1,j ,iblk) & - + work1(i ,j-1,iblk)*wght1(i 
,j-1,iblk) & - + work1(i-1,j-1,iblk)*wght1(i-1,j-1,iblk)) & - / wtmp - enddo - enddo - enddo - !$OMP END PARALLEL DO - - case('NW') - !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp) - do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi - do j = jlo, jhi - do i = ilo, ihi - wtmp = (wght1(i-1,j ,iblk) & - + wght1(i ,j ,iblk) & - + wght1(i-1,j+1,iblk) & - + wght1(i ,j+1,iblk)) - if (wtmp /= c0) & - work2(i,j,iblk) = (work1(i-1,j ,iblk)*wght1(i-1,j ,iblk) & - + work1(i ,j ,iblk)*wght1(i ,j ,iblk) & - + work1(i-1,j+1,iblk)*wght1(i-1,j+1,iblk) & - + work1(i ,j+1,iblk)*wght1(i ,j+1,iblk)) & - / wtmp - enddo - enddo - enddo - !$OMP END PARALLEL DO - - case('SE') - !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp) - do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi - do j = jlo, jhi - do i = ilo, ihi - wtmp = (wght1(i ,j-1,iblk) & - + wght1(i+1,j-1,iblk) & - + wght1(i ,j ,iblk) & - + wght1(i+1,j ,iblk)) - if (wtmp /= c0) & - work2(i,j,iblk) = (work1(i ,j-1,iblk)*wght1(i ,j-1,iblk) & - + work1(i+1,j-1,iblk)*wght1(i+1,j-1,iblk) & - + work1(i ,j ,iblk)*wght1(i ,j ,iblk) & - + work1(i+1,j ,iblk)*wght1(i+1,j ,iblk)) & - / wtmp - enddo - enddo - enddo - !$OMP END PARALLEL DO - - case('E') - !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp) - do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi - do j = jlo, jhi - do i = ilo, ihi - wtmp = (wght1(i ,j,iblk) & - + wght1(i+1,j,iblk)) - if (wtmp /= c0) & - work2(i,j,iblk) = (work1(i ,j,iblk)*wght1(i ,j,iblk) & - + work1(i+1,j,iblk)*wght1(i+1,j,iblk)) & - / wtmp - enddo - enddo - enddo - !$OMP END PARALLEL DO - - case('W') - !$OMP PARALLEL DO 
PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp) - do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi - do j = jlo, jhi - do i = ilo, ihi - wtmp = (wght1(i-1,j,iblk) & - + wght1(i ,j,iblk)) - if (wtmp /= c0) & - work2(i,j,iblk) = (work1(i-1,j,iblk)*wght1(i-1,j,iblk) & - + work1(i ,j,iblk)*wght1(i ,j,iblk)) & - / wtmp - enddo - enddo - enddo - !$OMP END PARALLEL DO - - case('N') - !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp) - do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi - do j = jlo, jhi - do i = ilo, ihi - wtmp = (wght1(i,j ,iblk) & - + wght1(i,j+1,iblk)) - if (wtmp /= c0) & - work2(i,j,iblk) = (work1(i,j ,iblk)*wght1(i,j ,iblk) & - + work1(i,j+1,iblk)*wght1(i,j+1,iblk)) & - / wtmp - enddo - enddo - enddo - !$OMP END PARALLEL DO - - case('S') - !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp) - do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi - do j = jlo, jhi - do i = ilo, ihi - wtmp = (wght1(i,j-1,iblk) & - + wght1(i,j ,iblk)) - if (wtmp /= c0) & - work2(i,j,iblk) = (work1(i,j-1,iblk)*wght1(i,j-1,iblk) & - + work1(i,j ,iblk)*wght1(i,j ,iblk)) & - / wtmp - enddo - enddo - enddo - !$OMP END PARALLEL DO - - case default - call abort_ice(subname//'ERROR: unknown option '//trim(dir)) - end select - - end subroutine grid_average_X2YA - -!======================================================================= -! Shifts quantities from one grid to another -! Flux masked, original implementation based on earlier t2u and u2t versions -! NOTE: Input array includes ghost cells that must be updated before -! calling this routine. -! -! author: T. 
Craig - - subroutine grid_average_X2YF(dir,work1,wght1,work2,wght2) - - use ice_constants, only: c0, p25, p5 - - character(len=*) , intent(in) :: & - dir - - real (kind=dbl_kind), intent(in) :: & - work1(:,:,:), & - wght1(:,:,:), & - wght2(:,:,:) - - real (kind=dbl_kind), intent(out) :: & - work2(:,:,:) - - ! local variables - - integer (kind=int_kind) :: & - i, j, iblk, & - ilo,ihi,jlo,jhi ! beginning and end of physical domain - - type (block) :: & - this_block ! block information for current block - - character(len=*), parameter :: subname = '(grid_average_X2YF)' - - work2(:,:,:) = c0 - - select case (trim(dir)) - - case('NE') - !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) - do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi - do j = jlo, jhi - do i = ilo, ihi - work2(i,j,iblk) = p25 * & - (work1(i ,j ,iblk)*wght1(i ,j ,iblk) & - + work1(i+1,j ,iblk)*wght1(i+1,j ,iblk) & - + work1(i ,j+1,iblk)*wght1(i ,j+1,iblk) & - + work1(i+1,j+1,iblk)*wght1(i+1,j+1,iblk)) & - / wght2(i ,j ,iblk) - enddo - enddo - enddo - !$OMP END PARALLEL DO - - case('SW') - !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) - do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi - do j = jlo, jhi - do i = ilo, ihi - work2(i,j,iblk) = p25 * & - (work1(i ,j ,iblk)*wght1(i ,j ,iblk) & - + work1(i-1,j ,iblk)*wght1(i-1,j ,iblk) & - + work1(i ,j-1,iblk)*wght1(i ,j-1,iblk) & - + work1(i-1,j-1,iblk)*wght1(i-1,j-1,iblk)) & - / wght2(i ,j ,iblk) - enddo - enddo - enddo - !$OMP END PARALLEL DO - - case('NW') - !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) - do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi - do j = jlo, jhi - do i = 
ilo, ihi - work2(i,j,iblk) = p25 * & - (work1(i-1,j ,iblk)*wght1(i-1,j ,iblk) & - + work1(i ,j ,iblk)*wght1(i ,j ,iblk) & - + work1(i-1,j+1,iblk)*wght1(i-1,j+1,iblk) & - + work1(i ,j+1,iblk)*wght1(i ,j+1,iblk)) & - / wght2(i ,j ,iblk) - enddo - enddo - enddo - !$OMP END PARALLEL DO - - case('SE') - !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) - do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi - do j = jlo, jhi - do i = ilo, ihi - work2(i,j,iblk) = p25 * & - (work1(i ,j-1,iblk)*wght1(i ,j-1,iblk) & - + work1(i+1,j-1,iblk)*wght1(i+1,j-1,iblk) & - + work1(i ,j ,iblk)*wght1(i ,j ,iblk) & - + work1(i+1,j ,iblk)*wght1(i+1,j ,iblk)) & - / wght2(i ,j ,iblk) - enddo - enddo - enddo - !$OMP END PARALLEL DO - - case('E') - !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) - do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi - do j = jlo, jhi - do i = ilo, ihi - work2(i,j,iblk) = p5 * & - (work1(i ,j,iblk)*wght1(i ,j,iblk) & - + work1(i+1,j,iblk)*wght1(i+1,j,iblk)) & - / wght2(i ,j,iblk) - enddo - enddo - enddo - !$OMP END PARALLEL DO - - case('W') - !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) - do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi - do j = jlo, jhi - do i = ilo, ihi - work2(i,j,iblk) = p5 * & - (work1(i-1,j,iblk)*wght1(i-1,j,iblk) & - + work1(i ,j,iblk)*wght1(i ,j,iblk)) & - / wght2(i ,j,iblk) - enddo - enddo - enddo - !$OMP END PARALLEL DO - - case('N') - !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) - do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi - do j = 
jlo, jhi - do i = ilo, ihi - work2(i,j,iblk) = p5 * & - (work1(i,j ,iblk)*wght1(i,j ,iblk) & - + work1(i,j+1,iblk)*wght1(i,j+1,iblk)) & - / wght2(i ,j,iblk) - enddo - enddo - enddo - !$OMP END PARALLEL DO - - case('S') - !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) - do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi - do j = jlo, jhi - do i = ilo, ihi - work2(i,j,iblk) = p5 * & - (work1(i,j-1,iblk)*wght1(i,j-1,iblk) & - + work1(i,j ,iblk)*wght1(i,j ,iblk)) & - / wght2(i ,j,iblk) - enddo - enddo - enddo - !$OMP END PARALLEL DO - - case default - call abort_ice(subname//'ERROR: unknown option '//trim(dir)) - end select - - end subroutine grid_average_X2YF - -!======================================================================= -! Shifts quantities from one grid to another -! State masked version, simple weighted averager -! NOTE: Input array includes ghost cells that must be updated before -! calling this routine. -! -! author: T. Craig - - subroutine grid_average_X2Y_2(dir,work1a,wght1a,mask1a,work1b,wght1b,mask1b,work2) - - use ice_constants, only: c0 - - character(len=*) , intent(in) :: & - dir - - real (kind=dbl_kind), intent(in) :: & - work1a(:,:,:), work1b(:,:,:), & - wght1a(:,:,:), wght1b(:,:,:), & - mask1a(:,:,:), mask1b(:,:,:) - - real (kind=dbl_kind), intent(out) :: & - work2(:,:,:) - - ! local variables - - integer (kind=int_kind) :: & - i, j, iblk, & - ilo,ihi,jlo,jhi ! beginning and end of physical domain - - real (kind=dbl_kind) :: & - wtmp - - type (block) :: & - this_block ! 
block information for current block - - character(len=*), parameter :: subname = '(grid_average_X2Y_2)' - - work2(:,:,:) = c0 - - select case (trim(dir)) - - case('NE2US') - !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp) - do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi - do j = jlo, jhi - do i = ilo, ihi - wtmp = (mask1a(i ,j ,iblk)*wght1a(i ,j ,iblk) & - + mask1a(i+1,j ,iblk)*wght1a(i+1,j ,iblk) & - + mask1b(i ,j ,iblk)*wght1b(i ,j ,iblk) & - + mask1b(i ,j+1,iblk)*wght1b(i ,j+1,iblk)) - if (wtmp /= c0) & - work2(i,j,iblk) = (mask1a(i ,j ,iblk)*work1a(i ,j ,iblk)*wght1a(i ,j ,iblk) & - + mask1a(i+1,j ,iblk)*work1a(i+1,j ,iblk)*wght1a(i+1,j ,iblk) & - + mask1b(i ,j ,iblk)*work1b(i ,j ,iblk)*wght1b(i ,j ,iblk) & - + mask1b(i ,j+1,iblk)*work1b(i ,j+1,iblk)*wght1b(i ,j+1,iblk)) & - / wtmp - enddo - enddo - enddo - !$OMP END PARALLEL DO - - case('NE2TS') - !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp) - do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi - do j = jlo, jhi - do i = ilo, ihi - wtmp = (mask1a(i ,j-1,iblk)*wght1a(i ,j-1,iblk) & - + mask1a(i ,j ,iblk)*wght1a(i ,j ,iblk) & - + mask1b(i-1,j ,iblk)*wght1b(i-1,j ,iblk) & - + mask1b(i ,j ,iblk)*wght1b(i ,j ,iblk)) - if (wtmp /= c0) & - work2(i,j,iblk) = (mask1a(i ,j-1,iblk)*work1a(i ,j-1,iblk)*wght1a(i ,j-1,iblk) & - + mask1a(i ,j ,iblk)*work1a(i ,j ,iblk)*wght1a(i ,j ,iblk) & - + mask1b(i-1,j ,iblk)*work1b(i-1,j ,iblk)*wght1b(i-1,j ,iblk) & - + mask1b(i ,j ,iblk)*work1b(i ,j ,iblk)*wght1b(i ,j ,iblk)) & - / wtmp - enddo - enddo - enddo - !$OMP END PARALLEL DO - - case('NE2UA') - !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp) - do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = 
this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi - do j = jlo, jhi - do i = ilo, ihi - wtmp = (wght1a(i ,j ,iblk) & - + wght1a(i+1,j ,iblk) & - + wght1b(i ,j ,iblk) & - + wght1b(i ,j+1,iblk)) - if (wtmp /= c0) & - work2(i,j,iblk) = (work1a(i ,j ,iblk)*wght1a(i ,j ,iblk) & - + work1a(i+1,j ,iblk)*wght1a(i+1,j ,iblk) & - + work1b(i ,j ,iblk)*wght1b(i ,j ,iblk) & - + work1b(i ,j+1,iblk)*wght1b(i ,j+1,iblk)) & - / wtmp - enddo - enddo - enddo - !$OMP END PARALLEL DO - - case('NE2TA') - !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block,wtmp) - do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi - do j = jlo, jhi - do i = ilo, ihi - wtmp = (wght1a(i ,j-1,iblk) & - + wght1a(i ,j ,iblk) & - + wght1b(i-1,j ,iblk) & - + wght1b(i ,j ,iblk)) - if (wtmp /= c0) & - work2(i,j,iblk) = (work1a(i ,j-1,iblk)*wght1a(i ,j-1,iblk) & - + work1a(i ,j ,iblk)*wght1a(i ,j ,iblk) & - + work1b(i-1,j ,iblk)*wght1b(i-1,j ,iblk) & - + work1b(i ,j ,iblk)*wght1b(i ,j ,iblk)) & - / wtmp - enddo - enddo - enddo - !$OMP END PARALLEL DO - - case default - call abort_ice(subname//'ERROR: unknown option '//trim(dir)) - end select - - end subroutine grid_average_X2Y_2 - -!======================================================================= -! Compute the minimum of adjacent values of a field at specific indices, -! depending on the grid location (U, E, N) -! - real(kind=dbl_kind) function grid_neighbor_min(field, i, j, grid_location) result(mini) - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - field ! field defined at T point - - integer (kind=int_kind), intent(in) :: & - i, j - - character(len=*), intent(in) :: & - grid_location ! 
grid location at which to compute the minumum (U, E, N) - - character(len=*), parameter :: subname = '(grid_neighbor_min)' - - select case (trim(grid_location)) - case('U') - mini = min(field(i,j), field(i+1,j), field(i,j+1), field(i+1,j+1)) - case('E') - mini = min(field(i,j), field(i+1,j)) - case('N') - mini = min(field(i,j), field(i,j+1)) - case default - call abort_ice(subname // ' unknown grid_location: ' // grid_location) - end select - - end function grid_neighbor_min - -!======================================================================= -! Compute the maximum of adjacent values of a field at specific indices, -! depending on the grid location (U, E, N) -! - real(kind=dbl_kind) function grid_neighbor_max(field, i, j, grid_location) result(maxi) - - real (kind=dbl_kind), dimension (nx_block,ny_block), intent(in) :: & - field ! field defined at T point - - integer (kind=int_kind), intent(in) :: & - i, j - - character(len=*), intent(in) :: & - grid_location ! grid location at which to compute the maximum (U, E, N) - - - character(len=*), parameter :: subname = '(grid_neighbor_max)' - - select case (trim(grid_location)) - case('U') - maxi = max(field(i,j), field(i+1,j), field(i,j+1), field(i+1,j+1)) - case('E') - maxi = max(field(i,j), field(i+1,j)) - case('N') - maxi = max(field(i,j), field(i,j+1)) - case default - call abort_ice(subname // ' unknown grid_location: ' // grid_location) - end select - - end function grid_neighbor_max - -!======================================================================= -! The following code is used for obtaining the coordinates of the grid -! vertices for CF-compliant netCDF history output. Approximate! -!======================================================================= - -! These fields are only used for netcdf history output, and the -! ghost cell values are not needed. -! NOTE: Extrapolations were used: these fields are approximate! -! -! authors: A. McLaren, Met Office -! E. 
Hunke, LANL - - subroutine gridbox_corners - - use ice_blocks, only: nx_block, ny_block - use ice_constants, only: c0, c2, c360, & - field_loc_NEcorner, field_type_scalar - use ice_domain_size, only: max_blocks - - integer (kind=int_kind) :: & - i,j,iblk,icorner,& ! index counters - ilo,ihi,jlo,jhi ! beginning and end of physical domain - - real (kind=dbl_kind), dimension(:,:), allocatable :: & - work_g2 - - real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & - work1 - - real (kind=dbl_kind) :: & - rad_to_deg - - type (block) :: & - this_block ! block information for current block - - character(len=*), parameter :: subname = '(gridbox_corners)' - - call icepack_query_parameters(rad_to_deg_out=rad_to_deg) - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) - - !------------------------------------------------------------- - ! Get coordinates of grid boxes for each block as follows: - ! 
(1) SW corner, (2) SE corner, (3) NE corner, (4) NW corner - !------------------------------------------------------------- - - latu_bounds(:,:,:,:) = c0 - lonu_bounds(:,:,:,:) = c0 - - !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) - do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi - - do j = jlo, jhi - do i = ilo, ihi - - latu_bounds(1,i,j,iblk)=TLAT(i ,j ,iblk)*rad_to_deg - latu_bounds(2,i,j,iblk)=TLAT(i+1,j ,iblk)*rad_to_deg - latu_bounds(3,i,j,iblk)=TLAT(i+1,j+1,iblk)*rad_to_deg - latu_bounds(4,i,j,iblk)=TLAT(i ,j+1,iblk)*rad_to_deg - - lonu_bounds(1,i,j,iblk)=TLON(i ,j ,iblk)*rad_to_deg - lonu_bounds(2,i,j,iblk)=TLON(i+1,j ,iblk)*rad_to_deg - lonu_bounds(3,i,j,iblk)=TLON(i+1,j+1,iblk)*rad_to_deg - lonu_bounds(4,i,j,iblk)=TLON(i ,j+1,iblk)*rad_to_deg - - enddo - enddo - enddo - !$OMP END PARALLEL DO - - !---------------------------------------------------------------- - ! extrapolate on global grid to get edge values - !---------------------------------------------------------------- - - if (my_task == master_task) then - allocate(work_g2(nx_global,ny_global)) - else - allocate(work_g2(1,1)) - endif - - work1(:,:,:) = latu_bounds(2,:,:,:) -! 
work_g2 = c0 - - call gather_global(work_g2, work1, master_task, distrb_info) - if (my_task == master_task) then - do j = 1, ny_global - work_g2(nx_global,j) = c2*work_g2(nx_global-1,j) & - - work_g2(nx_global-2,j) - enddo - endif - call scatter_global(work1, work_g2, & - master_task, distrb_info, & - field_loc_NEcorner, field_type_scalar) - latu_bounds(2,:,:,:) = work1(:,:,:) - - work1(:,:,:) = latu_bounds(3,:,:,:) - call gather_global(work_g2, work1, master_task, distrb_info) - if (my_task == master_task) then - do i = 1, nx_global - work_g2(i,ny_global) = c2*work_g2(i,ny_global-1) & - - work_g2(i,ny_global-2) - enddo - do j = 1, ny_global - work_g2(nx_global,j) = c2*work_g2(nx_global-1,j) & - - work_g2(nx_global-2,j) - enddo - endif - call scatter_global(work1, work_g2, & - master_task, distrb_info, & - field_loc_NEcorner, field_type_scalar) - latu_bounds(3,:,:,:) = work1(:,:,:) - - work1(:,:,:) = latu_bounds(4,:,:,:) - call gather_global(work_g2, work1, master_task, distrb_info) - if (my_task == master_task) then - do i = 1, nx_global - work_g2(i,ny_global) = c2*work_g2(i,ny_global-1) & - - work_g2(i,ny_global-2) - enddo - endif - call scatter_global(work1, work_g2, & - master_task, distrb_info, & - field_loc_NEcorner, field_type_scalar) - latu_bounds(4,:,:,:) = work1(:,:,:) - - work1(:,:,:) = lonu_bounds(2,:,:,:) - call gather_global(work_g2, work1, master_task, distrb_info) - if (my_task == master_task) then - do j = 1, ny_global - work_g2(nx_global,j) = c2*work_g2(nx_global-1,j) & - - work_g2(nx_global-2,j) - enddo - endif - call scatter_global(work1, work_g2, & - master_task, distrb_info, & - field_loc_NEcorner, field_type_scalar) - lonu_bounds(2,:,:,:) = work1(:,:,:) - - work1(:,:,:) = lonu_bounds(3,:,:,:) - call gather_global(work_g2, work1, master_task, distrb_info) - if (my_task == master_task) then - do i = 1, nx_global - work_g2(i,ny_global) = c2*work_g2(i,ny_global-1) & - - work_g2(i,ny_global-2) - enddo - do j = 1, ny_global - work_g2(nx_global,j) = 
c2*work_g2(nx_global-1,j) & - - work_g2(nx_global-2,j) - enddo - endif - call scatter_global(work1, work_g2, & - master_task, distrb_info, & - field_loc_NEcorner, field_type_scalar) - lonu_bounds(3,:,:,:) = work1(:,:,:) - - work1(:,:,:) = lonu_bounds(4,:,:,:) - call gather_global(work_g2, work1, master_task, distrb_info) - if (my_task == master_task) then - do i = 1, nx_global - work_g2(i,ny_global) = c2*work_g2(i,ny_global-1) & - - work_g2(i,ny_global-2) - enddo - endif - call scatter_global(work1, work_g2, & - master_task, distrb_info, & - field_loc_NEcorner, field_type_scalar) - lonu_bounds(4,:,:,:) = work1(:,:,:) - - deallocate(work_g2) - - !---------------------------------------------------------------- - ! Convert longitude to Degrees East >0 for history output - !---------------------------------------------------------------- - - allocate(work_g2(nx_block,ny_block)) ! not used as global here - !OMP fails in this loop - do iblk = 1, nblocks - do icorner = 1, 4 - work_g2(:,:) = lont_bounds(icorner,:,:,iblk) + c360 - where (work_g2 > c360) work_g2 = work_g2 - c360 - where (work_g2 < c0 ) work_g2 = work_g2 + c360 - lont_bounds(icorner,:,:,iblk) = work_g2(:,:) - work_g2(:,:) = lonu_bounds(icorner,:,:,iblk) + c360 - where (work_g2 > c360) work_g2 = work_g2 - c360 - where (work_g2 < c0 ) work_g2 = work_g2 + c360 - lonu_bounds(icorner,:,:,iblk) = work_g2(:,:) - enddo - enddo - deallocate(work_g2) - - end subroutine gridbox_corners - -!======================================================================= -! The following code is used for obtaining the coordinates of the grid -! vertices for CF-compliant netCDF history output. Approximate! -!======================================================================= - -! These fields are only used for netcdf history output, and the -! ghost cell values are not needed. -! NOTE: Extrapolations were used: these fields are approximate! -! 
- - subroutine gridbox_edges - - use ice_blocks, only: nx_block, ny_block - use ice_constants, only: c0, c2, c360, & - field_loc_NEcorner, field_type_scalar - use ice_domain_size, only: max_blocks - - integer (kind=int_kind) :: & - i,j,iblk,icorner,& ! index counters - ilo,ihi,jlo,jhi ! beginning and end of physical domain - - real (kind=dbl_kind), dimension(:,:), allocatable :: & - work_g2 - - real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & - work1 - - real (kind=dbl_kind) :: & - rad_to_deg - - type (block) :: & - this_block ! block information for current block - - character(len=*), parameter :: subname = '(gridbox_edges)' - - call icepack_query_parameters(rad_to_deg_out=rad_to_deg) - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) - - !------------------------------------------------------------- - ! Get coordinates of grid boxes for each block as follows: - ! for N pt: (1) W edge, (2) E edge, (3) E edge j+1, (4) W edge j+1 - ! 
for E pt: (1) S edge, (2) S edge i+1, (3) N edge, i+1 (4) N edge - !------------------------------------------------------------- - - latn_bounds(:,:,:,:) = c0 - lonn_bounds(:,:,:,:) = c0 - late_bounds(:,:,:,:) = c0 - lone_bounds(:,:,:,:) = c0 - - !$OMP PARALLEL DO PRIVATE(iblk,i,j,ilo,ihi,jlo,jhi,this_block) - do iblk = 1, nblocks - this_block = get_block(blocks_ice(iblk),iblk) - ilo = this_block%ilo - ihi = this_block%ihi - jlo = this_block%jlo - jhi = this_block%jhi - - do j = jlo, jhi - do i = ilo, ihi - - latn_bounds(1,i,j,iblk)=ELAT(i-1,j ,iblk)*rad_to_deg - latn_bounds(2,i,j,iblk)=ELAT(i ,j ,iblk)*rad_to_deg - latn_bounds(3,i,j,iblk)=ELAT(i ,j+1,iblk)*rad_to_deg - latn_bounds(4,i,j,iblk)=ELAT(i-1,j+1,iblk)*rad_to_deg - - lonn_bounds(1,i,j,iblk)=ELON(i-1,j ,iblk)*rad_to_deg - lonn_bounds(2,i,j,iblk)=ELON(i ,j ,iblk)*rad_to_deg - lonn_bounds(3,i,j,iblk)=ELON(i ,j+1,iblk)*rad_to_deg - lonn_bounds(4,i,j,iblk)=ELON(i-1,j+1,iblk)*rad_to_deg - - late_bounds(1,i,j,iblk)=NLAT(i ,j-1,iblk)*rad_to_deg - late_bounds(2,i,j,iblk)=NLAT(i+1,j-1,iblk)*rad_to_deg - late_bounds(3,i,j,iblk)=NLAT(i+1,j ,iblk)*rad_to_deg - late_bounds(4,i,j,iblk)=NLAT(i ,j ,iblk)*rad_to_deg - - lone_bounds(1,i,j,iblk)=NLON(i ,j-1,iblk)*rad_to_deg - lone_bounds(2,i,j,iblk)=NLON(i+1,j-1,iblk)*rad_to_deg - lone_bounds(3,i,j,iblk)=NLON(i+1,j ,iblk)*rad_to_deg - lone_bounds(4,i,j,iblk)=NLON(i ,j ,iblk)*rad_to_deg - - enddo - enddo - enddo - !$OMP END PARALLEL DO - - !---------------------------------------------------------------- - ! extrapolate on global grid to get edge values - !---------------------------------------------------------------- - - if (my_task == master_task) then - allocate(work_g2(nx_global,ny_global)) - else - allocate(work_g2(1,1)) - endif - - ! 
latn_bounds - - work1(:,:,:) = latn_bounds(1,:,:,:) - call gather_global(work_g2, work1, master_task, distrb_info) - if (my_task == master_task) then - do j = 1, ny_global - work_g2(1,j) = c2*work_g2(2,j) & - - work_g2(3,j) - enddo - endif - call scatter_global(work1, work_g2, & - master_task, distrb_info, & - field_loc_NEcorner, field_type_scalar) - latn_bounds(1,:,:,:) = work1(:,:,:) - - work1(:,:,:) = latn_bounds(3,:,:,:) - call gather_global(work_g2, work1, master_task, distrb_info) - if (my_task == master_task) then - do i = 1, nx_global - work_g2(i,ny_global) = c2*work_g2(i,ny_global-1) & - - work_g2(i,ny_global-2) - enddo - endif - call scatter_global(work1, work_g2, & - master_task, distrb_info, & - field_loc_NEcorner, field_type_scalar) - latn_bounds(3,:,:,:) = work1(:,:,:) - - work1(:,:,:) = latn_bounds(4,:,:,:) - call gather_global(work_g2, work1, master_task, distrb_info) - if (my_task == master_task) then - do i = 1, nx_global - work_g2(i,ny_global) = c2*work_g2(i,ny_global-1) & - - work_g2(i,ny_global-2) - enddo - do j = 1, ny_global - work_g2(1,j) = c2*work_g2(2,j) & - - work_g2(3,j) - enddo - endif - call scatter_global(work1, work_g2, & - master_task, distrb_info, & - field_loc_NEcorner, field_type_scalar) - latn_bounds(4,:,:,:) = work1(:,:,:) - - ! 
lonn_bounds - - work1(:,:,:) = lonn_bounds(1,:,:,:) - call gather_global(work_g2, work1, master_task, distrb_info) - if (my_task == master_task) then - do j = 1, ny_global - work_g2(1,j) = c2*work_g2(2,j) & - - work_g2(3,j) - enddo - endif - call scatter_global(work1, work_g2, & - master_task, distrb_info, & - field_loc_NEcorner, field_type_scalar) - lonn_bounds(1,:,:,:) = work1(:,:,:) - - work1(:,:,:) = lonn_bounds(3,:,:,:) - call gather_global(work_g2, work1, master_task, distrb_info) - if (my_task == master_task) then - do i = 1, nx_global - work_g2(i,ny_global) = c2*work_g2(i,ny_global-1) & - - work_g2(i,ny_global-2) - enddo - endif - call scatter_global(work1, work_g2, & - master_task, distrb_info, & - field_loc_NEcorner, field_type_scalar) - lonn_bounds(3,:,:,:) = work1(:,:,:) - - work1(:,:,:) = lonn_bounds(4,:,:,:) - call gather_global(work_g2, work1, master_task, distrb_info) - if (my_task == master_task) then - do i = 1, nx_global - work_g2(i,ny_global) = c2*work_g2(i,ny_global-1) & - - work_g2(i,ny_global-2) - enddo - do j = 1, ny_global - work_g2(1,j) = c2*work_g2(2,j) & - - work_g2(3,j) - enddo - endif - call scatter_global(work1, work_g2, & - master_task, distrb_info, & - field_loc_NEcorner, field_type_scalar) - lonn_bounds(4,:,:,:) = work1(:,:,:) - - ! 
late_bounds - - work1(:,:,:) = late_bounds(1,:,:,:) - call gather_global(work_g2, work1, master_task, distrb_info) - if (my_task == master_task) then - do i = 1, nx_global - work_g2(i,1) = c2*work_g2(i,2) & - - work_g2(i,3) - enddo - endif - call scatter_global(work1, work_g2, & - master_task, distrb_info, & - field_loc_NEcorner, field_type_scalar) - late_bounds(1,:,:,:) = work1(:,:,:) - - work1(:,:,:) = late_bounds(2,:,:,:) - call gather_global(work_g2, work1, master_task, distrb_info) - if (my_task == master_task) then - do i = 1, nx_global - work_g2(i,1) = c2*work_g2(i,2) & - - work_g2(i,3) - enddo - do j = 1, ny_global - work_g2(nx_global,j) = c2*work_g2(nx_global-1,j) & - - work_g2(nx_global-2,j) - enddo - endif - call scatter_global(work1, work_g2, & - master_task, distrb_info, & - field_loc_NEcorner, field_type_scalar) - late_bounds(2,:,:,:) = work1(:,:,:) - - work1(:,:,:) = late_bounds(3,:,:,:) - call gather_global(work_g2, work1, master_task, distrb_info) - if (my_task == master_task) then - do j = 1, ny_global - work_g2(nx_global,j) = c2*work_g2(nx_global-1,j) & - - work_g2(nx_global-2,j) - enddo - endif - call scatter_global(work1, work_g2, & - master_task, distrb_info, & - field_loc_NEcorner, field_type_scalar) - late_bounds(3,:,:,:) = work1(:,:,:) - - ! 
lone_bounds - - work1(:,:,:) = lone_bounds(1,:,:,:) - call gather_global(work_g2, work1, master_task, distrb_info) - if (my_task == master_task) then - do i = 1, nx_global - work_g2(i,1) = c2*work_g2(i,2) & - - work_g2(i,3) - enddo - endif - call scatter_global(work1, work_g2, & - master_task, distrb_info, & - field_loc_NEcorner, field_type_scalar) - lone_bounds(1,:,:,:) = work1(:,:,:) - - work1(:,:,:) = lone_bounds(2,:,:,:) - call gather_global(work_g2, work1, master_task, distrb_info) - if (my_task == master_task) then - do i = 1, nx_global - work_g2(i,1) = c2*work_g2(i,2) & - - work_g2(i,3) - enddo - do j = 1, ny_global - work_g2(nx_global,j) = c2*work_g2(nx_global-1,j) & - - work_g2(nx_global-2,j) - enddo - endif - call scatter_global(work1, work_g2, & - master_task, distrb_info, & - field_loc_NEcorner, field_type_scalar) - lone_bounds(2,:,:,:) = work1(:,:,:) - - work1(:,:,:) = lone_bounds(3,:,:,:) - call gather_global(work_g2, work1, master_task, distrb_info) - if (my_task == master_task) then - do j = 1, ny_global - work_g2(nx_global,j) = c2*work_g2(nx_global-1,j) & - - work_g2(nx_global-2,j) - enddo - endif - call scatter_global(work1, work_g2, & - master_task, distrb_info, & - field_loc_NEcorner, field_type_scalar) - lone_bounds(3,:,:,:) = work1(:,:,:) - - deallocate(work_g2) - - !---------------------------------------------------------------- - ! Convert longitude to Degrees East >0 for history output - !---------------------------------------------------------------- - - allocate(work_g2(nx_block,ny_block)) ! 
not used as global here - !OMP fails in this loop - do iblk = 1, nblocks - do icorner = 1, 4 - work_g2(:,:) = lonn_bounds(icorner,:,:,iblk) + c360 - where (work_g2 > c360) work_g2 = work_g2 - c360 - where (work_g2 < c0 ) work_g2 = work_g2 + c360 - lonn_bounds(icorner,:,:,iblk) = work_g2(:,:) - work_g2(:,:) = lone_bounds(icorner,:,:,iblk) + c360 - where (work_g2 > c360) work_g2 = work_g2 - c360 - where (work_g2 < c0 ) work_g2 = work_g2 + c360 - lone_bounds(icorner,:,:,iblk) = work_g2(:,:) - enddo - enddo - deallocate(work_g2) - - end subroutine gridbox_edges - -!======================================================================= - -! NOTE: Boundary conditions for fields on NW, SW, SE corners -! have not been implemented; using NE corner location for all. -! Extrapolations are also used: these fields are approximate! -! -! authors: A. McLaren, Met Office -! E. Hunke, LANL - - subroutine gridbox_verts(work_g,vbounds) - - use ice_blocks, only: nx_block, ny_block - use ice_constants, only: c0, c2, & - field_loc_NEcorner, field_type_scalar - use ice_domain_size, only: max_blocks - - real (kind=dbl_kind), dimension(:,:), intent(in) :: & - work_g - - real (kind=dbl_kind), dimension(4,nx_block,ny_block,max_blocks), intent(out) :: & - vbounds - - integer (kind=int_kind) :: & - i,j ! index counters - - real (kind=dbl_kind) :: & - rad_to_deg - - real (kind=dbl_kind), dimension(:,:), allocatable :: & - work_g2 - - real (kind=dbl_kind), dimension (nx_block,ny_block,max_blocks) :: & - work1 - - character(len=*), parameter :: subname = '(gridbox_verts)' - - call icepack_query_parameters(rad_to_deg_out=rad_to_deg) - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) - - if (my_task == master_task) then - allocate(work_g2(nx_global,ny_global)) - else - allocate(work_g2(1,1)) - endif - - !------------------------------------------------------------- - ! 
Get coordinates of grid boxes for each block as follows: - ! (1) SW corner, (2) SE corner, (3) NE corner, (4) NW corner - !------------------------------------------------------------- - - work_g2(:,:) = c0 - if (my_task == master_task) then - do j = 2, ny_global - do i = 2, nx_global - work_g2(i,j) = work_g(i-1,j-1) * rad_to_deg - enddo - enddo - ! extrapolate - do j = 1, ny_global - work_g2(1,j) = c2*work_g2(2,j) - work_g2(3,j) - enddo - do i = 1, nx_global - work_g2(i,1) = c2*work_g2(i,2) - work_g2(i,3) - enddo - endif - call scatter_global(work1, work_g2, & - master_task, distrb_info, & - field_loc_NEcorner, field_type_scalar) - vbounds(1,:,:,:) = work1(:,:,:) - - work_g2(:,:) = c0 - if (my_task == master_task) then - do j = 2, ny_global - do i = 1, nx_global - work_g2(i,j) = work_g(i,j-1) * rad_to_deg - enddo - enddo - ! extrapolate - do i = 1, nx_global - work_g2(i,1) = (c2*work_g2(i,2) - work_g2(i,3)) - enddo - endif - call scatter_global(work1, work_g2, & - master_task, distrb_info, & - field_loc_NEcorner, field_type_scalar) - vbounds(2,:,:,:) = work1(:,:,:) - - work_g2(:,:) = c0 - if (my_task == master_task) then - do j = 1, ny_global - do i = 1, nx_global - work_g2(i,j) = work_g(i,j) * rad_to_deg - enddo - enddo - endif - call scatter_global(work1, work_g2, & - master_task, distrb_info, & - field_loc_NEcorner, field_type_scalar) - vbounds(3,:,:,:) = work1(:,:,:) - - work_g2(:,:) = c0 - if (my_task == master_task) then - do j = 1, ny_global - do i = 2, nx_global - work_g2(i,j) = work_g(i-1,j ) * rad_to_deg - enddo - enddo - ! extrapolate - do j = 1, ny_global - work_g2(1,j) = c2*work_g2(2,j) - work_g2(3,j) - enddo - endif - call scatter_global(work1, work_g2, & - master_task, distrb_info, & - field_loc_NEcorner, field_type_scalar) - vbounds(4,:,:,:) = work1(:,:,:) - - deallocate (work_g2) - - end subroutine gridbox_verts - -!======================================================================= -! 
ocean bathymetry for grounded sea ice (seabed stress) or icebergs -! currently hardwired for 40 levels (gx3, gx1 grids) -! should be read from a file instead (see subroutine read_seabedstress_bathy) - - subroutine get_bathymetry - - use ice_constants, only: c0 - - integer (kind=int_kind) :: & - i, j, k, iblk ! loop indices - - integer (kind=int_kind), parameter :: & - nlevel = 40 ! number of layers (gx3 grid) - - real (kind=dbl_kind), dimension(nlevel) :: & - depth ! total depth, m - - real (kind=dbl_kind) :: & - puny - - logical (kind=log_kind) :: & - calc_dragio - - real (kind=dbl_kind), dimension(nlevel), parameter :: & - thick = (/ & ! ocean layer thickness, m - 10.01244_dbl_kind, 10.11258_dbl_kind, 10.31682_dbl_kind, & - 10.63330_dbl_kind, 11.07512_dbl_kind, 11.66145_dbl_kind, & - 12.41928_dbl_kind, 13.38612_dbl_kind, 14.61401_dbl_kind, & - 16.17561_dbl_kind, 18.17368_dbl_kind, 20.75558_dbl_kind, & - 24.13680_dbl_kind, 28.63821_dbl_kind, 34.74644_dbl_kind, & - 43.20857_dbl_kind, 55.16812_dbl_kind, 72.30458_dbl_kind, & - 96.74901_dbl_kind, 130.0392_dbl_kind, 170.0489_dbl_kind, & - 207.9933_dbl_kind, 233.5694_dbl_kind, 245.2719_dbl_kind, & - 248.9804_dbl_kind, 249.8322_dbl_kind, 249.9787_dbl_kind, & - 249.9979_dbl_kind, 249.9998_dbl_kind, 250.0000_dbl_kind, & - 250.0000_dbl_kind, 250.0000_dbl_kind, 250.0000_dbl_kind, & - 250.0000_dbl_kind, 250.0000_dbl_kind, 250.0000_dbl_kind, & - 250.0000_dbl_kind, 250.0000_dbl_kind, 250.0000_dbl_kind, & - 250.0000_dbl_kind /) - - character(len=*), parameter :: subname = '(get_bathymetry)' - - call icepack_query_parameters(puny_out=puny, calc_dragio_out=calc_dragio) - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) - - if (use_bathymetry) then - - call read_seabedstress_bathy - - else - - ! 
convert to total depth - depth(1) = thick(1) - do k = 2, nlevel - depth(k) = depth(k-1) + thick(k) - enddo - - bathymetry = c0 - do iblk = 1, nblocks - do j = 1, ny_block - do i = 1, nx_block - k = min(nint(kmt(i,j,iblk)),nlevel) - if (k > nlevel) call abort_ice(subname//' kmt gt nlevel error') - if (k > 0) bathymetry(i,j,iblk) = depth(k) - enddo - enddo - enddo - - ! For consistency, set thickness_ocn_layer1 in Icepack if 'calc_dragio' is active - if (calc_dragio) then - call icepack_init_parameters(thickness_ocn_layer1_in=thick(1)) - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) - endif - - endif ! bathymetry_file - - end subroutine get_bathymetry - -!======================================================================= -! with use_bathymetry = false, vertical depth profile generated for max KMT -! with use_bathymetry = true, expects to read in pop vert_grid file - - subroutine get_bathymetry_popfile - - integer (kind=int_kind) :: & - i, j, k, iblk ! loop indices - - integer (kind=int_kind) :: & - ntmp, nlevel , & ! number of levels (max KMT) - k1 , & ! levels - ierr , & ! error tag - fid ! fid unit number - - real (kind=dbl_kind), dimension(:),allocatable :: & - depth , & ! total depth, m - thick ! layer thickness, cm -> m - - logical (kind=log_kind) :: & - calc_dragio - - character(len=*), parameter :: subname = '(get_bathymetry_popfile)' - - ntmp = maxval(nint(KMT)) - nlevel = global_maxval(ntmp,distrb_info) - - if (my_task==master_task) then - write(nu_diag,*) subname,' KMT max = ',nlevel - endif - - allocate(depth(nlevel),thick(nlevel)) - thick = -999999. - depth = -999999. 
- - if (use_bathymetry) then - - write (nu_diag,*) subname,' Bathymetry file = ', trim(bathymetry_file) - if (my_task == master_task) then - call get_fileunit(fid) - open(fid,file=bathymetry_file,form='formatted',iostat=ierr) - if (ierr/=0) call abort_ice(subname//' open error') - do k = 1,nlevel - read(fid,*,iostat=ierr) thick(k) - if (ierr/=0) call abort_ice(subname//' read error') - enddo - call release_fileunit(fid) - endif - - call broadcast_array(thick,master_task) - - else - - ! create thickness profile - k1 = min(5,nlevel) - do k = 1,k1 - thick(k) = max(10000._dbl_kind/float(nlevel),500._dbl_kind) - enddo - do k = k1+1,nlevel - thick(k) = min(thick(k-1)*1.2_dbl_kind,20000._dbl_kind) - enddo - - endif - - ! convert thick from cm to m - thick = thick / 100._dbl_kind - - ! convert to total depth - depth(1) = thick(1) - do k = 2, nlevel - depth(k) = depth(k-1) + thick(k) - if (depth(k) < 0.) call abort_ice(subname//' negative depth error') - enddo - - if (my_task==master_task) then - do k = 1,nlevel - write(nu_diag,'(2a,i6,2f13.7)') subname,' k, thick(m), depth(m) = ',k,thick(k),depth(k) - enddo - endif - - bathymetry = 0._dbl_kind - do iblk = 1, nblocks - do j = 1, ny_block - do i = 1, nx_block - k = nint(kmt(i,j,iblk)) - if (k > nlevel) call abort_ice(subname//' kmt gt nlevel error') - if (k > 0) bathymetry(i,j,iblk) = depth(k) - enddo - enddo - enddo - - ! For consistency, set thickness_ocn_layer1 in Icepack if 'calc_dragio' is active - call icepack_query_parameters(calc_dragio_out=calc_dragio) - if (calc_dragio) then - call icepack_init_parameters(thickness_ocn_layer1_in=thick(1)) - endif - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) - - deallocate(depth,thick) - - end subroutine get_bathymetry_popfile - -!======================================================================= - -! Read bathymetry data for seabed stress calculation (grounding scheme for -! 
landfast ice) in CICE stand-alone mode. When CICE is in coupled mode -! (e.g. CICE-NEMO), hwater should be uptated at each time level so that -! it varies with ocean dynamics. -! -! author: Fred Dupont, CMC - - subroutine read_seabedstress_bathy - - ! use module - use ice_read_write - use ice_constants, only: field_loc_center, field_type_scalar - - ! local variables - integer (kind=int_kind) :: & - fid_init ! file id for netCDF init file - - character (char_len_long) :: & ! input data file names - fieldname - - logical (kind=log_kind) :: diag=.true. - - character(len=*), parameter :: subname = '(read_seabedstress_bathy)' - - if (my_task == master_task) then - write (nu_diag,*) ' ' - write (nu_diag,*) 'Bathymetry file: ', trim(bathymetry_file) - call icepack_warnings_flush(nu_diag) - endif - - call ice_open_nc(bathymetry_file,fid_init) - - fieldname='Bathymetry' - - if (my_task == master_task) then - write(nu_diag,*) 'reading ',TRIM(fieldname) - call icepack_warnings_flush(nu_diag) - endif - call ice_read_nc(fid_init,1,fieldname,bathymetry,diag, & - field_loc=field_loc_center, & - field_type=field_type_scalar) - - call ice_close_nc(fid_init) - - if (my_task == master_task) then - write(nu_diag,*) 'closing file ',TRIM(bathymetry_file) - call icepack_warnings_flush(nu_diag) - endif - - end subroutine read_seabedstress_bathy - -!======================================================================= - - end module ice_grid - -!======================================================================= diff --git a/cicecore/cicedynB/infrastructure/ice_read_write.F90 b/cicecore/cicedynB/infrastructure/ice_read_write.F90 deleted file mode 100644 index 6332980f0..000000000 --- a/cicecore/cicedynB/infrastructure/ice_read_write.F90 +++ /dev/null @@ -1,2918 +0,0 @@ -#ifdef ncdf -#define USE_NETCDF -#endif -!======================================================================= - -! Routines for opening, reading and writing external files -! -! author: Tony Craig, NCAR -! -! 
2004: Block structure added by William Lipscomb, LANL -! 2006: Converted to free source form (F90) by Elizabeth Hunke -! 2007: netcdf versions added by Alison McLaren & Ann Keen, Met Office - - module ice_read_write - - use ice_kinds_mod - use ice_constants, only: c0, spval_dbl, & - field_loc_noupdate, field_type_noupdate - use ice_communicate, only: my_task, master_task - use ice_broadcast, only: broadcast_scalar - use ice_domain, only: distrb_info, orca_halogrid - use ice_domain_size, only: max_blocks, nx_global, ny_global, ncat - use ice_blocks, only: nx_block, ny_block, nghost - use ice_exit, only: abort_ice - use ice_fileunits, only: nu_diag - -#ifdef USE_NETCDF - use netcdf -#endif - - implicit none - - private - - integer (kind=int_kind), parameter, private :: & - bits_per_byte = 8 ! number of bits per byte. - ! used to determine RecSize in ice_open - - public :: ice_open, & - ice_open_ext, & - ice_open_nc, & - ice_read, & - ice_read_ext, & - ice_read_nc, & - ice_read_global, & - ice_read_global_nc, & - ice_read_nc_uv, & - ice_read_nc_xyf, & - ice_write, & - ice_write_nc, & - ice_write_ext, & - ice_read_vec_nc, & - ice_get_ncvarsize, & - ice_close_nc - - interface ice_write - module procedure ice_write_xyt, & - ice_write_xyzt - end interface - - interface ice_read - module procedure ice_read_xyt, & - ice_read_xyzt - end interface - - interface ice_read_nc - module procedure ice_read_nc_xy, & - ice_read_nc_xyz, & - !ice_read_nc_xyf, & - ice_read_nc_point, & - ice_read_nc_1D, & - ice_read_nc_2D, & - ice_read_nc_3D, & - ice_read_nc_z - end interface - - interface ice_write_nc - module procedure ice_write_nc_xy, & - ice_write_nc_xyz - end interface - -!======================================================================= - - contains - -!======================================================================= - -! Opens an unformatted file for reading. -! nbits indicates whether the file is sequential or direct access. -! -! 
author: Tony Craig, NCAR - - subroutine ice_open(nu, filename, nbits, algn) - - integer (kind=int_kind), intent(in) :: & - nu , & ! unit number - nbits ! no. of bits per variable (0 for sequential access) - - integer (kind=int_kind), intent(in), optional :: algn - integer (kind=int_kind) :: RecSize, Remnant, nbytes - - character (*) :: filename - - character(len=*), parameter :: subname = '(ice_open)' - - if (my_task == master_task) then - - if (nbits == 0) then ! sequential access - - open(nu,file=filename,form='unformatted') - - else ! direct access - - ! use nbytes to compute RecSize. - ! this prevents integer overflow with large global grids using nbits - ! nx*ny*nbits > 2^31 -1 (i.e., global grid 9000x7054x64) - nbytes = nbits/bits_per_byte - RecSize = nx_global*ny_global*nbytes - - if (present(algn)) then - ! If data is keept in blocks using given sizes (=algn) - ! Used in eg. HYCOM binary files, which are stored as "blocks" dividable by 16384 bit (=algn) - if (algn /= 0) then - Remnant = modulo(RecSize,algn) - if (Remnant /= 0) then - RecSize = RecSize + (algn - Remnant) - endif - endif - endif - open(nu,file=filename,recl=RecSize, & - form='unformatted',access='direct') - endif ! nbits = 0 - - endif ! my_task = master_task - - end subroutine ice_open - -!======================================================================= - -! Opens an unformatted file for reading, incl ghost cells (direct access). -! nbits indicates whether the file is sequential or direct access. -! -! authors: Tony Craig, NCAR -! David Hebert, NRLSSC - - subroutine ice_open_ext(nu, filename, nbits) - - integer (kind=int_kind), intent(in) :: & - nu , & ! unit number - nbits ! no. of bits per variable (0 for sequential access) - - integer (kind=int_kind) :: RecSize, nbytes - - character (*) :: filename - - integer (kind=int_kind) :: & - nx, ny ! 
grid dimensions including ghost cells - - character(len=*), parameter :: subname = '(ice_open_ext)' - - if (my_task == master_task) then - - if (nbits == 0) then ! sequential access - - open(nu,file=filename,form='unformatted') - - else ! direct access - - nx = nx_global + 2*nghost - ny = ny_global + 2*nghost - - ! use nbytes to compute RecSize. - ! this prevents integer overflow with large global grids using nbits - ! nx*ny*nbits > 2^31 -1 (i.e., global grid 9000x7054x64) - nbytes = nbits/bits_per_byte - RecSize = nx*ny*nbytes - open(nu,file=filename,recl=RecSize, & - form='unformatted',access='direct') - endif ! nbits = 0 - - endif ! my_task = master_task - - end subroutine ice_open_ext - -!======================================================================= - -! Read an unformatted file and scatter to processors. -! work is a real array, atype indicates the format of the data. -! If the optional variables field_loc and field_type are present, -! the ghost cells are filled using values from the global array. -! This prevents them from being filled with zeroes in land cells -! (subroutine ice_HaloUpdate need not be called). -! -! author: Tony Craig, NCAR - - subroutine ice_read_xyt(nu, nrec, work, atype, diag, & - field_loc, field_type, & - ignore_eof, hit_eof) - - use ice_gather_scatter, only: scatter_global - - integer (kind=int_kind), intent(in) :: & - nu , & ! unit number - nrec ! record number (0 for sequential access) - - real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks), intent(out) :: & - work ! output array (real, 8-byte) - - character (len=4), intent(in) :: & - atype ! format for input array - ! (real/integer, 4-byte/8-byte) - - logical (kind=log_kind), intent(in) :: & - diag ! if true, write diagnostic output - - integer (kind=int_kind), optional, intent(in) :: & - field_loc, & ! location of field on staggered grid - field_type ! 
type of field (scalar, vector, angle) - - logical (kind=log_kind), optional, intent(in) :: ignore_eof - logical (kind=log_kind), optional, intent(out) :: hit_eof - - ! local variables - - integer (kind=int_kind) :: i, j, ios - - real (kind=dbl_kind) :: & - amin, amax, asum ! min, max values and sum of input array - - logical (kind=log_kind) :: ignore_eof_use - - real (kind=dbl_kind), dimension(:,:), allocatable :: & - work_g1 - - real (kind=real_kind), dimension(:,:), allocatable :: & - work_gr - - integer(kind=int_kind), dimension(:,:), allocatable :: & - work_gi4 - - integer(selected_int_kind(13)), dimension(:,:), allocatable :: & - work_gi8 - - character(len=*), parameter :: subname = '(ice_read_xyt)' - - if (my_task == master_task) then - allocate(work_g1(nx_global,ny_global)) - else - allocate(work_g1(1,1)) ! to save memory - endif - - if (my_task == master_task) then - - !------------------------------------------------------------------- - ! Read global array according to format atype - !------------------------------------------------------------------- - if (present(hit_eof)) hit_eof = .false. - - if (atype == 'ida4') then - allocate(work_gi4(nx_global,ny_global)) - read(nu,rec=nrec) work_gi4 - work_g1 = real(work_gi4,kind=dbl_kind) - deallocate(work_gi4) - elseif (atype == 'ida8') then - allocate(work_gi8(nx_global,ny_global)) - read(nu,rec=nrec) work_gi8 - work_g1 = real(work_gi8,kind=dbl_kind) - deallocate(work_gi8) - elseif (atype == 'rda4') then - allocate(work_gr(nx_global,ny_global)) - read(nu,rec=nrec) work_gr - work_g1 = work_gr - deallocate(work_gr) - elseif (atype == 'rda8') then - read(nu,rec=nrec) work_g1 - elseif (atype == 'ruf8') then - if (present(ignore_eof)) then - ignore_eof_use = ignore_eof - else - ignore_eof_use = .false. - endif - if (ignore_eof_use) then - ! 
Read line from file, checking for end-of-file - read(nu, iostat=ios) ((work_g1(i,j),i=1,nx_global), & - j=1,ny_global) - if (present(hit_eof)) hit_eof = ios < 0 - else - read(nu) ((work_g1(i,j),i=1,nx_global),j=1,ny_global) - endif - else - write(nu_diag,*) subname,' ERROR: reading unknown atype ',atype - endif - endif ! my_task = master_task - - if (present(hit_eof)) then - call broadcast_scalar(hit_eof,master_task) - if (hit_eof) then - deallocate(work_g1) - return - endif - endif - - !------------------------------------------------------------------- - ! optional diagnostics - !------------------------------------------------------------------- - if (my_task==master_task .and. diag) then - amin = minval(work_g1) - amax = maxval(work_g1, mask = work_g1 /= spval_dbl) - asum = sum(work_g1, mask = work_g1 /= spval_dbl) - write(nu_diag,*) subname,' read_global ',nu, nrec, amin, amax, asum - endif - - !------------------------------------------------------------------- - ! Scatter data to individual processors. - ! NOTE: Ghost cells are not updated unless field_loc is present. - !------------------------------------------------------------------- - - if (present(field_loc)) then - call scatter_global(work, work_g1, master_task, distrb_info, & - field_loc, field_type) - else - call scatter_global(work, work_g1, master_task, distrb_info, & - field_loc_noupdate, field_type_noupdate) - endif - - deallocate(work_g1) - - end subroutine ice_read_xyt - -!======================================================================= -! Read an unformatted file and scatter to processors. -! work is a real array, atype indicates the format of the data. -! If the optional variables field_loc and field_type are present, -! the ghost cells are filled using values from the global array. -! This prevents them from being filled with zeroes in land cells -! (subroutine ice_HaloUpdate need not be called). -! -! 
author: Tony Craig, NCAR - - subroutine ice_read_xyzt(nu, nrec, work, atype, diag, & - field_loc, field_type, & - ignore_eof, hit_eof) - - use ice_gather_scatter, only: scatter_global - use ice_domain_size, only: nblyr - - integer (kind=int_kind), intent(in) :: & - nu , & ! unit number - nrec ! record number (0 for sequential access) - - real (kind=dbl_kind), dimension(nx_block,ny_block,nblyr+2,max_blocks), intent(out) :: & - work ! output array (real, 8-byte) - - character (len=4), intent(in) :: & - atype ! format for input array - ! (real/integer, 4-byte/8-byte) - - logical (kind=log_kind), intent(in) :: & - diag ! if true, write diagnostic output - - integer (kind=int_kind), optional, intent(in) :: & - field_loc, & ! location of field on staggered grid - field_type ! type of field (scalar, vector, angle) - - logical (kind=log_kind), optional, intent(in) :: ignore_eof - logical (kind=log_kind), optional, intent(out) :: hit_eof - - ! local variables - - integer (kind=int_kind) :: i, j, k, ios - - real (kind=dbl_kind) :: & - amin, amax, asum ! min, max values and sum of input array - - logical (kind=log_kind) :: ignore_eof_use - - - real (kind=dbl_kind), dimension(:,:,:), allocatable :: & - work_g4 - - integer(kind=int_kind), dimension(:,:,:), allocatable :: & - work_gi5 - - integer(selected_int_kind(13)), dimension(:,:,:), allocatable :: & - work_gi9 - - real (kind=real_kind), dimension(:,:,:), allocatable :: & - work_gr3 - - character(len=*), parameter :: subname = '(ice_read_xyzt)' - - if (my_task == master_task) then - allocate(work_g4(nx_global,ny_global,nblyr+2)) - else - allocate(work_g4(1,1,nblyr+2)) ! to save memory - endif - - if (my_task == master_task) then - - !------------------------------------------------------------------- - ! Read global array according to format atype - !------------------------------------------------------------------- - if (present(hit_eof)) hit_eof = .false. 
- - if (atype == 'ida4') then - allocate(work_gi5(nx_global,ny_global,nblyr+2)) - read(nu,rec=nrec) work_gi5 - work_g4 = real(work_gi5,kind=dbl_kind) - deallocate(work_gi5) - elseif (atype == 'ida8') then - allocate(work_gi9(nx_global,ny_global,nblyr+2)) - read(nu,rec=nrec) work_gi9 - work_g4 = real(work_gi9,kind=dbl_kind) - deallocate(work_gi9) - elseif (atype == 'rda4') then - allocate(work_gr3(nx_global,ny_global,nblyr+2)) - read(nu,rec=nrec) work_gr3 - work_g4 = work_gr3 - deallocate(work_gr3) - elseif (atype == 'rda8') then - read(nu,rec=nrec) work_g4 - elseif (atype == 'ruf8') then - if (present(ignore_eof)) then - ignore_eof_use = ignore_eof - else - ignore_eof_use = .false. - endif - if (ignore_eof_use) then - ! Read line from file, checking for end-of-file - read(nu, iostat=ios) (((work_g4(i,j,k),i=1,nx_global), & - j=1,ny_global), & - k=1,nblyr+2) - if (present(hit_eof)) hit_eof = ios < 0 - else - read(nu) (((work_g4(i,j,k),i=1,nx_global),j=1,ny_global),& - k=1,nblyr+2) - endif - else - write(nu_diag,*) subname,' ERROR: reading unknown atype ',atype - endif - endif ! my_task = master_task - - if (present(hit_eof)) then - call broadcast_scalar(hit_eof,master_task) - if (hit_eof) then - deallocate(work_g4) - return - endif - endif - - !------------------------------------------------------------------- - ! optional diagnostics - !------------------------------------------------------------------- - if (my_task==master_task .and. diag) then - amin = minval(work_g4) - amax = maxval(work_g4, mask = work_g4 /= spval_dbl) - asum = sum (work_g4, mask = work_g4 /= spval_dbl) - write(nu_diag,*) subname,' read_global ',nu, nrec, amin, amax, asum - endif - - !------------------------------------------------------------------- - ! Scatter data to individual processors. - ! NOTE: Ghost cells are not updated unless field_loc is present. 
- !------------------------------------------------------------------- - - do k = 1, nblyr+2 - - if (present(field_loc)) then - call scatter_global(work(:,:,k,:), work_g4(:,:,k), master_task, distrb_info, & - field_loc, field_type) - - else - - call scatter_global(work(:,:,k,:), work_g4(:,:,k), master_task, distrb_info, & - field_loc_noupdate, field_type_noupdate) - endif - - enddo !k - deallocate(work_g4) - - end subroutine ice_read_xyzt - -!======================================================================= - -! Read an unformatted file -! Just like ice_read except that it returns a global array. -! work_g is a real array, atype indicates the format of the data -! -! Adapted by William Lipscomb, LANL, from ice_read - - subroutine ice_read_global (nu, nrec, work_g, atype, diag, & - ignore_eof, hit_eof) - - integer (kind=int_kind), intent(in) :: & - nu , & ! unit number - nrec ! record number (0 for sequential access) - - real (kind=dbl_kind), dimension(nx_global,ny_global), intent(out) :: & - work_g ! output array (real, 8-byte) - - character (len=4) :: & - atype ! format for input array - ! (real/integer, 4-byte/8-byte) - - logical (kind=log_kind) :: & - diag ! if true, write diagnostic output - - logical (kind=log_kind), optional, intent(in) :: ignore_eof - logical (kind=log_kind), optional, intent(out) :: hit_eof - - ! local variables - - integer (kind=int_kind) :: i, j, ios - - real (kind=dbl_kind) :: & - amin, amax, asum ! min, max values and sum of input array - - logical (kind=log_kind) :: ignore_eof_use - - real (kind=real_kind), dimension(:,:), allocatable :: & - work_gr - - integer(kind=int_kind), dimension(:,:), allocatable :: & - work_gi4 - - integer(selected_int_kind(13)), dimension(:,:), allocatable :: & - work_gi8 - - character(len=*), parameter :: subname = '(ice_read_global)' - - work_g(:,:) = c0 - - if (my_task == master_task) then - - !------------------------------------------------------------------- - ! 
Read global array according to format atype - !------------------------------------------------------------------- - if (present(hit_eof)) hit_eof = .false. - - if (atype == 'ida4') then - allocate(work_gi4(nx_global,ny_global)) - read(nu,rec=nrec) work_gi4 - work_g = real(work_gi4,kind=dbl_kind) - deallocate(work_gi4) - elseif (atype == 'ida8') then - allocate(work_gi8(nx_global,ny_global)) - read(nu,rec=nrec) work_gi8 - work_g = real(work_gi8,kind=dbl_kind) - deallocate(work_gi8) - elseif (atype == 'rda4') then - allocate(work_gr(nx_global,ny_global)) - read(nu,rec=nrec) work_gr - work_g = work_gr - deallocate(work_gr) - elseif (atype == 'rda8') then - read(nu,rec=nrec) work_g - elseif (atype == 'ruf8') then - if (present(ignore_eof)) then - ignore_eof_use = ignore_eof - else - ignore_eof_use = .false. - endif - if (ignore_eof_use) then - ! Read line from file, checking for end-of-file - read(nu, iostat=ios) ((work_g(i,j),i=1,nx_global), & - j=1,ny_global) - if (present(hit_eof)) hit_eof = ios < 0 - else - read(nu) ((work_g(i,j),i=1,nx_global),j=1,ny_global) - endif - else - write(nu_diag,*) subname,' ERROR: reading unknown atype ',atype - endif - endif ! my_task = master_task - - if (present(hit_eof)) then - call broadcast_scalar(hit_eof,master_task) - if (hit_eof) return - endif - - !------------------------------------------------------------------- - ! optional diagnostics - !------------------------------------------------------------------- - if (my_task == master_task .and. diag) then - amin = minval(work_g) - amax = maxval(work_g, mask = work_g /= spval_dbl) - asum = sum (work_g, mask = work_g /= spval_dbl) - write(nu_diag,*) subname,' read_global ',nu, nrec, amin, amax,asum - endif - - end subroutine ice_read_global - -!======================================================================= - -! Read an unformatted file and scatter to processors, incl ghost cells. -! work is a real array, atype indicates the format of the data. -! 
(subroutine ice_HaloUpdate need not be called). - - subroutine ice_read_ext(nu, nrec, work, atype, diag, & - ignore_eof, hit_eof) - - use ice_gather_scatter, only: scatter_global_ext - - integer (kind=int_kind), intent(in) :: & - nu , & ! unit number - nrec ! record number (0 for sequential access) - - real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks), intent(out) :: & - work ! output array (real, 8-byte) - - character (len=4), intent(in) :: & - atype ! format for input array - ! (real/integer, 4-byte/8-byte) - - logical (kind=log_kind), intent(in) :: & - diag ! if true, write diagnostic output - - logical (kind=log_kind), optional, intent(in) :: ignore_eof - logical (kind=log_kind), optional, intent(out) :: hit_eof - - ! local variables - - integer (kind=int_kind) :: i, j, ios, nx, ny - - real (kind=dbl_kind) :: & - amin, amax, asum ! min, max values and sum of input array - - logical (kind=log_kind) :: ignore_eof_use - - real (kind=dbl_kind), dimension(:,:), allocatable :: & - work_g1 - - real (kind=real_kind), dimension(:,:), allocatable :: & - work_gr - - integer(kind=int_kind), dimension(:,:), allocatable :: & - work_gi4 - - integer(selected_int_kind(13)), dimension(:,:), allocatable :: & - work_gi8 - - character(len=*), parameter :: subname = '(ice_read_ext)' - - nx = nx_global + 2*nghost - ny = ny_global + 2*nghost - - if (my_task == master_task) then - allocate(work_g1(nx,ny)) - else - allocate(work_g1(1,1)) ! to save memory - endif - - if (my_task == master_task) then - - !------------------------------------------------------------------- - ! Read global array according to format atype - !------------------------------------------------------------------- - if (present(hit_eof)) hit_eof = .false. 
- - if (atype == 'ida4') then - allocate(work_gi4(nx,ny)) - read(nu,rec=nrec) work_gi4 - work_g1 = real(work_gi4,kind=dbl_kind) - deallocate(work_gi4) - elseif (atype == 'ida8') then - allocate(work_gi8(nx,ny)) - read(nu,rec=nrec) work_gi8 - work_g1 = real(work_gi8,kind=dbl_kind) - deallocate(work_gi8) - elseif (atype == 'rda4') then - allocate(work_gr(nx,ny)) - read(nu,rec=nrec) work_gr - work_g1 = work_gr - deallocate(work_gr) - elseif (atype == 'rda8') then - read(nu,rec=nrec) work_g1 - elseif (atype == 'ruf8') then - if (present(ignore_eof)) then - ignore_eof_use = ignore_eof - else - ignore_eof_use = .false. - endif - if (ignore_eof_use) then - ! Read line from file, checking for end-of-file - read(nu, iostat=ios) ((work_g1(i,j),i=1,nx), & - j=1,ny) - if (present(hit_eof)) hit_eof = ios < 0 - else - read(nu) ((work_g1(i,j),i=1,nx),j=1,ny) - endif - else - write(nu_diag,*) subname,' ERROR: reading unknown atype ',atype - endif - endif ! my_task = master_task - - if (present(hit_eof)) then - call broadcast_scalar(hit_eof,master_task) - if (hit_eof) then - deallocate(work_g1) - return - endif - endif - - !------------------------------------------------------------------- - ! optional diagnostics - !------------------------------------------------------------------- - if (my_task==master_task .and. diag) then - amin = minval(work_g1) - amax = maxval(work_g1, mask = work_g1 /= spval_dbl) - asum = sum (work_g1, mask = work_g1 /= spval_dbl) - write(nu_diag,*) subname,' read_global ',nu, nrec, amin, amax, asum - endif - - !------------------------------------------------------------------- - ! Scatter data to individual processors. - ! NOTE: Ghost cells are always updated - !------------------------------------------------------------------- - - call scatter_global_ext(work, work_g1, master_task, distrb_info) - - deallocate(work_g1) - - end subroutine ice_read_ext - -!======================================================================= - -! 
Writes an unformatted file -! work is a real array, atype indicates the format of the data - - subroutine ice_write_xyt(nu, nrec, work, atype, diag) - - use ice_gather_scatter, only: gather_global - - integer (kind=int_kind), intent(in) :: & - nu , & ! unit number - nrec ! record number (0 for sequential access) - - real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks), intent(in) :: & - work ! input array (real, 8-byte) - - character (len=4), intent(in) :: & - atype ! format for output array - ! (real/integer, 4-byte/8-byte) - - logical (kind=log_kind), intent(in) :: & - diag ! if true, write diagnostic output - - ! local variables - - integer (kind=int_kind) :: i, j - - real (kind=dbl_kind) :: & - amin, amax, asum ! min, max values and sum of input array - - real (kind=dbl_kind), dimension(:,:), allocatable :: & - work_g1 - - real (kind=real_kind), dimension(:,:), allocatable :: & - work_gr - - integer(kind=int_kind), dimension(:,:), allocatable :: & - work_gi4 - - integer(selected_int_kind(13)), dimension(:,:), allocatable :: & - work_gi8 - - character(len=*), parameter :: subname = '(ice_write_xyt)' - - !------------------------------------------------------------------- - ! Gather data from individual processors - !------------------------------------------------------------------- - - if (my_task == master_task) then - allocate(work_g1(nx_global,ny_global)) - else - allocate(work_g1(1,1)) ! to save memory - endif - - call gather_global(work_g1, work, master_task, distrb_info, spc_val=c0) - - if (my_task == master_task) then - - !------------------------------------------------------------------- - ! 
Write global array according to format atype - !------------------------------------------------------------------- - if (atype == 'ida4') then - allocate(work_gi4(nx_global,ny_global)) - work_gi4 = nint(work_g1) - write(nu,rec=nrec) work_gi4 - deallocate(work_gi4) - elseif (atype == 'ida8') then - allocate(work_gi8(nx_global,ny_global)) - work_gi8 = nint(work_g1) - write(nu,rec=nrec) work_gi8 - deallocate(work_gi8) - elseif (atype == 'rda4') then - allocate(work_gr(nx_global,ny_global)) - work_gr = real(work_g1,real_kind) - write(nu,rec=nrec) work_gr - deallocate(work_gr) - elseif (atype == 'rda8') then - write(nu,rec=nrec) work_g1 - elseif (atype == 'ruf8') then - write(nu) ((work_g1(i,j),i=1,nx_global),j=1,ny_global) - else - write(nu_diag,*) subname,' ERROR: writing unknown atype ',atype - endif - - !------------------------------------------------------------------- - ! diagnostics - !------------------------------------------------------------------- - if (diag) then - amin = minval(work_g1) - amax = maxval(work_g1, mask = work_g1 /= spval_dbl) - asum = sum (work_g1, mask = work_g1 /= spval_dbl) - write(nu_diag,*) subname,' write_global ', nu, nrec, amin, amax, asum - endif - - endif ! my_task = master_task - - deallocate(work_g1) - - end subroutine ice_write_xyt - -!======================================================================= - -! Writes an unformatted file -! work is a real array, atype indicates the format of the data - - subroutine ice_write_xyzt(nu, nrec, work, atype, diag) - - use ice_gather_scatter, only: gather_global - use ice_domain_size, only: nblyr - - integer (kind=int_kind), intent(in) :: & - nu , & ! unit number - nrec ! record number (0 for sequential access) - - real (kind=dbl_kind), dimension(nx_block,ny_block,nblyr+2,max_blocks), & - intent(in) :: & - work ! input array (real, 8-byte) - - character (len=4), intent(in) :: & - atype ! format for output array - ! 
(real/integer, 4-byte/8-byte) - - logical (kind=log_kind), intent(in) :: & - diag ! if true, write diagnostic output - - ! local variables - - integer (kind=int_kind) :: i, j, k - - real (kind=dbl_kind) :: & - amin, amax, asum ! min, max values and sum of input array - - real (kind=dbl_kind), dimension(:,:,:), allocatable :: & - work_g4 - - real (kind=real_kind), dimension(:,:,:), allocatable :: & - work_gr3 - - integer(kind=int_kind), dimension(:,:,:), allocatable :: & - work_gi5 - - integer(selected_int_kind(13)), dimension(:,:,:), allocatable :: & - work_gi9 - - character(len=*), parameter :: subname = '(ice_write_xyzt)' - - !------------------------------------------------------------------- - ! Gather data from individual processors - !------------------------------------------------------------------- - - if (my_task == master_task) then - allocate(work_g4(nx_global,ny_global,nblyr+2)) - else - allocate(work_g4(1,1,nblyr+2)) ! to save memory - endif - do k = 1,nblyr+2 - call gather_global(work_g4(:,:,k), work(:,:,k,:), master_task, & - distrb_info, spc_val=c0) - enddo !k - - if (my_task == master_task) then - - !------------------------------------------------------------------- - ! 
Write global array according to format atype - !------------------------------------------------------------------- - if (atype == 'ida4') then - allocate(work_gi5(nx_global,ny_global,nblyr+2)) - work_gi5 = nint(work_g4) - write(nu,rec=nrec) work_gi5 - deallocate(work_gi5) - elseif (atype == 'ida8') then - allocate(work_gi9(nx_global,ny_global,nblyr+2)) - work_gi9 = nint(work_g4) - write(nu,rec=nrec) work_gi9 - deallocate(work_gi9) - elseif (atype == 'rda4') then - allocate(work_gr3(nx_global,ny_global,nblyr+2)) - work_gr3 = real(work_g4,real_kind) - write(nu,rec=nrec) work_gr3 - deallocate(work_gr3) - elseif (atype == 'rda8') then - write(nu,rec=nrec) work_g4 - elseif (atype == 'ruf8') then - write(nu)(((work_g4(i,j,k),i=1,nx_global),j=1,ny_global), & - k=1,nblyr+2) - else - write(nu_diag,*) subname,' ERROR: writing unknown atype ',atype - endif - - !------------------------------------------------------------------- - ! diagnostics - !------------------------------------------------------------------- - if (diag) then - amin = minval(work_g4) - amax = maxval(work_g4, mask = work_g4 /= spval_dbl) - asum = sum (work_g4, mask = work_g4 /= spval_dbl) - write(nu_diag,*) subname,' write_global ', nu, nrec, amin, amax, asum - endif - - endif ! my_task = master_task - - deallocate(work_g4) - - end subroutine ice_write_xyzt - -!======================================================================= -! -! Writes an unformatted file, including ghost cells -! work is a real array, atype indicates the format of the data -! -! author: Tony Craig, NCAR - - subroutine ice_write_ext(nu, nrec, work, atype, diag) - - use ice_gather_scatter, only: gather_global_ext - - integer (kind=int_kind), intent(in) :: & - nu , & ! unit number - nrec ! record number (0 for sequential access) - - real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks), & - intent(in) :: & - work ! input array (real, 8-byte) - - character (len=4), intent(in) :: & - atype ! format for output array - ! 
(real/integer, 4-byte/8-byte) - - logical (kind=log_kind), intent(in) :: & - diag ! if true, write diagnostic output - - ! local variables - - integer (kind=int_kind) :: i, j, nx, ny - - real (kind=dbl_kind) :: & - amin, amax, asum ! min, max values and sum of input array - - real (kind=dbl_kind), dimension(:,:), allocatable :: & - work_g1 - - real (kind=real_kind), dimension(:,:), allocatable :: & - work_gr - - integer(kind=int_kind), dimension(:,:), allocatable :: & - work_gi4 - - integer(selected_int_kind(13)), dimension(:,:), allocatable :: & - work_gi8 - - character(len=*), parameter :: subname = '(ice_write_ext)' - - !------------------------------------------------------------------- - ! Gather data from individual processors - !------------------------------------------------------------------- - - nx = nx_global + 2*nghost - ny = ny_global + 2*nghost - - if (my_task == master_task) then - allocate(work_g1(nx,ny)) - else - allocate(work_g1(1,1)) ! to save memory - endif - - call gather_global_ext(work_g1, work, master_task, distrb_info, spc_val=c0) - - if (my_task == master_task) then - - !------------------------------------------------------------------- - ! 
Write global array according to format atype - !------------------------------------------------------------------- - if (atype == 'ida4') then - allocate(work_gi4(nx,ny)) - work_gi4 = nint(work_g1) - write(nu,rec=nrec) work_gi4 - deallocate(work_gi4) - elseif (atype == 'ida8') then - allocate(work_gi8(nx,ny)) - work_gi8 = nint(work_g1) - write(nu,rec=nrec) work_gi8 - deallocate(work_gi8) - elseif (atype == 'rda4') then - allocate(work_gr(nx,ny)) - work_gr = real(work_g1,real_kind) - write(nu,rec=nrec) work_gr - deallocate(work_gr) - elseif (atype == 'rda8') then - write(nu,rec=nrec) work_g1 - elseif (atype == 'ruf8') then - write(nu) ((work_g1(i,j),i=1,nx),j=1,ny) - else - write(nu_diag,*) subname,' ERROR: writing unknown atype ',atype - endif - - !------------------------------------------------------------------- - ! diagnostics - !------------------------------------------------------------------- - if (diag) then - amin = minval(work_g1) - amax = maxval(work_g1, mask = work_g1 /= spval_dbl) - asum = sum (work_g1, mask = work_g1 /= spval_dbl) - write(nu_diag,*) subname,' write_global ', nu, nrec, amin, amax, asum - endif - - endif ! my_task = master_task - - deallocate(work_g1) - - end subroutine ice_write_ext - -!======================================================================= - -! Opens a netCDF file for reading -! Adapted by Alison McLaren, Met Office from ice_open - - subroutine ice_open_nc(filename, fid) - - character (char_len_long), intent(in) :: & - filename ! netCDF filename - - integer (kind=int_kind), intent(out) :: & - fid ! unit number - - ! local variables - - character(len=*), parameter :: subname = '(ice_open_nc)' - -#ifdef USE_NETCDF - integer (kind=int_kind) :: & - status ! 
status variable from netCDF routine - - if (my_task == master_task) then - - status = nf90_open(filename, NF90_NOWRITE, fid) - if (status /= nf90_noerr) then - call abort_ice(subname//' ERROR: Cannot open '//trim(filename), & - file=__FILE__, line=__LINE__) - endif - - endif ! my_task = master_task - -#else - call abort_ice(subname//' ERROR: USE_NETCDF cpp not defined for '//trim(filename), & - file=__FILE__, line=__LINE__) - fid = -999 ! to satisfy intent(out) attribute -#endif - end subroutine ice_open_nc - -!======================================================================= - -! Read a netCDF file and scatter to processors. -! If the optional variables field_loc and field_type are present, -! the ghost cells are filled using values from the global array. -! This prevents them from being filled with zeroes in land cells -! (subroutine ice_HaloUpdate need not be called). -! -! Adapted by Alison McLaren, Met Office from ice_read - - subroutine ice_read_nc_xy(fid, nrec, varname, work, diag, & - field_loc, field_type, restart_ext) - - use ice_gather_scatter, only: scatter_global, scatter_global_ext - - integer (kind=int_kind), intent(in) :: & - fid , & ! file id - nrec ! record number - - logical (kind=log_kind), intent(in) :: & - diag ! if true, write diagnostic output - - character (len=*), intent(in) :: & - varname ! field name in netcdf file - - real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks), intent(out) :: & - work ! output array (real, 8-byte) - - logical (kind=log_kind), optional, intent(in) :: & - restart_ext ! if true, read extended grid - - integer (kind=int_kind), optional, intent(in) :: & - field_loc, & ! location of field on staggered grid - field_type ! type of field (scalar, vector, angle) - - ! local variables - - character(len=*), parameter :: subname = '(ice_read_nc_xy)' - -#ifdef USE_NETCDF -! netCDF file diagnostics: - integer (kind=int_kind) :: & - varid , & ! variable id - status , & ! 
status output from netcdf routines - ndims , & ! number of dimensions - dimlen ! dimension size - - integer (kind=int_kind), dimension(10) :: & - dimids ! generic size dimids - - real (kind=dbl_kind) :: & - missingvalue, & - amin, amax, asum ! min, max values and sum of input array - - real (kind=dbl_kind), dimension(:,:), allocatable :: & - work_g1 - - integer (kind=int_kind) :: nx, ny - - integer (kind=int_kind) :: lnrec ! local value of nrec - - real (kind=dbl_kind), dimension(:,:), allocatable :: & - work_g2 - - lnrec = nrec - - if (orca_halogrid .and. .not. present(restart_ext)) then - if (my_task == master_task) then - allocate(work_g2(nx_global+2,ny_global+1)) - else - allocate(work_g2(1,1)) ! to save memory - endif - work_g2(:,:) = c0 - endif - - nx = nx_global - ny = ny_global - - work = c0 ! to satisfy intent(out) attribute - - if (present(restart_ext)) then - if (restart_ext) then - nx = nx_global + 2*nghost - ny = ny_global + 2*nghost - endif - endif - - if (my_task == master_task) then - allocate(work_g1(nx,ny)) - else - allocate(work_g1(1,1)) ! to save memory - endif - - if (my_task == master_task) then - - !------------------------------------------------------------- - ! Find out ID of required variable - !------------------------------------------------------------- - - status = nf90_inq_varid(fid, trim(varname), varid) - if (status /= nf90_noerr) then - call abort_ice(subname//' ERROR: Cannot find variable '//trim(varname), & - file=__FILE__, line=__LINE__) - endif - - !------------------------------------------------------------- - ! 
Check nrec axis size - !------------------------------------------------------------- - - status = nf90_inquire_variable(fid, varid, ndims=ndims, dimids=dimids) - if (status /= nf90_noerr) then - call abort_ice(subname//' ERROR: inquire variable dimids '//trim(varname), & - file=__FILE__, line=__LINE__) - endif - if (ndims > 2) then - status = nf90_inquire_dimension(fid, dimids(3), len=dimlen) - if (status /= nf90_noerr) then - call abort_ice(subname//' ERROR: inquire dimension size 3 '//trim(varname), & - file=__FILE__, line=__LINE__) - endif - if (lnrec > dimlen) then - write(nu_diag,*) subname,' ERROR not enough records, ',trim(varname),lnrec,dimlen - call abort_ice(subname//' ERROR: not enough records '//trim(varname), & - file=__FILE__, line=__LINE__) - endif - endif - - !-------------------------------------------------------------- - ! Read global array - !-------------------------------------------------------------- - - if (orca_halogrid .and. .not. present(restart_ext)) then - status = nf90_get_var( fid, varid, work_g2, & - start=(/1,1,lnrec/), & - count=(/nx_global+2,ny_global+1,1/)) - if (status /= nf90_noerr) then - call abort_ice(subname//' ERROR: Cannot get variable '//trim(varname), & - file=__FILE__, line=__LINE__) - endif - work_g1 = work_g2(2:nx_global+1,1:ny_global) - else - status = nf90_get_var( fid, varid, work_g1, & - start=(/1,1,lnrec/), & - count=(/nx,ny,1/)) - if (status /= nf90_noerr) then - call abort_ice(subname//' ERROR: Cannot get variable '//trim(varname), & - file=__FILE__, line=__LINE__) - endif - endif - - status = nf90_get_att(fid, varid, "_FillValue", missingvalue) - endif ! my_task = master_task - - !------------------------------------------------------------------- - ! optional diagnostics - !------------------------------------------------------------------- - - if (my_task==master_task .and. diag) then - write(nu_diag,'(2a,i8,a,i8,2a)') & - subname,' fid= ',fid, ', lnrec = ',lnrec, & - ', varname = ',trim(varname) -! 
status = nf90_inquire(fid, nDimensions=ndim, nVariables=nvar) -! write(nu_diag,*) subname,' ndim= ',ndim,', nvar= ',nvar -! do id=1,ndim -! status = nf90_inquire_dimension(fid,id,name=dimname,len=dimlen) -! write(nu_diag,*) subname,' Dim name = ',trim(dimname),', size = ',dimlen -! enddo - amin = minval(work_g1) - amax = maxval(work_g1, mask = work_g1 /= missingvalue) - asum = sum (work_g1, mask = work_g1 /= missingvalue) - write(nu_diag,*) subname,' min, max, sum =', amin, amax, asum, trim(varname) - endif - - !------------------------------------------------------------------- - ! Scatter data to individual processors. - ! NOTE: Ghost cells are not updated unless field_loc is present. - !------------------------------------------------------------------- - - if (present(restart_ext)) then - if (restart_ext) then - call scatter_global_ext(work, work_g1, master_task, distrb_info) - endif - else - if (present(field_loc)) then - call scatter_global(work, work_g1, master_task, distrb_info, & - field_loc, field_type) - else - call scatter_global(work, work_g1, master_task, distrb_info, & - field_loc_noupdate, field_type_noupdate) - endif - endif - - deallocate(work_g1) - -! echmod: this should not be necessary if fill/missing are only on land - where (work > 1.0e+30_dbl_kind) work = c0 - - if (orca_halogrid .and. .not. present(restart_ext)) deallocate(work_g2) - -#else - call abort_ice(subname//' ERROR: USE_NETCDF cpp not defined', & - file=__FILE__, line=__LINE__) -#endif - end subroutine ice_read_nc_xy - -!======================================================================= - -! Read a netCDF file and scatter to processors. -! If the optional variables field_loc and field_type are present, -! the ghost cells are filled using values from the global array. -! This prevents them from being filled with zeroes in land cells -! (subroutine ice_HaloUpdate need not be called). -! -! 
Adapted by David Bailey, NCAR from ice_read_nc_xy - - subroutine ice_read_nc_xyz(fid, nrec, varname, work, diag, & - field_loc, field_type, restart_ext) - - use ice_gather_scatter, only: scatter_global, scatter_global_ext - - integer (kind=int_kind), intent(in) :: & - fid , & ! file id - nrec ! record number - - character (len=*), intent(in) :: & - varname ! field name in netcdf file - - logical (kind=log_kind), intent(in) :: & - diag ! if true, write diagnostic output - - real (kind=dbl_kind), dimension(nx_block,ny_block,ncat,max_blocks), intent(out) :: & - work ! output array (real, 8-byte) - - logical (kind=log_kind), optional, intent(in) :: & - restart_ext ! if true, read extended grid - - integer (kind=int_kind), optional, intent(in) :: & - field_loc, & ! location of field on staggered grid - field_type ! type of field (scalar, vector, angle) - - ! local variables - - character(len=*), parameter :: subname = '(ice_read_nc_xyz)' - -#ifdef USE_NETCDF -! netCDF file diagnostics: - integer (kind=int_kind) :: & - n, & ! ncat index - varid , & ! variable id - status , & ! status output from netcdf routines - ndims , & ! number of dimensions - dimlen ! dimension size - - integer (kind=int_kind), dimension(10) :: & - dimids ! generic size dimids - - real (kind=dbl_kind) :: & - missingvalue, & ! missing value - amin, amax, asum ! min, max values and sum of input array - -! character (char_len) :: & -! dimname ! dimension name - - real (kind=dbl_kind), dimension(:,:,:), allocatable :: & - work_g1 - - integer (kind=int_kind) :: nx, ny - - integer (kind=int_kind) :: lnrec ! local value of nrec - - real (kind=dbl_kind), dimension(:,:,:), allocatable :: & - work_g2 - - lnrec = nrec - - if (orca_halogrid .and. .not. present(restart_ext)) then - if (my_task == master_task) then - allocate(work_g2(nx_global+2,ny_global+1,ncat)) - else - allocate(work_g2(1,1,ncat)) ! 
to save memory - endif - work_g2(:,:,:) = c0 - endif - - nx = nx_global - ny = ny_global - - if (present(restart_ext)) then - if (restart_ext) then - nx = nx_global + 2*nghost - ny = ny_global + 2*nghost - endif - endif - - if (my_task == master_task) then - allocate(work_g1(nx,ny,ncat)) - else - allocate(work_g1(1,1,ncat)) ! to save memory - endif - - if (my_task == master_task) then - - !------------------------------------------------------------- - ! Find out ID of required variable - !------------------------------------------------------------- - - status = nf90_inq_varid(fid, trim(varname), varid) - if (status /= nf90_noerr) then - call abort_ice(subname//' ERROR: Cannot find variable '//trim(varname), & - file=__FILE__, line=__LINE__) - endif - - !------------------------------------------------------------- - ! Check nrec axis size - !------------------------------------------------------------- - - status = nf90_inquire_variable(fid, varid, ndims=ndims, dimids=dimids) - if (status /= nf90_noerr) then - call abort_ice(subname//' ERROR: inquire variable dimids '//trim(varname), & - file=__FILE__, line=__LINE__) - endif - if (ndims > 3) then - status = nf90_inquire_dimension(fid, dimids(4), len=dimlen) - if (status /= nf90_noerr) then - call abort_ice(subname//' ERROR: inquire dimension size 4 '//trim(varname), & - file=__FILE__, line=__LINE__) - endif - if (lnrec > dimlen) then - write(nu_diag,*) subname,' ERROR not enough records, ',trim(varname),lnrec,dimlen - call abort_ice(subname//' ERROR: not enough records '//trim(varname), & - file=__FILE__, line=__LINE__) - endif - endif - - !-------------------------------------------------------------- - ! Read global array - !-------------------------------------------------------------- - - if (orca_halogrid .and. .not. 
present(restart_ext)) then - status = nf90_get_var( fid, varid, work_g2, & - start=(/1,1,1,lnrec/), & - count=(/nx_global+2,ny_global+1,ncat,1/)) - if (status /= nf90_noerr) then - call abort_ice(subname//' ERROR: Cannot get variable '//trim(varname), & - file=__FILE__, line=__LINE__) - endif - work_g1 = work_g2(2:nx_global+1,1:ny_global,:) - else - status = nf90_get_var( fid, varid, work_g1, & - start=(/1,1,1,lnrec/), & - count=(/nx,ny,ncat,1/)) - if (status /= nf90_noerr) then - call abort_ice(subname//' ERROR: Cannot get variable '//trim(varname), & - file=__FILE__, line=__LINE__) - endif - endif - - status = nf90_get_att(fid, varid, "_FillValue", missingvalue) - endif ! my_task = master_task - - !------------------------------------------------------------------- - ! optional diagnostics - !------------------------------------------------------------------- - - if (my_task==master_task .and. diag) then - write(nu_diag,'(2a,i8,a,i8,2a)') & - subname,' fid= ',fid, ', lnrec = ',lnrec, & - ', varname = ',trim(varname) -! status = nf90_inquire(fid, nDimensions=ndim, nVariables=nvar) -! write(nu_diag,*) subname,' ndim= ',ndim,', nvar= ',nvar -! do id=1,ndim -! status = nf90_inquire_dimension(fid,id,name=dimname,len=dimlen) -! write(nu_diag,*) subname,' Dim name = ',trim(dimname),', size = ',dimlen -! enddo - do n=1,ncat - amin = minval(work_g1(:,:,n)) - amax = maxval(work_g1(:,:,n), mask = work_g1(:,:,n) /= missingvalue) - asum = sum (work_g1(:,:,n), mask = work_g1(:,:,n) /= missingvalue) - write(nu_diag,*) subname,' min, max, sum =', amin, amax, asum, trim(varname) - enddo - endif - - !------------------------------------------------------------------- - ! Scatter data to individual processors. - ! NOTE: Ghost cells are not updated unless field_loc is present. 
- !------------------------------------------------------------------- - - if (present(restart_ext)) then - if (restart_ext) then - do n=1,ncat - call scatter_global_ext(work(:,:,n,:), work_g1(:,:,n), & - master_task, distrb_info) - enddo - endif - else - if (present(field_loc)) then - do n=1,ncat - call scatter_global(work(:,:,n,:), work_g1(:,:,n), master_task, & - distrb_info, field_loc, field_type) - enddo - else - do n=1,ncat - call scatter_global(work(:,:,n,:), work_g1(:,:,n), master_task, & - distrb_info, field_loc_noupdate, field_type_noupdate) - enddo - endif - endif - - deallocate(work_g1) - if (orca_halogrid .and. .not. present(restart_ext)) deallocate(work_g2) - -#else - call abort_ice(subname//' ERROR: USE_NETCDF cpp not defined', & - file=__FILE__, line=__LINE__) - work = c0 ! to satisfy intent(out) attribute -#endif - end subroutine ice_read_nc_xyz - -!======================================================================= - -! Read a netCDF file and scatter to processors. -! If the optional variables field_loc and field_type are present, -! the ghost cells are filled using values from the global array. -! This prevents them from being filled with zeroes in land cells -! (subroutine ice_HaloUpdate need not be called). -! -! Adapted by David Bailey, NCAR from ice_read_nc_xy -! Adapted by Lettie Roach, NIWA to read nfreq -! by changing all occurrences of ncat to nfreq - - subroutine ice_read_nc_xyf(fid, nrec, varname, work, diag, & - field_loc, field_type, restart_ext) - - use ice_fileunits, only: nu_diag - use ice_domain_size, only: nfreq - use ice_gather_scatter, only: scatter_global, scatter_global_ext - - integer (kind=int_kind), intent(in) :: & - fid , & ! file id - nrec ! record number - - character (len=*), intent(in) :: & - varname ! field name in netcdf file - - logical (kind=log_kind), intent(in) :: & - diag ! 
if true, write diagnostic output - - real (kind=dbl_kind), dimension(nx_block,ny_block,nfreq,1,max_blocks), & - intent(out) :: & - work ! output array (real, 8-byte) - - logical (kind=log_kind), optional, intent(in) :: & - restart_ext ! if true, read extended grid - - integer (kind=int_kind), optional, intent(in) :: & - field_loc, & ! location of field on staggered grid - field_type ! type of field (scalar, vector, angle) - - ! local variables - -! netCDF file diagnostics: - integer (kind=int_kind) :: & - varid, & ! variable id - status, & ! status output from netcdf routines - ndim, nvar, & ! sizes of netcdf file - id, & ! dimension index - n, & ! ncat index - ndims, & ! number of dimensions - dimlen ! dimension size - - integer (kind=int_kind), dimension(10) :: & - dimids ! generic size dimids - - real (kind=dbl_kind) :: & - missingvalue, & ! missing value - amin, amax, asum ! min, max values and sum of input array - - character (char_len) :: & - dimname ! dimension name - - real (kind=dbl_kind), dimension(:,:,:), allocatable :: & - work_g1 - - integer (kind=int_kind) :: nx, ny - - integer (kind=int_kind) :: lnrec ! local value of nrec - - character(len=*), parameter :: subname = '(ice_read_nc_xyf)' - -#ifdef USE_NETCDF - real (kind=dbl_kind), dimension(:,:,:), allocatable :: & - work_g2 - - lnrec = nrec - - if (orca_halogrid .and. .not. present(restart_ext)) then - if (my_task == master_task) then - allocate(work_g2(nx_global+2,ny_global+1,nfreq)) - else - allocate(work_g2(1,1,nfreq)) ! to save memory - endif - work_g2(:,:,:) = c0 - endif - - nx = nx_global - ny = ny_global - - if (present(restart_ext)) then - if (restart_ext) then - nx = nx_global + 2*nghost - ny = ny_global + 2*nghost - endif - endif - - if (my_task == master_task) then - allocate(work_g1(nx,ny,nfreq)) - else - allocate(work_g1(1,1,nfreq)) ! to save memory - endif - - if (my_task == master_task) then - - !------------------------------------------------------------- - ! 
Find out ID of required variable - !------------------------------------------------------------- - - status = nf90_inq_varid(fid, trim(varname), varid) - if (status /= nf90_noerr) then - call abort_ice(subname//' ERROR: Cannot find variable '//trim(varname), & - file=__FILE__, line=__LINE__) - endif - - !------------------------------------------------------------- - ! Check nrec axis size - !------------------------------------------------------------- - - status = nf90_inquire_variable(fid, varid, ndims=ndims, dimids=dimids) - if (status /= nf90_noerr) then - call abort_ice(subname//' ERROR: inquire variable dimids '//trim(varname), & - file=__FILE__, line=__LINE__) - endif - if (ndims > 3) then - status = nf90_inquire_dimension(fid, dimids(4), len=dimlen) - if (status /= nf90_noerr) then - call abort_ice(subname//' ERROR: inquire dimension size 4 '//trim(varname), & - file=__FILE__, line=__LINE__) - endif - if (lnrec > dimlen) then - write(nu_diag,*) subname,' ERROR not enough records, ',trim(varname),lnrec,dimlen - call abort_ice(subname//' ERROR: not enough records '//trim(varname), & - file=__FILE__, line=__LINE__) - endif - endif - - !-------------------------------------------------------------- - ! Read global array - !-------------------------------------------------------------- - - if (orca_halogrid .and. .not. 
present(restart_ext)) then - status = nf90_get_var( fid, varid, work_g2, & - start=(/1,1,1,lnrec/), & - count=(/nx_global+2,ny_global+1,nfreq,1/)) - if (status /= nf90_noerr) then - call abort_ice(subname//' ERROR: Cannot get variable '//trim(varname), & - file=__FILE__, line=__LINE__) - endif - work_g1 = work_g2(2:nx_global+1,1:ny_global,:) - else - status = nf90_get_var( fid, varid, work_g1, & - start=(/1,1,1,lnrec/), & - count=(/nx,ny,nfreq,1/)) - if (status /= nf90_noerr) then - call abort_ice(subname//' ERROR: Cannot get variable '//trim(varname), & - file=__FILE__, line=__LINE__) - endif - endif - - status = nf90_get_att(fid, varid, "missing_value", missingvalue) - endif ! my_task = master_task - - !------------------------------------------------------------------- - ! optional diagnostics - !------------------------------------------------------------------- - - if (my_task==master_task .and. diag) then - write(nu_diag,'(2a,i8,a,i8,2a)') & - subname,' fid= ',fid, ', lnrec = ',lnrec, & - ', varname = ',trim(varname) - status = nf90_inquire(fid, nDimensions=ndim, nVariables=nvar) - write(nu_diag,*) subname,' ndim= ',ndim,', nvar= ',nvar - do id=1,ndim - status = nf90_inquire_dimension(fid,id,name=dimname,len=dimlen) - write(nu_diag,*) subname,' Dim name = ',trim(dimname),', size = ',dimlen - enddo - write(nu_diag,*) subname,' missingvalue= ',missingvalue - do n = 1, nfreq - amin = minval(work_g1(:,:,n)) - amax = maxval(work_g1(:,:,n), mask = work_g1(:,:,n) /= missingvalue) - asum = sum (work_g1(:,:,n), mask = work_g1(:,:,n) /= missingvalue) - write(nu_diag,*) subname,' min, max, sum =', amin, amax, asum - enddo - endif - - !------------------------------------------------------------------- - ! Scatter data to individual processors. - ! NOTE: Ghost cells are not updated unless field_loc is present. 
- !------------------------------------------------------------------- - - if (present(restart_ext)) then - if (restart_ext) then - do n = 1, nfreq - call scatter_global_ext(work(:,:,n,1,:), work_g1(:,:,n), & - master_task, distrb_info) - enddo - endif - else - if (present(field_loc)) then - do n = 1, nfreq - call scatter_global(work(:,:,n,1,:), work_g1(:,:,n), master_task, & - distrb_info, field_loc, field_type) - enddo - else - do n = 1, nfreq - call scatter_global(work(:,:,n,1,:), work_g1(:,:,n), master_task, & - distrb_info, field_loc_noupdate, field_type_noupdate) - enddo - endif - endif - -! echmod: this should not be necessary if fill/missing are only on land - where (work > 1.0e+30_dbl_kind) work = c0 - - deallocate(work_g1) - if (orca_halogrid .and. .not. present(restart_ext)) deallocate(work_g2) - -#else - call abort_ice(subname//' ERROR: USE_NETCDF cpp not defined', & - file=__FILE__, line=__LINE__) - work = c0 ! to satisfy intent(out) attribute -#endif - - end subroutine ice_read_nc_xyf - -!======================================================================= - -! Read a netCDF file -! Adapted by Alison McLaren, Met Office from ice_read - - subroutine ice_read_nc_point(fid, nrec, varname, work, diag, & - field_loc, field_type) - - integer (kind=int_kind), intent(in) :: & - fid , & ! file id - nrec ! record number - - logical (kind=log_kind), intent(in) :: & - diag ! if true, write diagnostic output - - character (char_len), intent(in) :: & - varname ! field name in netcdf file - - integer (kind=int_kind), optional, intent(in) :: & - field_loc, & ! location of field on staggered grid - field_type ! type of field (scalar, vector, angle) - - real (kind=dbl_kind), intent(out) :: & - work ! output variable (real, 8-byte) - - ! local variables - - character(len=*), parameter :: subname = '(ice_read_nc_point)' - -#ifdef USE_NETCDF -! netCDF file diagnostics: - integer (kind=int_kind) :: & - varid, & ! netcdf id for field - status, & ! 
status output from netcdf routines - ndim, nvar, & ! sizes of netcdf file - id, & ! dimension index - ndims, & ! number of dimensions - dimlen ! dimension size - - integer (kind=int_kind), dimension(10) :: & - dimids ! generic size dimids - - real (kind=dbl_kind), dimension(1) :: & - workg ! temporary work variable - - integer (kind=int_kind) :: lnrec ! local value of nrec - - character (char_len) :: & - dimname ! dimension name - - lnrec = nrec - - if (my_task == master_task) then - - !------------------------------------------------------------- - ! Find out ID of required variable - !------------------------------------------------------------- - - status = nf90_inq_varid(fid, trim(varname), varid) - if (status /= nf90_noerr) then - call abort_ice(subname//' ERROR: Cannot find variable '//trim(varname), & - file=__FILE__, line=__LINE__) - endif - - !------------------------------------------------------------- - ! Check nrec axis size - !------------------------------------------------------------- - - status = nf90_inquire_variable(fid, varid, ndims=ndims, dimids=dimids) - if (status /= nf90_noerr) then - call abort_ice(subname//' ERROR: inquire variable dimids '//trim(varname), & - file=__FILE__, line=__LINE__) - endif - if (ndims > 0) then - status = nf90_inquire_dimension(fid, dimids(1), len=dimlen) - if (status /= nf90_noerr) then - call abort_ice(subname//' ERROR: inquire dimension size 1 '//trim(varname), & - file=__FILE__, line=__LINE__) - endif - if (lnrec > dimlen) then - write(nu_diag,*) subname,' ERROR not enough records, ',trim(varname),lnrec,dimlen - call abort_ice(subname//' ERROR: not enough records '//trim(varname), & - file=__FILE__, line=__LINE__) - endif - endif - - !-------------------------------------------------------------- - ! 
Read point variable - !-------------------------------------------------------------- - - status = nf90_get_var(fid, varid, workg, & - start= (/ lnrec /), & - count=(/ 1 /)) - if (status /= nf90_noerr) then - call abort_ice(subname//' ERROR: Cannot get variable '//trim(varname), & - file=__FILE__, line=__LINE__) - endif - endif ! my_task = master_task - - !------------------------------------------------------------------- - ! optional diagnostics - !------------------------------------------------------------------- - - if (my_task==master_task .and. diag) then - write(nu_diag,'(2a,i8,a,i8,2a)') & - subname,' fid= ',fid, ', lnrec = ',lnrec, & - ', varname = ',trim(varname) - status = nf90_inquire(fid, nDimensions=ndim, nVariables=nvar) - write(nu_diag,*) subname,' ndim= ',ndim,', nvar= ',nvar - do id=1,ndim - status = nf90_inquire_dimension(fid,id,name=dimname,len=dimlen) - write(nu_diag,*) subname,' Dim name = ',trim(dimname),', size = ',dimlen - enddo - endif - - work = workg(1) - -#else - call abort_ice(subname//' ERROR: USE_NETCDF cpp not defined', & - file=__FILE__, line=__LINE__) - work = c0 ! to satisfy intent(out) attribute -#endif - end subroutine ice_read_nc_point - -!======================================================================= - -! Written by T. Craig - - subroutine ice_read_nc_1D(fid, varname, work, diag, & - xdim) - - use ice_fileunits, only: nu_diag - - integer (kind=int_kind), intent(in) :: & - fid , & ! file id - xdim ! field dimensions - - logical (kind=log_kind), intent(in) :: & - diag ! if true, write diagnostic output - - character (char_len), intent(in) :: & - varname ! field name in netcdf file - - real (kind=dbl_kind), dimension(:), intent(out) :: & - work ! output array - - ! local variables - - character(len=*), parameter :: subname = '(ice_read_nc_1D)' - -#ifdef USE_NETCDF -! netCDF file diagnostics: - integer (kind=int_kind) :: & - varid, & ! netcdf id for field - status, & ! status output from netcdf routines - ndim, nvar ! 
sizes of netcdf file - - real (kind=dbl_kind), dimension(xdim) :: & - workg ! output array (real, 8-byte) - - !-------------------------------------------------------------- - - if (my_task == master_task) then - - if (size(work,dim=1) < xdim) then - write(nu_diag,*) subname,' work, dim=1 ',size(work,dim=1),xdim - call abort_ice (subname//' ERROR: work array wrong size '//trim(varname), & - file=__FILE__, line=__LINE__ ) - endif - !------------------------------------------------------------- - ! Find out ID of required variable - !------------------------------------------------------------- - - status = nf90_inq_varid(fid, trim(varname), varid) - - if (status /= nf90_noerr) then - call abort_ice (subname//' ERROR: Cannot find variable '//trim(varname), & - file=__FILE__, line=__LINE__ ) - endif - - !-------------------------------------------------------------- - ! Read array - !-------------------------------------------------------------- - status = nf90_get_var( fid, varid, workg, & - start=(/1/), & - count=(/xdim/) ) - work(1:xdim) = workg(1:xdim) - - !------------------------------------------------------------------- - ! optional diagnostics - !------------------------------------------------------------------- - - if (diag) then - write(nu_diag,*) subname, & - ' fid= ',fid, ', xdim = ',xdim, & - ' varname = ',trim(varname) - status = nf90_inquire(fid, nDimensions=ndim, nVariables=nvar) - write(nu_diag,*) 'ndim= ',ndim,', nvar= ',nvar - endif - endif -#else - call abort_ice(subname//'ERROR: USE_NETCDF cpp not defined', & - file=__FILE__, line=__LINE__) - work = c0 ! to satisfy intent(out) attribute -#endif - - end subroutine ice_read_nc_1D - -!======================================================================= - -! Written by T. Craig - - subroutine ice_read_nc_2D(fid, varname, work, diag, & - xdim, ydim) - - use ice_fileunits, only: nu_diag - - integer (kind=int_kind), intent(in) :: & - fid , & ! file id - xdim, ydim ! 
field dimensions - - logical (kind=log_kind), intent(in) :: & - diag ! if true, write diagnostic output - - character (char_len), intent(in) :: & - varname ! field name in netcdf file - - real (kind=dbl_kind), dimension(:,:), intent(out) :: & - work ! output array - - ! local variables - - character(len=*), parameter :: subname = '(ice_read_nc_2D)' - -#ifdef USE_NETCDF -! netCDF file diagnostics: - integer (kind=int_kind) :: & - varid, & ! netcdf id for field - status, & ! status output from netcdf routines - ndim, nvar ! sizes of netcdf file - - real (kind=dbl_kind), dimension(xdim,ydim) :: & - workg ! output array (real, 8-byte) - - !-------------------------------------------------------------- - - if (my_task == master_task) then - - if (size(work,dim=1) < xdim .or. & - size(work,dim=2) < ydim) then - write(nu_diag,*) subname,' work, dim=1 ',size(work,dim=1),xdim - write(nu_diag,*) subname,' work, dim=2 ',size(work,dim=2),ydim - call abort_ice (subname//' ERROR: work array wrong size '//trim(varname), & - file=__FILE__, line=__LINE__ ) - endif - !------------------------------------------------------------- - ! Find out ID of required variable - !------------------------------------------------------------- - - status = nf90_inq_varid(fid, trim(varname), varid) - - if (status /= nf90_noerr) then - call abort_ice (subname//' ERROR: Cannot find variable '//trim(varname), & - file=__FILE__, line=__LINE__ ) - endif - - !-------------------------------------------------------------- - ! Read array - !-------------------------------------------------------------- - status = nf90_get_var( fid, varid, workg, & - start=(/1,1/), & - count=(/xdim,ydim/) ) - work(1:xdim,1:ydim) = workg(1:xdim, 1:ydim) - - !------------------------------------------------------------------- - ! 
optional diagnostics - !------------------------------------------------------------------- - - if (diag) then - write(nu_diag,*) subname, & - ' fid= ',fid, ', xdim = ',xdim, & - ' ydim= ', ydim, ' varname = ',trim(varname) - status = nf90_inquire(fid, nDimensions=ndim, nVariables=nvar) - write(nu_diag,*) 'ndim= ',ndim,', nvar= ',nvar - endif - endif -#else - call abort_ice(subname//'ERROR: USE_NETCDF cpp not defined', & - file=__FILE__, line=__LINE__) - work = c0 ! to satisfy intent(out) attribute -#endif - - end subroutine ice_read_nc_2D - -!======================================================================= -!======================================================================= - -! Written by T. Craig - - subroutine ice_read_nc_3D(fid, varname, work, diag, & - xdim, ydim, zdim) - - use ice_fileunits, only: nu_diag - - integer (kind=int_kind), intent(in) :: & - fid , & ! file id - xdim, ydim,zdim ! field dimensions - - logical (kind=log_kind), intent(in) :: & - diag ! if true, write diagnostic output - - character (char_len), intent(in) :: & - varname ! field name in netcdf file - - real (kind=dbl_kind), dimension(:,:,:), intent(out) :: & - work ! output array - - ! local variables - - character(len=*), parameter :: subname = '(ice_read_nc_3D)' - -#ifdef USE_NETCDF -! netCDF file diagnostics: - integer (kind=int_kind) :: & - varid, & ! netcdf id for field - status, & ! status output from netcdf routines - ndim, nvar ! sizes of netcdf file - - real (kind=dbl_kind), dimension(xdim,ydim,zdim) :: & - workg ! output array (real, 8-byte) - - !-------------------------------------------------------------- - - if (my_task == master_task) then - - if (size(work,dim=1) < xdim .or. & - size(work,dim=2) < ydim .or. 
& - size(work,dim=3) < zdim ) then - write(nu_diag,*) subname,' work, dim=1 ',size(work,dim=1),xdim - write(nu_diag,*) subname,' work, dim=2 ',size(work,dim=2),ydim - write(nu_diag,*) subname,' work, dim=3 ',size(work,dim=3),zdim - call abort_ice (subname//' ERROR: work array wrong size '//trim(varname), & - file=__FILE__, line=__LINE__ ) - endif - !------------------------------------------------------------- - ! Find out ID of required variable - !------------------------------------------------------------- - - status = nf90_inq_varid(fid, trim(varname), varid) - - if (status /= nf90_noerr) then - call abort_ice (subname//' ERROR: Cannot find variable '//trim(varname), & - file=__FILE__, line=__LINE__ ) - endif - - !-------------------------------------------------------------- - ! Read array - !-------------------------------------------------------------- - status = nf90_get_var( fid, varid, workg, & - start=(/1,1,1/), & - count=(/xdim,ydim,zdim/) ) - work(1:xdim,1:ydim,1:zdim) = workg(1:xdim, 1:ydim, 1:zdim) - - !------------------------------------------------------------------- - ! optional diagnostics - !------------------------------------------------------------------- - - if (diag) then - write(nu_diag,*) subname, & - ' fid= ',fid, ', xdim = ',xdim, & - ' ydim= ', ydim,' zdim = ',zdim, ' varname = ',trim(varname) - status = nf90_inquire(fid, nDimensions=ndim, nVariables=nvar) - write(nu_diag,*) 'ndim= ',ndim,', nvar= ',nvar - endif - endif -#else - call abort_ice(subname//'ERROR: USE_NETCDF cpp not defined', & - file=__FILE__, line=__LINE__) - work = c0 ! to satisfy intent(out) attribute -#endif - - end subroutine ice_read_nc_3D - -!======================================================================= - -! Adapted by Nicole Jeffery, LANL - - subroutine ice_read_nc_z(fid, nrec, varname, work, diag, & - field_loc, field_type) - - use ice_domain_size, only: nilyr - - integer (kind=int_kind), intent(in) :: & - fid , & ! file id - nrec ! 
record number - - logical (kind=log_kind), intent(in) :: & - diag ! if true, write diagnostic output - - character (char_len), intent(in) :: & - varname ! field name in netcdf file - - integer (kind=int_kind), optional, intent(in) :: & - field_loc, & ! location of field on staggered grid - field_type ! type of field (scalar, vector, angle) - - real (kind=dbl_kind), dimension(nilyr), intent(out) :: & - work ! output array (real, 8-byte) - - ! local variables - -#ifdef USE_NETCDF - real (kind=dbl_kind), dimension(:), allocatable :: & - work_z - -! netCDF file diagnostics: - integer (kind=int_kind) :: & - varid, & ! netcdf id for field - status, & ! status output from netcdf routines - ndim, nvar, & ! sizes of netcdf file - id, & ! dimension index - ndims, & ! number of dimensions - dimlen ! dimension size - - integer (kind=int_kind), dimension(10) :: & - dimids ! generic size dimids - - character (char_len) :: & - dimname ! dimension name - - integer (kind=int_kind) :: lnrec ! local value of nrec - -#endif - - character(len=*), parameter :: subname = '(ice_read_nc_z)' - -#ifdef USE_NETCDF - - lnrec = nrec - - allocate(work_z(nilyr)) - - if (my_task == master_task) then - - !------------------------------------------------------------- - ! Find out ID of required variable - !------------------------------------------------------------- - - status = nf90_inq_varid(fid, trim(varname), varid) - if (status /= nf90_noerr) then - call abort_ice(subname//' ERROR: Cannot find variable '//trim(varname), & - file=__FILE__, line=__LINE__) - endif - - !------------------------------------------------------------- - ! 
Check nrec axis size - !------------------------------------------------------------- - - status = nf90_inquire_variable(fid, varid, ndims=ndims, dimids=dimids) - if (status /= nf90_noerr) then - call abort_ice(subname//' ERROR: inquire variable dimids '//trim(varname), & - file=__FILE__, line=__LINE__) - endif - if (ndims > 1) then - status = nf90_inquire_dimension(fid, dimids(2), len=dimlen) - if (status /= nf90_noerr) then - call abort_ice(subname//' ERROR: inquire dimension size 2 '//trim(varname), & - file=__FILE__, line=__LINE__) - endif - if (lnrec > dimlen) then - write(nu_diag,*) subname,' ERROR not enough records, ',trim(varname),lnrec,dimlen - call abort_ice(subname//' ERROR: not enough records '//trim(varname), & - file=__FILE__, line=__LINE__) - endif - endif - - !-------------------------------------------------------------- - ! Read global array - !-------------------------------------------------------------- - - status = nf90_get_var( fid, varid, work_z, & - start=(/1,lnrec/), & - count=(/nilyr,1/)) - if (status /= nf90_noerr) then - call abort_ice(subname//' ERROR: Cannot get variable '//trim(varname), & - file=__FILE__, line=__LINE__) - endif - endif ! my_task = master_task - - !------------------------------------------------------------------- - ! optional diagnostics - !------------------------------------------------------------------- - - if (my_task==master_task .and. 
diag) then - write(nu_diag,'(2a,i8,a,i8,2a)') & - subname,' fid= ',fid, ', lnrec = ',lnrec, & - ', varname = ',trim(varname) - status = nf90_inquire(fid, nDimensions=ndim, nVariables=nvar) - write(nu_diag,*) subname,' ndim= ',ndim,', nvar= ',nvar - do id=1,ndim - status = nf90_inquire_dimension(fid,id,name=dimname,len=dimlen) - write(nu_diag,*) subname,' Dim name = ',trim(dimname),', size = ',dimlen - enddo - endif - - work(:) = work_z(:) - deallocate(work_z) - -#else - call abort_ice(subname//' ERROR: USE_NETCDF cpp not defined', & - file=__FILE__, line=__LINE__) - work = c0 ! to satisfy intent(out) attribute -#endif - end subroutine ice_read_nc_z - -!======================================================================= - -! Write a netCDF file. -! -! Adapted by David Bailey, NCAR - - subroutine ice_write_nc_xy(fid, nrec, varid, work, diag, & - restart_ext, varname) - - use ice_gather_scatter, only: gather_global, gather_global_ext - - integer (kind=int_kind), intent(in) :: & - fid , & ! file id - varid , & ! variable id - nrec ! record number - - logical (kind=log_kind), intent(in) :: & - diag ! if true, write diagnostic output - - logical (kind=log_kind), optional, intent(in) :: & - restart_ext ! if true, write extended grid - - real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks), intent(in) :: & - work ! output array (real, 8-byte) - - character (len=*), optional, intent(in) :: & - varname ! variable name - - ! local variables - - character(len=*), parameter :: subname = '(ice_write_nc_xy)' - -#ifdef USE_NETCDF -! netCDF file diagnostics: - integer (kind=int_kind) :: & - status ! status output from netcdf routines -! ndim, nvar, & ! sizes of netcdf file -! id, & ! dimension index -! dimlen ! size of dimension - - real (kind=dbl_kind) :: & - amin, amax, asum ! min, max values and sum of input array - - character (char_len) :: & - lvarname ! variable name -! dimname ! 
dimension name - - real (kind=dbl_kind), dimension(:,:), allocatable :: & - work_g1 - - integer (kind=int_kind) :: nx, ny - - nx = nx_global - ny = ny_global - - if (present(restart_ext)) then - if (restart_ext) then - nx = nx_global + 2*nghost - ny = ny_global + 2*nghost - endif - endif - - if (present(varname)) then - lvarname = trim(varname) - else - lvarname = ' ' - endif - - if (my_task == master_task) then - allocate(work_g1(nx,ny)) - else - allocate(work_g1(1,1)) ! to save memory - endif - - if (present(restart_ext)) then - if (restart_ext) then - call gather_global_ext(work_g1, work, master_task, distrb_info, spc_val=c0) - endif - else - call gather_global(work_g1, work, master_task, distrb_info, spc_val=c0) - endif - - if (my_task == master_task) then - - !-------------------------------------------------------------- - ! Write global array - !-------------------------------------------------------------- - - status = nf90_put_var( fid, varid, work_g1, & - start=(/1,1,nrec/), & - count=(/nx,ny,1/)) - - endif ! my_task = master_task - - !------------------------------------------------------------------- - ! optional diagnostics - !------------------------------------------------------------------- - - if (my_task==master_task .and. diag) then -! write(nu_diag,*) & -! subname,' fid= ',fid, ', nrec = ',nrec, & -! ', varid = ',varid -! status = nf90_inquire(fid, nDimensions=ndim, nVariables=nvar) -! write(nu_diag,*) subname,' ndim= ',ndim,', nvar= ',nvar -! do id=1,ndim -! status = nf90_inquire_dimension(fid,id,name=dimname,len=dimlen) -! write(nu_diag,*) subname,' Dim name = ',trim(dimname),', size = ',dimlen -! 
enddo - amin = minval(work_g1) - amax = maxval(work_g1, mask = work_g1 /= spval_dbl) - asum = sum (work_g1, mask = work_g1 /= spval_dbl) - write(nu_diag,*) subname,' min, max, sum =', amin, amax, asum, trim(lvarname) - endif - - deallocate(work_g1) - -#else - call abort_ice(subname//' ERROR: USE_NETCDF cpp not defined', & - file=__FILE__, line=__LINE__) -#endif - - end subroutine ice_write_nc_xy - -!======================================================================= - -! Write a netCDF file. -! -! Adapted by David Bailey, NCAR - - subroutine ice_write_nc_xyz(fid, nrec, varid, work, diag, & - restart_ext, varname) - - use ice_gather_scatter, only: gather_global, gather_global_ext - - integer (kind=int_kind), intent(in) :: & - fid , & ! file id - varid , & ! variable id - nrec ! record number - - logical (kind=log_kind), intent(in) :: & - diag ! if true, write diagnostic output - - logical (kind=log_kind), optional, intent(in) :: & - restart_ext ! if true, read extended grid - - real (kind=dbl_kind), dimension(nx_block,ny_block,ncat,max_blocks), intent(in) :: & - work ! output array (real, 8-byte) - - character (len=*), optional, intent(in) :: & - varname ! variable name - - ! local variables - - character(len=*), parameter :: subname = '(ice_write_nc_xyz)' - -#ifdef USE_NETCDF -! netCDF file diagnostics: - integer (kind=int_kind) :: & - n, & ! ncat index - status ! status output from netcdf routines -! ndim, nvar, & ! sizes of netcdf file -! id, & ! dimension index -! dimlen ! size of dimension - - real (kind=dbl_kind) :: & - amin, amax, asum ! min, max values and sum of input array - - character (char_len) :: & - lvarname ! variable name -! dimname ! 
dimension name - - real (kind=dbl_kind), dimension(:,:,:), allocatable :: & - work_g1 - - integer (kind=int_kind) :: nx, ny - - nx = nx_global - ny = ny_global - - if (present(restart_ext)) then - if (restart_ext) then - nx = nx_global + 2*nghost - ny = ny_global + 2*nghost - endif - endif - - if (my_task == master_task) then - allocate(work_g1(nx,ny,ncat)) - else - allocate(work_g1(1,1,ncat)) ! to save memory - endif - - if (present(restart_ext)) then - if (restart_ext) then - do n=1,ncat - call gather_global_ext(work_g1(:,:,n), work(:,:,n,:), & - master_task, distrb_info, spc_val=c0) - enddo - endif - else - do n=1,ncat - call gather_global(work_g1(:,:,n), work(:,:,n,:), & - master_task, distrb_info, spc_val=c0) - enddo - endif - - if (present(varname)) then - lvarname = trim(varname) - else - lvarname = ' ' - endif - - if (my_task == master_task) then - - !-------------------------------------------------------------- - ! Read global array - !-------------------------------------------------------------- - - status = nf90_put_var( fid, varid, work_g1, & - start=(/1,1,1,nrec/), & - count=(/nx,ny,ncat,1/)) - - endif ! my_task = master_task - - !------------------------------------------------------------------- - ! optional diagnostics - !------------------------------------------------------------------- - - if (my_task==master_task .and. diag) then -! write(nu_diag,*) & -! subname,' fid= ',fid, ', nrec = ',nrec, & -! ', varid = ',varid -! status = nf90_inquire(fid, nDimensions=ndim, nVariables=nvar) -! write(nu_diag,*) subname,' ndim= ',ndim,', nvar= ',nvar -! do id=1,ndim -! status = nf90_inquire_dimension(fid,id,name=dimname,len=dimlen) -! write(nu_diag,*) subname,' Dim name = ',trim(dimname),', size = ',dimlen -! 
enddo - amin = 10000._dbl_kind - amax = -10000._dbl_kind - do n=1,ncat - amin = minval(work_g1(:,:,n)) - amax = maxval(work_g1(:,:,n), mask = work_g1(:,:,n) /= spval_dbl) - asum = sum (work_g1(:,:,n), mask = work_g1(:,:,n) /= spval_dbl) - write(nu_diag,*) subname,' min, max, sum =', amin, amax, asum, trim(lvarname) - enddo - endif - - deallocate(work_g1) - -#else - call abort_ice(subname//' ERROR: USE_NETCDF cpp not defined', & - file=__FILE__, line=__LINE__) -#endif - - end subroutine ice_write_nc_xyz - -!======================================================================= - -! Read a netcdf file. -! Just like ice_read_nc except that it returns a global array. -! work_g is a real array -! -! Adapted by William Lipscomb, LANL, from ice_read -! Adapted by Ann Keen, Met Office, to read from a netcdf file - - subroutine ice_read_global_nc (fid, nrec, varname, work_g, diag) - - integer (kind=int_kind), intent(in) :: & - fid , & ! file id - nrec ! record number - - character (char_len), intent(in) :: & - varname ! field name in netcdf file - - real (kind=dbl_kind), dimension(nx_global,ny_global), intent(out) :: & - work_g ! output array (real, 8-byte) - - logical (kind=log_kind) :: & - diag ! if true, write diagnostic output - - ! local variables - - character(len=*), parameter :: subname = '(ice_read_global_nc)' - -#ifdef USE_NETCDF -! netCDF file diagnostics: - integer (kind=int_kind) :: & - varid, & ! netcdf id for field - status ! status output from netcdf routines -! ndim, nvar, & ! sizes of netcdf file -! id, & ! dimension index -! dimlen ! size of dimension - - real (kind=dbl_kind) :: & - amin, amax, asum ! min, max values and sum of input array - -! character (char_len) :: & -! dimname ! dimension name -! - real (kind=dbl_kind), dimension(:,:), allocatable :: & - work_g3 - - if (orca_halogrid) then - if (my_task == master_task) then - allocate(work_g3(nx_global+2,ny_global+1)) - else - allocate(work_g3(1,1)) ! 
to save memory - endif - work_g3(:,:) = c0 - endif - - work_g(:,:) = c0 - - if (my_task == master_task) then - - !------------------------------------------------------------- - ! Find out ID of required variable - !------------------------------------------------------------- - - status = nf90_inq_varid(fid, trim(varname), varid) - if (status /= nf90_noerr) then - call abort_ice(subname//' ERROR: Cannot find variable '//trim(varname), & - file=__FILE__, line=__LINE__) - endif - - !-------------------------------------------------------------- - ! Read global array - !-------------------------------------------------------------- - - if (orca_halogrid) then - status = nf90_get_var( fid, varid, work_g3, & - start=(/1,1,nrec/), & - count=(/nx_global+2,ny_global+1,1/)) - if (status /= nf90_noerr) then - call abort_ice(subname//' ERROR: Cannot get variable '//trim(varname), & - file=__FILE__, line=__LINE__) - endif - work_g=work_g3(2:nx_global+1,1:ny_global) - else - status = nf90_get_var( fid, varid, work_g, & - start=(/1,1,nrec/), & - count=(/nx_global,ny_global,1/)) - if (status /= nf90_noerr) then - call abort_ice(subname//' ERROR: Cannot get variable '//trim(varname), & - file=__FILE__, line=__LINE__) - endif - endif - endif ! my_task = master_task - - !------------------------------------------------------------------- - ! optional diagnostics - !------------------------------------------------------------------- - - if (my_task == master_task .and. diag) then -! write(nu_diag,*) & -! subname,' fid= ',fid, ', nrec = ',nrec, & -! ', varname = ',trim(varname) -! status = nf90_inquire(fid, nDimensions=ndim, nVariables=nvar) -! write(nu_diag,*) subname,' ndim= ',ndim,', nvar= ',nvar -! do id=1,ndim -! status = nf90_inquire_dimension(fid,id,name=dimname,len=dimlen) -! write(nu_diag,*) subname,' Dim name = ',trim(dimname),', size = ',dimlen -! 
enddo - amin = minval(work_g) - amax = maxval(work_g, mask = work_g /= spval_dbl) - asum = sum (work_g, mask = work_g /= spval_dbl) - write(nu_diag,*) subname,' min, max, sum = ', amin, amax, asum, trim(varname) - endif - - if (orca_halogrid) deallocate(work_g3) - -#else - call abort_ice(subname//' ERROR: USE_NETCDF cpp not defined', & - file=__FILE__, line=__LINE__) - work_g = c0 ! to satisfy intent(out) attribute -#endif - - end subroutine ice_read_global_nc - -!======================================================================= - -! Closes a netCDF file -! author: Alison McLaren, Met Office - - subroutine ice_close_nc(fid) - - integer (kind=int_kind), intent(in) :: & - fid ! unit number - - ! local variables - - character(len=*), parameter :: subname = '(ice_close_nc)' - -#ifdef USE_NETCDF - integer (kind=int_kind) :: & - status ! status variable from netCDF routine - - if (my_task == master_task) then - status = nf90_close(fid) - endif -#else - call abort_ice(subname//' ERROR: USE_NETCDF cpp not defined', & - file=__FILE__, line=__LINE__) -#endif - - end subroutine ice_close_nc - -!======================================================================= - -! Read a netCDF file and scatter to processors. -! If the optional variables field_loc and field_type are present, -! the ghost cells are filled using values from the global array. -! This prevents them from being filled with zeroes in land cells -! (subroutine ice_HaloUpdate need not be called). -! -! Adapted by Elizabeth Hunke for reading 3D ocean currents - - subroutine ice_read_nc_uv(fid, nrec, nzlev, varname, work, diag, & - field_loc, field_type, restart_ext) - - use ice_gather_scatter, only: scatter_global, scatter_global_ext - - integer (kind=int_kind), intent(in) :: & - fid , & ! file id - nrec , & ! record number - nzlev ! z level - - logical (kind=log_kind), intent(in) :: & - diag ! if true, write diagnostic output - - character (len=*), intent(in) :: & - varname ! 
field name in netcdf file - - real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks), intent(out) :: & - work ! output array (real, 8-byte) - - logical (kind=log_kind), optional, intent(in) :: & - restart_ext ! if true, read extended grid - - integer (kind=int_kind), optional, intent(in) :: & - field_loc, & ! location of field on staggered grid - field_type ! type of field (scalar, vector, angle) - - ! local variables - - character(len=*), parameter :: subname = '(ice_read_nc_uv)' - -#ifdef USE_NETCDF -! netCDF file diagnostics: - integer (kind=int_kind) :: & - varid , & ! variable id - status ! status output from netcdf routines -! ndim, nvar, & ! sizes of netcdf file -! id, & ! dimension index -! dimlen ! size of dimension - - real (kind=dbl_kind) :: & - amin, amax, asum ! min, max values and sum of input array - -! character (char_len) :: & -! dimname ! dimension name - - real (kind=dbl_kind), dimension(:,:), allocatable :: & - work_g1 - - integer (kind=int_kind) :: nx, ny - - nx = nx_global - ny = ny_global - - if (present(restart_ext)) then - if (restart_ext) then - nx = nx_global + 2*nghost - ny = ny_global + 2*nghost - endif - endif - - if (my_task == master_task) then - allocate(work_g1(nx,ny)) - else - allocate(work_g1(1,1)) ! to save memory - endif - - if (my_task == master_task) then - - !------------------------------------------------------------- - ! Find out ID of required variable - !------------------------------------------------------------- - - status = nf90_inq_varid(fid, trim(varname), varid) - if (status /= nf90_noerr) then - call abort_ice(subname//' ERROR: Cannot find variable '//trim(varname), & - file=__FILE__, line=__LINE__) - endif - - !-------------------------------------------------------------- - ! 
Read global array - !-------------------------------------------------------------- - - status = nf90_get_var( fid, varid, work_g1, & - start=(/1,1,nzlev,nrec/), & - count=(/nx,ny,1,1/)) - if (status /= nf90_noerr) then - call abort_ice(subname//' ERROR: Cannot get variable '//trim(varname), & - file=__FILE__, line=__LINE__) - endif - - endif ! my_task = master_task - - !------------------------------------------------------------------- - ! optional diagnostics - !------------------------------------------------------------------- - - if (my_task==master_task .and. diag) then - amin = minval(work_g1) - amax = maxval(work_g1, mask = work_g1 /= spval_dbl) - asum = sum (work_g1, mask = work_g1 /= spval_dbl) - write(nu_diag,*) subname,' min, max, sum =', amin, amax, asum, trim(varname) - endif - - !------------------------------------------------------------------- - ! Scatter data to individual processors. - ! NOTE: Ghost cells are not updated unless field_loc is present. - !------------------------------------------------------------------- - - if (present(restart_ext)) then - if (restart_ext) then - call scatter_global_ext(work, work_g1, master_task, distrb_info) - endif - else - if (present(field_loc)) then - call scatter_global(work, work_g1, master_task, distrb_info, & - field_loc, field_type) - else - call scatter_global(work, work_g1, master_task, distrb_info, & - field_loc_noupdate, field_type_noupdate) - endif - endif - - deallocate(work_g1) - -#else - call abort_ice(subname//' ERROR: USE_NETCDF cpp not defined', & - file=__FILE__, line=__LINE__) - work = c0 ! to satisfy intent(out) attribute -#endif - - end subroutine ice_read_nc_uv - -!======================================================================= -! Read a vector in a netcdf file. -! Just like ice_read_global_nc except that it returns a vector. -! work_g is a real vector -! -! Adapted by William Lipscomb, LANL, from ice_read -! 
Adapted by Ann Keen, Met Office, to read from a netcdf file - - subroutine ice_read_vec_nc (fid, nrec, varname, work_g, diag) - - integer (kind=int_kind), intent(in) :: & - fid , & ! file id - nrec ! record number - - character (char_len), intent(in) :: & - varname ! field name in netcdf file - - real (kind=dbl_kind), dimension(nrec), & - intent(out) :: & - work_g ! output array (real, 8-byte) - - logical (kind=log_kind) :: & - diag ! if true, write diagnostic output - - ! local variables - - character(len=*), parameter :: subname = '(ice_read_vec_nc)' - -#ifdef USE_NETCDF -! netCDF file diagnostics: - integer (kind=int_kind) :: & - varid, & ! netcdf id for field - status ! status output from netcdf routines - - real (kind=dbl_kind) :: & - amin, amax ! min, max values of input vector - - work_g(:) = c0 - - if (my_task == master_task) then - - !------------------------------------------------------------- - ! Find out ID of required variable - !------------------------------------------------------------- - - status = nf90_inq_varid(fid, trim(varname), varid) - if (status /= nf90_noerr) then - call abort_ice(subname//' ERROR: Cannot find variable '//trim(varname), & - file=__FILE__, line=__LINE__) - endif - - !-------------------------------------------------------------- - ! Read global array - !-------------------------------------------------------------- - - status = nf90_get_var( fid, varid, work_g, & - start=(/1/), & - count=(/nrec/)) - if (status /= nf90_noerr) then - call abort_ice(subname//' ERROR: Cannot get variable '//trim(varname), & - file=__FILE__, line=__LINE__) - endif - - endif ! my_task = master_task - - !------------------------------------------------------------------- - ! optional diagnostics - !------------------------------------------------------------------- - - if (my_task == master_task .and. 
diag) then - amin = minval(work_g) - amax = maxval(work_g) - write(nu_diag,*) subname,' min, max, nrec = ', amin, amax, nrec - endif - -#else - call abort_ice(subname//' ERROR: USE_NETCDF cpp not defined', & - file=__FILE__, line=__LINE__) - work_g = c0 ! to satisfy intent(out) attribute -#endif - - end subroutine ice_read_vec_nc - -!======================================================================= -! Get number of variables of a given variable - subroutine ice_get_ncvarsize(fid,varname,recsize) - - integer (kind=int_kind), intent(in) :: & - fid ! file id - character (char_len), intent(in) :: & - varname ! field name in netcdf file - integer (kind=int_kind), intent(out) :: & - recsize ! Number of records in file - - ! local variables - -#ifdef USE_NETCDF - integer (kind=int_kind) :: & - ndims, i, status - character (char_len) :: & - cvar -#endif - character(len=*), parameter :: subname = '(ice_get_ncvarsize)' - -#ifdef USE_NETCDF - if (my_task == master_task) then - status=nf90_inquire(fid, nDimensions = nDims) - if (status /= nf90_noerr) then - call abort_ice(subname//' ERROR: inquire nDimensions', & - file=__FILE__, line=__LINE__ ) - endif - do i=1,nDims - status = nf90_inquire_dimension(fid,i,name=cvar,len=recsize) - if (status /= nf90_noerr) then - call abort_ice(subname//' ERROR: inquire len for variable '//trim(cvar), & - file=__FILE__, line=__LINE__) - endif - if (trim(cvar) == trim(varname)) exit - enddo - if (trim(cvar) .ne. trim(varname)) then - call abort_ice(subname//' ERROR: Did not find variable '//trim(varname), & - file=__FILE__, line=__LINE__) - endif - endif -#else - call abort_ice(subname//' ERROR: USE_NETCDF cpp not defined', & - file=__FILE__, line=__LINE__) - recsize = 0 ! 
to satisfy intent(out) attribute -#endif - - end subroutine ice_get_ncvarsize - -!======================================================================= - - end module ice_read_write - -!======================================================================= diff --git a/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_history_write.F90 b/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_history_write.F90 deleted file mode 100644 index 019ab8ce9..000000000 --- a/cicecore/cicedynB/infrastructure/io/io_netcdf/ice_history_write.F90 +++ /dev/null @@ -1,1348 +0,0 @@ -#ifdef ncdf -#define USE_NETCDF -#endif -!======================================================================= -! -! Writes history in netCDF format -! -! authors Tony Craig and Bruce Briegleb, NCAR -! Elizabeth C. Hunke and William H. Lipscomb, LANL -! C. M. Bitz, UW -! -! 2004 WHL: Block structure added -! 2006 ECH: Accepted some CESM code into mainstream CICE -! Added ice_present, aicen, vicen; removed aice1...10, vice1...1. -! Added histfreq_n and histfreq='h' options, removed histfreq='w' -! Converted to free source form (F90) -! Added option for binary output instead of netCDF -! 2009 D Bailey and ECH: Generalized for multiple frequency output -! 2010 Alison McLaren and ECH: Added 3D capability -! 2013 ECH split from ice_history.F90 - - module ice_history_write - - use ice_constants, only: c0, c360, spval, spval_dbl - use ice_fileunits, only: nu_diag - use ice_exit, only: abort_ice - use icepack_intfc, only: icepack_warnings_flush, icepack_warnings_aborted - use icepack_intfc, only: icepack_query_parameters - - implicit none - private - public :: ice_write_hist - -!======================================================================= - - contains - -!======================================================================= -! -! write average ice quantities or snapshots -! -! author: Elizabeth C. 
Hunke, LANL - - subroutine ice_write_hist (ns) - - use ice_kinds_mod - use ice_arrays_column, only: hin_max, floe_rad_c - use ice_blocks, only: nx_block, ny_block - use ice_broadcast, only: broadcast_scalar - use ice_calendar, only: msec, timesecs, idate, idate0, write_ic, & - histfreq, days_per_year, use_leap_years, dayyr, & - hh_init, mm_init, ss_init - use ice_communicate, only: my_task, master_task - use ice_domain, only: distrb_info - use ice_domain_size, only: nx_global, ny_global, max_nstrm, max_blocks - use ice_gather_scatter, only: gather_global - use ice_grid, only: TLON, TLAT, ULON, ULAT, NLON, NLAT, ELON, ELAT, & - hm, uvm, npm, epm, bm, tarea, uarea, narea, earea, & - dxU, dxT, dyU, dyT, dxN, dyN, dxE, dyE, HTN, HTE, ANGLE, ANGLET, & - lont_bounds, latt_bounds, lonu_bounds, latu_bounds, & - lonn_bounds, latn_bounds, lone_bounds, late_bounds - use ice_history_shared - use ice_restart_shared, only: lcdf64 -#ifdef CESMCOUPLED - use ice_restart_shared, only: runid -#endif -#ifdef USE_NETCDF - use netcdf -#endif - - integer (kind=int_kind), intent(in) :: ns - - ! 
local variables - - real (kind=dbl_kind), dimension(:,:), allocatable :: work_g1 - real (kind=dbl_kind), dimension(:,:,:), allocatable :: work1_3 - real (kind=dbl_kind), dimension(nx_block,ny_block,max_blocks) :: work1 - - integer (kind=int_kind) :: i,k,ic,n,nn, & - ncid,status,imtid,jmtid,kmtidi,kmtids,kmtidb, cmtid,timid,varid, & - nvertexid,ivertex,kmtida,iflag, fmtid - integer (kind=int_kind), dimension(3) :: dimid - integer (kind=int_kind), dimension(4) :: dimidz - integer (kind=int_kind), dimension(5) :: dimidcz - integer (kind=int_kind), dimension(3) :: dimid_nverts - integer (kind=int_kind), dimension(6) :: dimidex - real (kind=dbl_kind) :: ltime2 - character (char_len) :: title - character (char_len_long) :: ncfile(max_nstrm) - real (kind=dbl_kind) :: secday, rad_to_deg - - integer (kind=int_kind) :: ind,boundid - - integer (kind=int_kind) :: lprecision - - character (char_len) :: start_time,current_date,current_time - character (len=8) :: cdate - - ! 8 coordinate variables: TLON, TLAT, ULON, ULAT, NLON, NLAT, ELON, ELAT - INTEGER (kind=int_kind), PARAMETER :: ncoord = 8 - - ! 4 vertices in each grid cell - INTEGER (kind=int_kind), PARAMETER :: nverts = 4 - - ! 8 variables describe T, U grid boundaries: - ! lont_bounds, latt_bounds, lonu_bounds, latu_bounds - INTEGER (kind=int_kind), PARAMETER :: nvar_verts = 8 - - TYPE coord_attributes ! netcdf coordinate attributes - character (len=11) :: short_name - character (len=45) :: long_name - character (len=20) :: units - END TYPE coord_attributes - - TYPE req_attributes ! 
req'd netcdf attributes - type (coord_attributes) :: req - character (len=20) :: coordinates - END TYPE req_attributes - - TYPE(req_attributes), dimension(nvar_grd) :: var_grd - TYPE(coord_attributes), dimension(ncoord) :: var_coord - TYPE(coord_attributes), dimension(nvar_verts) :: var_nverts - TYPE(coord_attributes), dimension(nvar_grdz) :: var_grdz - CHARACTER (char_len), dimension(ncoord) :: coord_bounds - - character(len=*), parameter :: subname = '(ice_write_hist)' - -#ifdef USE_NETCDF - call icepack_query_parameters(secday_out=secday, rad_to_deg_out=rad_to_deg) - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) - - lprecision = nf90_float - if (history_precision == 8) lprecision = nf90_double - - if (my_task == master_task) then - - ltime2 = timesecs/secday - - call construct_filename(ncfile(ns),'nc',ns) - - ! add local directory path name to ncfile - if (write_ic) then - ncfile(ns) = trim(incond_dir)//ncfile(ns) - else - ncfile(ns) = trim(history_dir)//ncfile(ns) - endif - - ! create file - iflag = nf90_clobber - if (lcdf64) iflag = ior(iflag,nf90_64bit_offset) - status = nf90_create(ncfile(ns), iflag, ncid) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: creating history ncfile '//ncfile(ns)) - - !----------------------------------------------------------------- - ! 
define dimensions - !----------------------------------------------------------------- - - if (hist_avg) then - status = nf90_def_dim(ncid,'d2',2,boundid) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining dim d2') - endif - - status = nf90_def_dim(ncid,'ni',nx_global,imtid) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining dim ni') - - status = nf90_def_dim(ncid,'nj',ny_global,jmtid) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining dim nj') - - status = nf90_def_dim(ncid,'nc',ncat_hist,cmtid) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining dim nc') - - status = nf90_def_dim(ncid,'nkice',nzilyr,kmtidi) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining dim nki') - - status = nf90_def_dim(ncid,'nksnow',nzslyr,kmtids) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining dim nks') - - status = nf90_def_dim(ncid,'nkbio',nzblyr,kmtidb) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining dim nkb') - - status = nf90_def_dim(ncid,'nkaer',nzalyr,kmtida) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining dim nka') - - status = nf90_def_dim(ncid,'time',NF90_UNLIMITED,timid) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining dim time') - - status = nf90_def_dim(ncid,'nvertices',nverts,nvertexid) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining dim nverts') - - status = nf90_def_dim(ncid,'nf',nfsd_hist,fmtid) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining dim nf') - - !----------------------------------------------------------------- - ! 
define coordinate variables - !----------------------------------------------------------------- - - status = nf90_def_var(ncid,'time',nf90_double,timid,varid) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining var time') - - status = nf90_put_att(ncid,varid,'long_name','model time') - if (status /= nf90_noerr) call abort_ice(subname// & - 'ice Error: time long_name') - - write(cdate,'(i8.8)') idate0 - write(title,'(a,a4,a1,a2,a1,a2,a1,i2.2,a1,i2.2,a1,i2.2)') 'days since ', & - cdate(1:4),'-',cdate(5:6),'-',cdate(7:8),' ', & - hh_init,':',mm_init,':',ss_init - status = nf90_put_att(ncid,varid,'units',title) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: time units') - - if (days_per_year == 360) then - status = nf90_put_att(ncid,varid,'calendar','360_day') - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: time calendar') - elseif (days_per_year == 365 .and. .not.use_leap_years ) then - status = nf90_put_att(ncid,varid,'calendar','NoLeap') - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: time calendar') - elseif (use_leap_years) then - status = nf90_put_att(ncid,varid,'calendar','Gregorian') - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: time calendar') - else - call abort_ice(subname//'ERROR: invalid calendar settings') - endif - - if (hist_avg) then - status = nf90_put_att(ncid,varid,'bounds','time_bounds') - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: time bounds') - endif - - !----------------------------------------------------------------- - ! 
Define attributes for time bounds if hist_avg is true - !----------------------------------------------------------------- - - if (hist_avg) then - dimid(1) = boundid - dimid(2) = timid - status = nf90_def_var(ncid,'time_bounds',lprecision,dimid(1:2),varid) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining var time_bounds') - status = nf90_put_att(ncid,varid,'long_name', & - 'boundaries for time-averaging interval') - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: time_bounds long_name') - write(cdate,'(i8.8)') idate0 - write(title,'(a,a4,a1,a2,a1,a2,a1,i2.2,a1,i2.2,a1,i2.2)') 'days since ', & - cdate(1:4),'-',cdate(5:6),'-',cdate(7:8),' ', & - hh_init,':',mm_init,':',ss_init - status = nf90_put_att(ncid,varid,'units',title) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: time_bounds units') - endif - - !----------------------------------------------------------------- - ! define information for required time-invariant variables - !----------------------------------------------------------------- - - ind = 0 - ind = ind + 1 - var_coord(ind) = coord_attributes('TLON', & - 'T grid center longitude', 'degrees_east') - coord_bounds(ind) = 'lont_bounds' - ind = ind + 1 - var_coord(ind) = coord_attributes('TLAT', & - 'T grid center latitude', 'degrees_north') - coord_bounds(ind) = 'latt_bounds' - ind = ind + 1 - var_coord(ind) = coord_attributes('ULON', & - 'U grid center longitude', 'degrees_east') - coord_bounds(ind) = 'lonu_bounds' - ind = ind + 1 - var_coord(ind) = coord_attributes('ULAT', & - 'U grid center latitude', 'degrees_north') - coord_bounds(ind) = 'latu_bounds' - ind = ind + 1 - var_coord(ind) = coord_attributes('NLON', & - 'N grid center longitude', 'degrees_east') - coord_bounds(ind) = 'lonn_bounds' - ind = ind + 1 - var_coord(ind) = coord_attributes('NLAT', & - 'N grid center latitude', 'degrees_north') - coord_bounds(ind) = 'latn_bounds' - ind = ind + 1 - var_coord(ind) = coord_attributes('ELON', 
& - 'E grid center longitude', 'degrees_east') - coord_bounds(ind) = 'lone_bounds' - ind = ind + 1 - var_coord(ind) = coord_attributes('ELAT', & - 'E grid center latitude', 'degrees_north') - coord_bounds(ind) = 'late_bounds' - - var_grdz(1) = coord_attributes('NCAT', 'category maximum thickness', 'm') - var_grdz(2) = coord_attributes('VGRDi', 'vertical ice levels', '1') - var_grdz(3) = coord_attributes('VGRDs', 'vertical snow levels', '1') - var_grdz(4) = coord_attributes('VGRDb', 'vertical ice-bio levels', '1') - var_grdz(5) = coord_attributes('VGRDa', 'vertical snow-ice-bio levels', '1') - var_grdz(6) = coord_attributes('NFSD', 'category floe size (center)', 'm') - - !----------------------------------------------------------------- - ! define information for optional time-invariant variables - !----------------------------------------------------------------- - - var_grd(n_tmask)%req = coord_attributes('tmask', & - 'mask of T grid cells, 0 = land, 1 = ocean', 'unitless') - var_grd(n_tmask)%coordinates = 'TLON TLAT' - var_grd(n_umask)%req = coord_attributes('umask', & - 'mask of U grid cells, 0 = land, 1 = ocean', 'unitless') - var_grd(n_umask)%coordinates = 'ULON ULAT' - var_grd(n_nmask)%req = coord_attributes('nmask', & - 'mask of N grid cells, 0 = land, 1 = ocean', 'unitless') - var_grd(n_nmask)%coordinates = 'NLON NLAT' - var_grd(n_emask)%req = coord_attributes('emask', & - 'mask of E grid cells, 0 = land, 1 = ocean', 'unitless') - var_grd(n_emask)%coordinates = 'ELON ELAT' - - var_grd(n_tarea)%req = coord_attributes('tarea', & - 'area of T grid cells', 'm^2') - var_grd(n_tarea)%coordinates = 'TLON TLAT' - var_grd(n_uarea)%req = coord_attributes('uarea', & - 'area of U grid cells', 'm^2') - var_grd(n_uarea)%coordinates = 'ULON ULAT' - var_grd(n_narea)%req = coord_attributes('narea', & - 'area of N grid cells', 'm^2') - var_grd(n_narea)%coordinates = 'NLON NLAT' - var_grd(n_earea)%req = coord_attributes('earea', & - 'area of E grid cells', 'm^2') - 
var_grd(n_earea)%coordinates = 'ELON ELAT' - - var_grd(n_blkmask)%req = coord_attributes('blkmask', & - 'block id of T grid cells, mytask + iblk/100', 'unitless') - var_grd(n_blkmask)%coordinates = 'TLON TLAT' - - var_grd(n_dxt)%req = coord_attributes('dxt', & - 'T cell width through middle', 'm') - var_grd(n_dxt)%coordinates = 'TLON TLAT' - var_grd(n_dyt)%req = coord_attributes('dyt', & - 'T cell height through middle', 'm') - var_grd(n_dyt)%coordinates = 'TLON TLAT' - var_grd(n_dxu)%req = coord_attributes('dxu', & - 'U cell width through middle', 'm') - var_grd(n_dxu)%coordinates = 'ULON ULAT' - var_grd(n_dyu)%req = coord_attributes('dyu', & - 'U cell height through middle', 'm') - var_grd(n_dyu)%coordinates = 'ULON ULAT' - var_grd(n_dxn)%req = coord_attributes('dxn', & - 'N cell width through middle', 'm') - var_grd(n_dxn)%coordinates = 'NLON NLAT' - var_grd(n_dyn)%req = coord_attributes('dyn', & - 'N cell height through middle', 'm') - var_grd(n_dyn)%coordinates = 'NLON NLAT' - var_grd(n_dxe)%req = coord_attributes('dxe', & - 'E cell width through middle', 'm') - var_grd(n_dxe)%coordinates = 'ELON ELAT' - var_grd(n_dye)%req = coord_attributes('dye', & - 'E cell height through middle', 'm') - var_grd(n_dye)%coordinates = 'ELON ELAT' - - var_grd(n_HTN)%req = coord_attributes('HTN', & - 'T cell width on North side','m') - var_grd(n_HTN)%coordinates = 'TLON TLAT' - var_grd(n_HTE)%req = coord_attributes('HTE', & - 'T cell width on East side', 'm') - var_grd(n_HTE)%coordinates = 'TLON TLAT' - var_grd(n_ANGLE)%req = coord_attributes('ANGLE', & - 'angle grid makes with latitude line on U grid', & - 'radians') - var_grd(n_ANGLE)%coordinates = 'ULON ULAT' - var_grd(n_ANGLET)%req = coord_attributes('ANGLET', & - 'angle grid makes with latitude line on T grid', & - 'radians') - var_grd(n_ANGLET)%coordinates = 'TLON TLAT' - - ! These fields are required for CF compliance - ! 
dimensions (nx,ny,nverts) - var_nverts(n_lont_bnds) = coord_attributes('lont_bounds', & - 'longitude boundaries of T cells', 'degrees_east') - var_nverts(n_latt_bnds) = coord_attributes('latt_bounds', & - 'latitude boundaries of T cells', 'degrees_north') - var_nverts(n_lonu_bnds) = coord_attributes('lonu_bounds', & - 'longitude boundaries of U cells', 'degrees_east') - var_nverts(n_latu_bnds) = coord_attributes('latu_bounds', & - 'latitude boundaries of U cells', 'degrees_north') - var_nverts(n_lonn_bnds) = coord_attributes('lonn_bounds', & - 'longitude boundaries of N cells', 'degrees_east') - var_nverts(n_latn_bnds) = coord_attributes('latn_bounds', & - 'latitude boundaries of N cells', 'degrees_north') - var_nverts(n_lone_bnds) = coord_attributes('lone_bounds', & - 'longitude boundaries of E cells', 'degrees_east') - var_nverts(n_late_bnds) = coord_attributes('late_bounds', & - 'latitude boundaries of E cells', 'degrees_north') - - !----------------------------------------------------------------- - ! 
define attributes for time-invariant variables - !----------------------------------------------------------------- - - dimid(1) = imtid - dimid(2) = jmtid - dimid(3) = timid - - do i = 1, ncoord - status = nf90_def_var(ncid, var_coord(i)%short_name, lprecision, & - dimid(1:2), varid) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining short_name for '//var_coord(i)%short_name) - status = nf90_put_att(ncid,varid,'long_name',var_coord(i)%long_name) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining long_name for '//var_coord(i)%short_name) - status = nf90_put_att(ncid, varid, 'units', var_coord(i)%units) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining units for '//var_coord(i)%short_name) - call ice_write_hist_fill(ncid,varid,var_coord(i)%short_name,history_precision) - if (var_coord(i)%short_name == 'ULAT') then - status = nf90_put_att(ncid,varid,'comment', & - 'Latitude of NE corner of T grid cell') - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining comment for '//var_coord(i)%short_name) - endif - if (f_bounds) then - status = nf90_put_att(ncid, varid, 'bounds', coord_bounds(i)) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining bounds for '//var_coord(i)%short_name) - endif - enddo - - ! 
Extra dimensions (NCAT, NZILYR, NZSLYR, NZBLYR, NZALYR, NFSD) - dimidex(1)=cmtid - dimidex(2)=kmtidi - dimidex(3)=kmtids - dimidex(4)=kmtidb - dimidex(5)=kmtida - dimidex(6)=fmtid - - do i = 1, nvar_grdz - if (igrdz(i)) then - status = nf90_def_var(ncid, var_grdz(i)%short_name, & - lprecision, dimidex(i), varid) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining short_name for '//var_grdz(i)%short_name) - status = nf90_put_att(ncid,varid,'long_name',var_grdz(i)%long_name) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining long_name for '//var_grdz(i)%short_name) - status = nf90_put_att(ncid, varid, 'units', var_grdz(i)%units) - if (Status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining units for '//var_grdz(i)%short_name) - endif - enddo - - do i = 1, nvar_grd - if (igrd(i)) then - status = nf90_def_var(ncid, var_grd(i)%req%short_name, & - lprecision, dimid(1:2), varid) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining variable '//var_grd(i)%req%short_name) - status = nf90_put_att(ncid,varid, 'long_name', var_grd(i)%req%long_name) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining long_name for '//var_grd(i)%req%short_name) - status = nf90_put_att(ncid, varid, 'units', var_grd(i)%req%units) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining units for '//var_grd(i)%req%short_name) - status = nf90_put_att(ncid, varid, 'coordinates', var_grd(i)%coordinates) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining coordinates for '//var_grd(i)%req%short_name) - call ice_write_hist_fill(ncid,varid,var_grd(i)%req%short_name,history_precision) - endif - enddo - - ! 
Fields with dimensions (nverts,nx,ny) - dimid_nverts(1) = nvertexid - dimid_nverts(2) = imtid - dimid_nverts(3) = jmtid - do i = 1, nvar_verts - if (f_bounds) then - status = nf90_def_var(ncid, var_nverts(i)%short_name, & - lprecision,dimid_nverts, varid) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining variable '//var_nverts(i)%short_name) - status = nf90_put_att(ncid,varid, 'long_name', var_nverts(i)%long_name) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining long_name for '//var_nverts(i)%short_name) - status = nf90_put_att(ncid, varid, 'units', var_nverts(i)%units) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining units for '//var_nverts(i)%short_name) - call ice_write_hist_fill(ncid,varid,var_nverts(i)%short_name,history_precision) - endif - enddo - - do n=1,num_avail_hist_fields_2D - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - status = nf90_def_var(ncid, avail_hist_fields(n)%vname, & - lprecision, dimid, varid) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining variable '//avail_hist_fields(n)%vname) - call ice_write_hist_attrs(ncid,varid,avail_hist_fields(n),ns) - endif - enddo ! num_avail_hist_fields_2D - - dimidz(1) = imtid - dimidz(2) = jmtid - dimidz(3) = cmtid - dimidz(4) = timid - - do n = n2D + 1, n3Dccum - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - status = nf90_def_var(ncid, avail_hist_fields(n)%vname, & - lprecision, dimidz, varid) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining variable '//avail_hist_fields(n)%vname) - call ice_write_hist_attrs(ncid,varid,avail_hist_fields(n),ns) - endif - enddo ! num_avail_hist_fields_3Dc - - dimidz(1) = imtid - dimidz(2) = jmtid - dimidz(3) = kmtidi - dimidz(4) = timid - - do n = n3Dccum + 1, n3Dzcum - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. 
write_ic) then - status = nf90_def_var(ncid, avail_hist_fields(n)%vname, & - lprecision, dimidz, varid) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining variable '//avail_hist_fields(n)%vname) - call ice_write_hist_attrs(ncid,varid,avail_hist_fields(n),ns) - endif - enddo ! num_avail_hist_fields_3Dz - - dimidz(1) = imtid - dimidz(2) = jmtid - dimidz(3) = kmtidb - dimidz(4) = timid - - do n = n3Dzcum + 1, n3Dbcum - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - status = nf90_def_var(ncid, avail_hist_fields(n)%vname, & - lprecision, dimidz, varid) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining variable '//avail_hist_fields(n)%vname) - call ice_write_hist_attrs(ncid,varid,avail_hist_fields(n),ns) - endif - enddo ! num_avail_hist_fields_3Db - - dimidz(1) = imtid - dimidz(2) = jmtid - dimidz(3) = kmtida - dimidz(4) = timid - - do n = n3Dbcum + 1, n3Dacum - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - status = nf90_def_var(ncid, avail_hist_fields(n)%vname, & - lprecision, dimidz, varid) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining variable '//avail_hist_fields(n)%vname) - call ice_write_hist_attrs(ncid,varid,avail_hist_fields(n),ns) - endif - enddo ! num_avail_hist_fields_3Da - - dimidz(1) = imtid - dimidz(2) = jmtid - dimidz(3) = fmtid - dimidz(4) = timid - - do n = n3Dacum + 1, n3Dfcum - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - status = nf90_def_var(ncid, avail_hist_fields(n)%vname, & - lprecision, dimidz, varid) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining variable '//avail_hist_fields(n)%vname) - call ice_write_hist_attrs(ncid,varid,avail_hist_fields(n),ns) - endif - enddo ! 
num_avail_hist_fields_3Df - - dimidcz(1) = imtid - dimidcz(2) = jmtid - dimidcz(3) = kmtidi - dimidcz(4) = cmtid - dimidcz(5) = timid - - do n = n3Dfcum + 1, n4Dicum - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - status = nf90_def_var(ncid, avail_hist_fields(n)%vname, & -! lprecision, dimidcz, varid) - lprecision, dimidcz(1:4), varid) ! ferret - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining variable '//avail_hist_fields(n)%vname) - call ice_write_hist_attrs(ncid,varid,avail_hist_fields(n),ns) - endif - enddo ! num_avail_hist_fields_4Di - - dimidcz(1) = imtid - dimidcz(2) = jmtid - dimidcz(3) = kmtids - dimidcz(4) = cmtid - dimidcz(5) = timid - - do n = n4Dicum + 1, n4Dscum - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - status = nf90_def_var(ncid, avail_hist_fields(n)%vname, & -! lprecision, dimidcz, varid) - lprecision, dimidcz(1:4), varid) ! ferret - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining variable '//avail_hist_fields(n)%vname) - call ice_write_hist_attrs(ncid,varid,avail_hist_fields(n),ns) - endif - enddo ! num_avail_hist_fields_4Ds - - dimidcz(1) = imtid - dimidcz(2) = jmtid - dimidcz(3) = fmtid - dimidcz(4) = cmtid - dimidcz(5) = timid - - do n = n4Dscum + 1, n4Dfcum - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - status = nf90_def_var(ncid, avail_hist_fields(n)%vname, & -! lprecision, dimidcz, varid) - lprecision, dimidcz(1:4), varid) ! ferret - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining variable '//avail_hist_fields(n)%vname) - call ice_write_hist_attrs(ncid,varid,avail_hist_fields(n),ns) - endif - enddo ! num_avail_hist_fields_4Df - - !----------------------------------------------------------------- - ! global attributes - !----------------------------------------------------------------- - ! ... the user should change these to something useful ... 
- !----------------------------------------------------------------- -#ifdef CESMCOUPLED - status = nf90_put_att(ncid,nf90_global,'title',runid) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: in global attribute title') -#else - title = 'sea ice model output for CICE' - status = nf90_put_att(ncid,nf90_global,'title',title) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: in global attribute title') -#endif - title = 'Diagnostic and Prognostic Variables' - status = nf90_put_att(ncid,nf90_global,'contents',title) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: global attribute contents') - - write(title,'(2a)') 'Los Alamos Sea Ice Model, ', trim(version_name) - status = nf90_put_att(ncid,nf90_global,'source',title) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: global attribute source') - - if (use_leap_years) then - write(title,'(a,i3,a)') 'This year has ',dayyr,' days' - else - write(title,'(a,i3,a)') 'All years have exactly ',dayyr,' days' - endif - status = nf90_put_att(ncid,nf90_global,'comment',title) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: global attribute comment') - - write(title,'(a,i8.8)') 'File written on model date ',idate - status = nf90_put_att(ncid,nf90_global,'comment2',title) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: global attribute date1') - - write(title,'(a,i6)') 'seconds elapsed into model date: ',msec - status = nf90_put_att(ncid,nf90_global,'comment3',title) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: global attribute date2') - - title = 'CF-1.0' - status = & - nf90_put_att(ncid,nf90_global,'conventions',title) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: in global attribute conventions') - - call date_and_time(date=current_date, time=current_time) - write(start_time,1000) current_date(1:4), current_date(5:6), & - current_date(7:8), current_time(1:2), & - current_time(3:4), 
current_time(5:8) -1000 format('This dataset was created on ', & - a,'-',a,'-',a,' at ',a,':',a,':',a) - - status = nf90_put_att(ncid,nf90_global,'history',start_time) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: global attribute history') - - status = nf90_put_att(ncid,nf90_global,'io_flavor','io_netcdf') - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: global attribute io_flavor') - - !----------------------------------------------------------------- - ! end define mode - !----------------------------------------------------------------- - - status = nf90_enddef(ncid) - if (status /= nf90_noerr) call abort_ice(subname//'ERROR in nf90_enddef') - - !----------------------------------------------------------------- - ! write time variable - !----------------------------------------------------------------- - - status = nf90_inq_varid(ncid,'time',varid) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: getting time varid') - status = nf90_put_var(ncid,varid,ltime2) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: writing time variable') - - !----------------------------------------------------------------- - ! write time_bounds info - !----------------------------------------------------------------- - - if (hist_avg) then - status = nf90_inq_varid(ncid,'time_bounds',varid) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: getting time_bounds id') - status = nf90_put_var(ncid,varid,time_beg(ns),start=(/1/)) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: writing time_beg') - status = nf90_put_var(ncid,varid,time_end(ns),start=(/2/)) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: writing time_end') - endif - - endif ! master_task - - if (my_task==master_task) then - allocate(work_g1(nx_global,ny_global)) - else - allocate(work_g1(1,1)) - endif - - work_g1(:,:) = c0 - - !----------------------------------------------------------------- - ! 
write coordinate variables - !----------------------------------------------------------------- - - do i = 1,ncoord - call broadcast_scalar(var_coord(i)%short_name,master_task) - SELECT CASE (var_coord(i)%short_name) - CASE ('TLON') - ! Convert T grid longitude from -180 -> 180 to 0 to 360 - work1 = TLON*rad_to_deg + c360 - where (work1 > c360) work1 = work1 - c360 - where (work1 < c0 ) work1 = work1 + c360 - call gather_global(work_g1,work1,master_task,distrb_info) - CASE ('TLAT') - work1 = TLAT*rad_to_deg - call gather_global(work_g1,work1,master_task,distrb_info) - CASE ('ULON') - work1 = ULON*rad_to_deg - call gather_global(work_g1,work1,master_task,distrb_info) - CASE ('ULAT') - work1 = ULAT*rad_to_deg - call gather_global(work_g1,work1,master_task,distrb_info) - CASE ('NLON') - work1 = NLON*rad_to_deg - call gather_global(work_g1,work1,master_task,distrb_info) - CASE ('NLAT') - work1 = NLAT*rad_to_deg - call gather_global(work_g1,work1,master_task,distrb_info) - CASE ('ELON') - work1 = ELON*rad_to_deg - call gather_global(work_g1,work1,master_task,distrb_info) - CASE ('ELAT') - work1 = ELAT*rad_to_deg - call gather_global(work_g1,work1,master_task,distrb_info) - END SELECT - - if (my_task == master_task) then - status = nf90_inq_varid(ncid, var_coord(i)%short_name, varid) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: getting varid for '//var_coord(i)%short_name) - status = nf90_put_var(ncid,varid,work_g1) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: writing'//var_coord(i)%short_name) - endif - enddo - - ! 
Extra dimensions (NCAT, NFSD, VGRD*) - - do i = 1, nvar_grdz - if (igrdz(i)) then - call broadcast_scalar(var_grdz(i)%short_name,master_task) - if (my_task == master_task) then - status = nf90_inq_varid(ncid, var_grdz(i)%short_name, varid) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: getting varid for '//var_grdz(i)%short_name) - SELECT CASE (var_grdz(i)%short_name) - CASE ('NCAT') - status = nf90_put_var(ncid,varid,hin_max(1:ncat_hist)) - CASE ('NFSD') - status = nf90_put_var(ncid,varid,floe_rad_c(1:nfsd_hist)) - CASE ('VGRDi') ! index - needed for Met Office analysis code - status = nf90_put_var(ncid,varid,(/(k, k=1,nzilyr)/)) - CASE ('VGRDs') ! index - needed for Met Office analysis code - status = nf90_put_var(ncid,varid,(/(k, k=1,nzslyr)/)) - CASE ('VGRDb') - status = nf90_put_var(ncid,varid,(/(k, k=1,nzblyr)/)) - CASE ('VGRDa') - status = nf90_put_var(ncid,varid,(/(k, k=1,nzalyr)/)) - END SELECT - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: writing'//var_grdz(i)%short_name) - endif - endif - enddo - - !----------------------------------------------------------------- - ! 
write grid masks, area and rotation angle - !----------------------------------------------------------------- - - do i = 1, nvar_grd - if (igrd(i)) then - call broadcast_scalar(var_grd(i)%req%short_name,master_task) - SELECT CASE (var_grd(i)%req%short_name) - CASE ('tmask') - call gather_global(work_g1, hm, master_task, distrb_info) - CASE ('umask') - call gather_global(work_g1, uvm, master_task, distrb_info) - CASE ('nmask') - call gather_global(work_g1, npm, master_task, distrb_info) - CASE ('emask') - call gather_global(work_g1, epm, master_task, distrb_info) - CASE ('tarea') - call gather_global(work_g1, tarea, master_task, distrb_info) - CASE ('uarea') - call gather_global(work_g1, uarea, master_task, distrb_info) - CASE ('narea') - call gather_global(work_g1, narea, master_task, distrb_info) - CASE ('earea') - call gather_global(work_g1, earea, master_task, distrb_info) - CASE ('blkmask') - call gather_global(work_g1, bm, master_task, distrb_info) - CASE ('dxu') - call gather_global(work_g1, dxU, master_task, distrb_info) - CASE ('dyu') - call gather_global(work_g1, dyU, master_task, distrb_info) - CASE ('dxt') - call gather_global(work_g1, dxT, master_task, distrb_info) - CASE ('dyt') - call gather_global(work_g1, dyT, master_task, distrb_info) - CASE ('dxn') - call gather_global(work_g1, dxN, master_task, distrb_info) - CASE ('dyn') - call gather_global(work_g1, dyN, master_task, distrb_info) - CASE ('dxe') - call gather_global(work_g1, dxE, master_task, distrb_info) - CASE ('dye') - call gather_global(work_g1, dyE, master_task, distrb_info) - CASE ('HTN') - call gather_global(work_g1, HTN, master_task, distrb_info) - CASE ('HTE') - call gather_global(work_g1, HTE, master_task, distrb_info) - CASE ('ANGLE') - call gather_global(work_g1, ANGLE, master_task, distrb_info) - CASE ('ANGLET') - call gather_global(work_g1, ANGLET,master_task, distrb_info) - END SELECT - - if (my_task == master_task) then - status = nf90_inq_varid(ncid, var_grd(i)%req%short_name, 
varid) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: getting varid for '//var_grd(i)%req%short_name) - status = nf90_put_var(ncid,varid,work_g1) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: writing variable '//var_grd(i)%req%short_name) - endif - endif - enddo - - !---------------------------------------------------------------- - ! Write coordinates of grid box vertices - !---------------------------------------------------------------- - - if (f_bounds) then - if (my_task==master_task) then - allocate(work1_3(nverts,nx_global,ny_global)) - else - allocate(work1_3(1,1,1)) ! to save memory - endif - - work1_3(:,:,:) = c0 - work1 (:,:,:) = c0 - - do i = 1, nvar_verts - call broadcast_scalar(var_nverts(i)%short_name,master_task) - SELECT CASE (var_nverts(i)%short_name) - CASE ('lont_bounds') - do ivertex = 1, nverts - work1(:,:,:) = lont_bounds(ivertex,:,:,:) - call gather_global(work_g1, work1, master_task, distrb_info) - if (my_task == master_task) work1_3(ivertex,:,:) = work_g1(:,:) - enddo - CASE ('latt_bounds') - do ivertex = 1, nverts - work1(:,:,:) = latt_bounds(ivertex,:,:,:) - call gather_global(work_g1, work1, master_task, distrb_info) - if (my_task == master_task) work1_3(ivertex,:,:) = work_g1(:,:) - enddo - CASE ('lonu_bounds') - do ivertex = 1, nverts - work1(:,:,:) = lonu_bounds(ivertex,:,:,:) - call gather_global(work_g1, work1, master_task, distrb_info) - if (my_task == master_task) work1_3(ivertex,:,:) = work_g1(:,:) - enddo - CASE ('latu_bounds') - do ivertex = 1, nverts - work1(:,:,:) = latu_bounds(ivertex,:,:,:) - call gather_global(work_g1, work1, master_task, distrb_info) - if (my_task == master_task) work1_3(ivertex,:,:) = work_g1(:,:) - enddo - CASE ('lonn_bounds') - do ivertex = 1, nverts - work1(:,:,:) = lonn_bounds(ivertex,:,:,:) - call gather_global(work_g1, work1, master_task, distrb_info) - if (my_task == master_task) work1_3(ivertex,:,:) = work_g1(:,:) - enddo - CASE ('latn_bounds') - do ivertex = 
1, nverts - work1(:,:,:) = latn_bounds(ivertex,:,:,:) - call gather_global(work_g1, work1, master_task, distrb_info) - if (my_task == master_task) work1_3(ivertex,:,:) = work_g1(:,:) - enddo - CASE ('lone_bounds') - do ivertex = 1, nverts - work1(:,:,:) = lone_bounds(ivertex,:,:,:) - call gather_global(work_g1, work1, master_task, distrb_info) - if (my_task == master_task) work1_3(ivertex,:,:) = work_g1(:,:) - enddo - CASE ('late_bounds') - do ivertex = 1, nverts - work1(:,:,:) = late_bounds(ivertex,:,:,:) - call gather_global(work_g1, work1, master_task, distrb_info) - if (my_task == master_task) work1_3(ivertex,:,:) = work_g1(:,:) - enddo - END SELECT - - if (my_task == master_task) then - status = nf90_inq_varid(ncid, var_nverts(i)%short_name, varid) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: getting varid for '//var_nverts(i)%short_name) - status = nf90_put_var(ncid,varid,work1_3) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: writing variable '//var_nverts(i)%short_name) - endif - enddo - deallocate(work1_3) - endif - - !----------------------------------------------------------------- - ! write variable data - !----------------------------------------------------------------- - - work_g1(:,:) = c0 - - do n=1,num_avail_hist_fields_2D - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - call gather_global(work_g1, a2D(:,:,n,:), & - master_task, distrb_info) - if (my_task == master_task) then - status = nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: getting varid for '//avail_hist_fields(n)%vname) - status = nf90_put_var(ncid,varid,work_g1, & - count=(/nx_global,ny_global/)) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: writing variable '//avail_hist_fields(n)%vname) - endif - - endif - enddo ! 
num_avail_hist_fields_2D - - work_g1(:,:) = c0 - - do n = n2D + 1, n3Dccum - nn = n - n2D - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - if (my_task == master_task) then - status = nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: getting varid for '//avail_hist_fields(n)%vname) - endif - do k = 1, ncat_hist - call gather_global(work_g1, a3Dc(:,:,k,nn,:), & - master_task, distrb_info) - - if (my_task == master_task) then - status = nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: getting varid for '//avail_hist_fields(n)%vname) - status = nf90_put_var(ncid,varid,work_g1, & - start=(/ 1, 1,k/), & - count=(/nx_global,ny_global,1/)) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: writing variable '//avail_hist_fields(n)%vname) - endif - enddo ! k - endif - enddo ! num_avail_hist_fields_3Dc - - work_g1(:,:) = c0 - - do n = n3Dccum+1, n3Dzcum - nn = n - n3Dccum - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - if (my_task == master_task) then - status = nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: getting varid for '//avail_hist_fields(n)%vname) - endif - do k = 1, nzilyr - call gather_global(work_g1, a3Dz(:,:,k,nn,:), & - master_task, distrb_info) - - if (my_task == master_task) then - status = nf90_put_var(ncid,varid,work_g1, & - start=(/ 1, 1,k/), & - count=(/nx_global,ny_global,1/)) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: writing variable '//avail_hist_fields(n)%vname) - endif - enddo ! k - endif - enddo ! num_avail_hist_fields_3Dz - - work_g1(:,:) = c0 - - do n = n3Dzcum+1, n3Dbcum - nn = n - n3Dzcum - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. 
write_ic) then - if (my_task == master_task) then - status = nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: getting varid for '//avail_hist_fields(n)%vname) - endif - do k = 1, nzblyr - call gather_global(work_g1, a3Db(:,:,k,nn,:), & - master_task, distrb_info) - - if (my_task == master_task) then - status = nf90_put_var(ncid,varid,work_g1, & - start=(/ 1, 1,k/), & - count=(/nx_global,ny_global,1/)) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: writing variable '//avail_hist_fields(n)%vname) - endif - enddo ! k - endif - enddo ! num_avail_hist_fields_3Db - - work_g1(:,:) = c0 - - do n = n3Dbcum+1, n3Dacum - nn = n - n3Dbcum - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - if (my_task == master_task) then - status = nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: getting varid for '//avail_hist_fields(n)%vname) - endif - do k = 1, nzalyr - call gather_global(work_g1, a3Da(:,:,k,nn,:), & - master_task, distrb_info) - - if (my_task == master_task) then - status = nf90_put_var(ncid,varid,work_g1, & - start=(/ 1, 1,k/), & - count=(/nx_global,ny_global,1/)) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: writing variable '//avail_hist_fields(n)%vname) - endif - enddo ! k - endif - enddo ! num_avail_hist_fields_3Da - - work_g1(:,:) = c0 - - do n = n3Dacum+1, n3Dfcum - nn = n - n3Dacum - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. 
write_ic) then - if (my_task == master_task) then - status = nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: getting varid for '//avail_hist_fields(n)%vname) - endif - do k = 1, nfsd_hist - call gather_global(work_g1, a3Df(:,:,k,nn,:), & - master_task, distrb_info) - if (my_task == master_task) then - status = nf90_put_var(ncid,varid,work_g1, & - start=(/ 1, 1,k/), & - count=(/nx_global,ny_global,1/)) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: writing variable '//avail_hist_fields(n)%vname) - endif - enddo ! k - endif - enddo ! num_avail_hist_fields_3Df - - work_g1(:,:) = c0 - - do n = n3Dfcum+1, n4Dicum - nn = n - n3Dfcum - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - if (my_task == master_task) then - status = nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: getting varid for '//avail_hist_fields(n)%vname) - endif - do ic = 1, ncat_hist - do k = 1, nzilyr - call gather_global(work_g1, a4Di(:,:,k,ic,nn,:), & - master_task, distrb_info) - if (my_task == master_task) then - status = nf90_put_var(ncid,varid,work_g1, & - start=(/ 1, 1,k,ic/), & - count=(/nx_global,ny_global,1, 1/)) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: writing variable '//avail_hist_fields(n)%vname) - endif - enddo ! k - enddo ! ic - endif - enddo ! num_avail_hist_fields_4Di - - work_g1(:,:) = c0 - - do n = n4Dicum+1, n4Dscum - nn = n - n4Dicum - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. 
write_ic) then - if (my_task == master_task) then - status = nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: getting varid for '//avail_hist_fields(n)%vname) - endif - do ic = 1, ncat_hist - do k = 1, nzslyr - call gather_global(work_g1, a4Ds(:,:,k,ic,nn,:), & - master_task, distrb_info) - if (my_task == master_task) then - status = nf90_put_var(ncid,varid,work_g1, & - start=(/ 1, 1,k,ic/), & - count=(/nx_global,ny_global,1, 1/)) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: writing variable '//avail_hist_fields(n)%vname) - endif - enddo ! k - enddo ! ic - endif - enddo ! num_avail_hist_fields_4Ds - - do n = n4Dscum+1, n4Dfcum - nn = n - n4Dscum - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - if (my_task == master_task) then - status = nf90_inq_varid(ncid,avail_hist_fields(n)%vname,varid) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: getting varid for '//avail_hist_fields(n)%vname) - endif - do ic = 1, ncat_hist - do k = 1, nfsd_hist - call gather_global(work_g1, a4Df(:,:,k,ic,nn,:), & - master_task, distrb_info) - if (my_task == master_task) then - status = nf90_put_var(ncid,varid,work_g1, & - start=(/ 1, 1,k,ic/), & - count=(/nx_global,ny_global,1, 1/)) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: writing variable '//avail_hist_fields(n)%vname) - endif - enddo ! k - enddo ! ic - endif - enddo ! num_avail_hist_fields_4Df - - deallocate(work_g1) - - !----------------------------------------------------------------- - ! 
close output dataset - !----------------------------------------------------------------- - - if (my_task == master_task) then - status = nf90_close(ncid) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: closing netCDF history file') - write(nu_diag,*) ' ' - write(nu_diag,*) 'Finished writing ',trim(ncfile(ns)) - endif - -#else - call abort_ice(subname//'ERROR: USE_NETCDF cpp not defined', & - file=__FILE__, line=__LINE__) -#endif - - end subroutine ice_write_hist - -!======================================================================= - - subroutine ice_write_hist_attrs(ncid, varid, hfield, ns) - - use ice_kinds_mod - use ice_calendar, only: histfreq, histfreq_n - use ice_history_shared, only: ice_hist_field, history_precision, & - hist_avg -#ifdef USE_NETCDF - use netcdf -#endif - - integer (kind=int_kind), intent(in) :: ncid ! netcdf file id - integer (kind=int_kind), intent(in) :: varid ! netcdf variable id - type (ice_hist_field) , intent(in) :: hfield ! history file info - integer (kind=int_kind), intent(in) :: ns ! history stream - - ! 
local variables - - integer (kind=int_kind) :: status - character(len=*), parameter :: subname = '(ice_write_hist_attrs)' - -#ifdef USE_NETCDF - status = nf90_put_att(ncid,varid,'units', hfield%vunit) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining units for '//hfield%vname) - - status = nf90_put_att(ncid,varid, 'long_name', hfield%vdesc) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining long_name for '//hfield%vname) - - status = nf90_put_att(ncid,varid,'coordinates', hfield%vcoord) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining coordinates for '//hfield%vname) - - status = nf90_put_att(ncid,varid,'cell_measures', hfield%vcellmeas) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining cell measures for '//hfield%vname) - - if (hfield%vcomment /= "none") then - status = nf90_put_att(ncid,varid,'comment', hfield%vcomment) - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining comment for '//hfield%vname) - endif - - call ice_write_hist_fill(ncid,varid,hfield%vname,history_precision) - - ! Add cell_methods attribute to variables if averaged - if (hist_avg) then - if (TRIM(hfield%vname(1:4))/='sig1' & - .and.TRIM(hfield%vname(1:4))/='sig2' & - .and.TRIM(hfield%vname(1:9))/='sistreave' & - .and.TRIM(hfield%vname(1:9))/='sistremax' & - .and.TRIM(hfield%vname(1:4))/='sigP') then - status = nf90_put_att(ncid,varid,'cell_methods','time: mean') - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining cell methods for '//hfield%vname) - endif - endif - - if ((histfreq(ns) == '1' .and. histfreq_n(ns) == 1) & - .or..not. 
hist_avg & - .or.TRIM(hfield%vname(1:4))=='divu' & - .or.TRIM(hfield%vname(1:5))=='shear' & - .or.TRIM(hfield%vname(1:4))=='sig1' & - .or.TRIM(hfield%vname(1:4))=='sig2' & - .or.TRIM(hfield%vname(1:4))=='sigP' & - .or.TRIM(hfield%vname(1:5))=='trsig' & - .or.TRIM(hfield%vname(1:9))=='sistreave' & - .or.TRIM(hfield%vname(1:9))=='sistremax' & - .or.TRIM(hfield%vname(1:9))=='mlt_onset' & - .or.TRIM(hfield%vname(1:9))=='frz_onset' & - .or.TRIM(hfield%vname(1:6))=='hisnap' & - .or.TRIM(hfield%vname(1:6))=='aisnap') then - status = nf90_put_att(ncid,varid,'time_rep','instantaneous') - else - status = nf90_put_att(ncid,varid,'time_rep','averaged') - endif - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining time rep for '//hfield%vname) - -#else - call abort_ice(subname//'ERROR: USE_NETCDF cpp not defined', & - file=__FILE__, line=__LINE__) -#endif - - end subroutine ice_write_hist_attrs - -!======================================================================= - - subroutine ice_write_hist_fill(ncid,varid,vname,precision) - - use ice_kinds_mod -#ifdef USE_NETCDF - use netcdf -#endif - - integer (kind=int_kind), intent(in) :: ncid ! netcdf file id - integer (kind=int_kind), intent(in) :: varid ! netcdf var id - character(len=*), intent(in) :: vname ! var name - integer (kind=int_kind), intent(in) :: precision ! precision - - ! 
local variables - - integer (kind=int_kind) :: status - character(len=*), parameter :: subname = '(ice_write_hist_fill)' - -#ifdef USE_NETCDF - if (precision == 8) then - status = nf90_put_att(ncid,varid,'missing_value',spval_dbl) - else - status = nf90_put_att(ncid,varid,'missing_value',spval) - endif - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining missing_value for '//trim(vname)) - - if (precision == 8) then - status = nf90_put_att(ncid,varid,'_FillValue',spval_dbl) - else - status = nf90_put_att(ncid,varid,'_FillValue',spval) - endif - if (status /= nf90_noerr) call abort_ice(subname// & - 'ERROR: defining _FillValue for '//trim(vname)) -#else - call abort_ice(subname//'ERROR: USE_NETCDF cpp not defined', & - file=__FILE__, line=__LINE__) -#endif - - end subroutine ice_write_hist_fill - -!======================================================================= - - end module ice_history_write - -!======================================================================= diff --git a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_history_write.F90 b/cicecore/cicedynB/infrastructure/io/io_pio2/ice_history_write.F90 deleted file mode 100644 index 6407d8c76..000000000 --- a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_history_write.F90 +++ /dev/null @@ -1,1298 +0,0 @@ -!======================================================================= -! -! Writes history in netCDF format -! -! authors Tony Craig and Bruce Briegleb, NCAR -! Elizabeth C. Hunke and William H. Lipscomb, LANL -! C. M. Bitz, UW -! -! 2004 WHL: Block structure added -! 2006 ECH: Accepted some CESM code into mainstream CICE -! Added ice_present, aicen, vicen; removed aice1...10, vice1...1. -! Added histfreq_n and histfreq='h' options, removed histfreq='w' -! Converted to free source form (F90) -! Added option for binary output instead of netCDF -! 2009 D Bailey and ECH: Generalized for multiple frequency output -! 2010 Alison McLaren and ECH: Added 3D capability -! 
- module ice_history_write - - use ice_kinds_mod - use ice_constants, only: c0, c360, spval, spval_dbl - use ice_fileunits, only: nu_diag - use ice_exit, only: abort_ice - use icepack_intfc, only: icepack_warnings_flush, icepack_warnings_aborted - use icepack_intfc, only: icepack_query_parameters - - implicit none - private - public :: ice_write_hist - -!======================================================================= - - contains - -!======================================================================= -! -! write average ice quantities or snapshots -! -! author: Elizabeth C. Hunke, LANL - - subroutine ice_write_hist (ns) - - use ice_blocks, only: nx_block, ny_block - use ice_broadcast, only: broadcast_scalar - use ice_calendar, only: msec, timesecs, idate, idate0, write_ic, & - histfreq, histfreq_n, days_per_year, use_leap_years, dayyr, & - hh_init, mm_init, ss_init - use ice_communicate, only: my_task, master_task - use ice_domain, only: distrb_info, nblocks - use ice_domain_size, only: nx_global, ny_global, max_blocks, max_nstrm - use ice_gather_scatter, only: gather_global - use ice_grid, only: TLON, TLAT, ULON, ULAT, NLON, NLAT, ELON, ELAT, & - hm, bm, uvm, npm, epm, & - dxU, dxT, dyU, dyT, dxN, dyN, dxE, dyE, HTN, HTE, ANGLE, ANGLET, & - tarea, uarea, narea, earea, tmask, umask, nmask, emask, & - lont_bounds, latt_bounds, lonu_bounds, latu_bounds, & - lonn_bounds, latn_bounds, lone_bounds, late_bounds - use ice_history_shared - use ice_arrays_column, only: hin_max, floe_rad_c - use ice_restart_shared, only: runid, lcdf64 - use ice_pio - use pio - - integer (kind=int_kind), intent(in) :: ns - - ! 
local variables - - integer (kind=int_kind) :: i,j,k,ic,n,nn, & - ncid,status,imtid,jmtid,kmtidi,kmtids,kmtidb, cmtid,timid, & - length,nvertexid,ivertex,kmtida,fmtid - integer (kind=int_kind), dimension(2) :: dimid2 - integer (kind=int_kind), dimension(3) :: dimid3 - integer (kind=int_kind), dimension(4) :: dimidz - integer (kind=int_kind), dimension(5) :: dimidcz - integer (kind=int_kind), dimension(3) :: dimid_nverts - integer (kind=int_kind), dimension(6) :: dimidex - real (kind= dbl_kind) :: ltime2 - character (char_len) :: title - character (char_len_long) :: ncfile(max_nstrm) - integer (kind=int_kind) :: iotype - - integer (kind=int_kind) :: icategory,ind,i_aice,boundid - - character (char_len) :: start_time,current_date,current_time - character (len=16) :: c_aice - character (len=8) :: cdate - - type(file_desc_t) :: File - type(io_desc_t) :: iodesc2d, & - iodesc3dc, iodesc3dv, iodesc3di, iodesc3db, iodesc3da, & - iodesc3df, & - iodesc4di, iodesc4ds, iodesc4df - type(var_desc_t) :: varid - - ! 8 coordinate variables: TLON, TLAT, ULON, ULAT, NLON, NLAT, ELON, ELAT - INTEGER (kind=int_kind), PARAMETER :: ncoord = 8 - - ! 4 vertices in each grid cell - INTEGER (kind=int_kind), PARAMETER :: nverts = 4 - - ! 8 variables describe T, U, N, E grid boundaries: - ! lont_bounds, latt_bounds, lonu_bounds, latu_bounds - ! lonn_bounds, latn_bounds, lone_bounds, late_bounds - INTEGER (kind=int_kind), PARAMETER :: nvar_verts = 8 - - TYPE coord_attributes ! netcdf coordinate attributes - character (len=11) :: short_name - character (len=45) :: long_name - character (len=20) :: units - END TYPE coord_attributes - - TYPE req_attributes ! 
req'd netcdf attributes - type (coord_attributes) :: req - character (len=20) :: coordinates - END TYPE req_attributes - - TYPE(req_attributes), dimension(nvar_grd) :: var_grd - TYPE(coord_attributes), dimension(ncoord) :: var_coord - TYPE(coord_attributes), dimension(nvar_verts) :: var_nverts - TYPE(coord_attributes), dimension(nvar_grdz) :: var_grdz - CHARACTER (char_len), dimension(ncoord) :: coord_bounds - - real (kind=dbl_kind) , allocatable :: workd2(:,:,:) - real (kind=dbl_kind) , allocatable :: workd3(:,:,:,:) - real (kind=dbl_kind) , allocatable :: workd4(:,:,:,:,:) - real (kind=dbl_kind) , allocatable :: workd3v(:,:,:,:) - - real (kind=real_kind), allocatable :: workr2(:,:,:) - real (kind=real_kind), allocatable :: workr3(:,:,:,:) - real (kind=real_kind), allocatable :: workr4(:,:,:,:,:) - real (kind=real_kind), allocatable :: workr3v(:,:,:,:) - - character(len=char_len_long) :: & - filename - - integer (kind=int_kind), dimension(1) :: & - tim_start,tim_length ! dimension quantities for netCDF - - integer (kind=int_kind), dimension(2) :: & - bnd_start,bnd_length ! dimension quantities for netCDF - - real (kind=dbl_kind) :: secday - real (kind=dbl_kind) :: rad_to_deg - - integer (kind=int_kind) :: lprecision - - character(len=*), parameter :: subname = '(ice_write_hist)' - - call icepack_query_parameters(secday_out=secday) - call icepack_query_parameters(rad_to_deg_out=rad_to_deg) - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) - - if (my_task == master_task) then - call construct_filename(ncfile(ns),'nc',ns) - - ! add local directory path name to ncfile - if (write_ic) then - ncfile(ns) = trim(incond_dir)//ncfile(ns) - else - ncfile(ns) = trim(history_dir)//ncfile(ns) - endif - filename = ncfile(ns) - end if - call broadcast_scalar(filename, master_task) - - ! 
create file - - iotype = PIO_IOTYPE_NETCDF - if (history_format == 'pio_pnetcdf') iotype = PIO_IOTYPE_PNETCDF - File%fh=-1 - call ice_pio_init(mode='write', filename=trim(filename), File=File, & - clobber=.true., cdf64=lcdf64, iotype=iotype) - - call ice_pio_initdecomp(iodesc=iodesc2d, precision=history_precision) - call ice_pio_initdecomp(ndim3=ncat_hist, iodesc=iodesc3dc, precision=history_precision) - call ice_pio_initdecomp(ndim3=nzilyr, iodesc=iodesc3di, precision=history_precision) - call ice_pio_initdecomp(ndim3=nzblyr, iodesc=iodesc3db, precision=history_precision) - call ice_pio_initdecomp(ndim3=nzalyr, iodesc=iodesc3da, precision=history_precision) - call ice_pio_initdecomp(ndim3=nfsd_hist, iodesc=iodesc3df, precision=history_precision) - call ice_pio_initdecomp(ndim3=nverts, iodesc=iodesc3dv, inner_dim=.true., precision=history_precision) - call ice_pio_initdecomp(ndim3=nzilyr, ndim4=ncat_hist, iodesc=iodesc4di, precision=history_precision) - call ice_pio_initdecomp(ndim3=nzslyr, ndim4=ncat_hist, iodesc=iodesc4ds, precision=history_precision) - call ice_pio_initdecomp(ndim3=nfsd_hist, ndim4=ncat_hist, iodesc=iodesc4df, precision=history_precision) - - ltime2 = timesecs/secday - - ! option of turning on double precision history files - lprecision = pio_real - if (history_precision == 8) lprecision = pio_double - - !----------------------------------------------------------------- - ! 
define dimensions - !----------------------------------------------------------------- - - if (hist_avg) then - status = pio_def_dim(File,'d2',2,boundid) - endif - - status = pio_def_dim(File,'ni',nx_global,imtid) - status = pio_def_dim(File,'nj',ny_global,jmtid) - status = pio_def_dim(File,'nc',ncat_hist,cmtid) - status = pio_def_dim(File,'nkice',nzilyr,kmtidi) - status = pio_def_dim(File,'nksnow',nzslyr,kmtids) - status = pio_def_dim(File,'nkbio',nzblyr,kmtidb) - status = pio_def_dim(File,'nkaer',nzalyr,kmtida) - status = pio_def_dim(File,'time',PIO_UNLIMITED,timid) - status = pio_def_dim(File,'nvertices',nverts,nvertexid) - status = pio_def_dim(File,'nf',nfsd_hist,fmtid) - - !----------------------------------------------------------------- - ! define coordinate variables: time, time_bounds - !----------------------------------------------------------------- - - status = pio_def_var(File,'time',pio_double,(/timid/),varid) - status = pio_put_att(File,varid,'long_name','model time') - - write(cdate,'(i8.8)') idate0 - write(title,'(a,a4,a1,a2,a1,a2,a1,i2.2,a1,i2.2,a1,i2.2)') 'days since ', & - cdate(1:4),'-',cdate(5:6),'-',cdate(7:8),' ', & - hh_init,':',mm_init,':',ss_init - status = pio_put_att(File,varid,'units',trim(title)) - - if (days_per_year == 360) then - status = pio_put_att(File,varid,'calendar','360_day') - elseif (days_per_year == 365 .and. .not.use_leap_years ) then - status = pio_put_att(File,varid,'calendar','NoLeap') - elseif (use_leap_years) then - status = pio_put_att(File,varid,'calendar','Gregorian') - else - call abort_ice(subname//'ERROR: invalid calendar settings') - endif - - if (hist_avg) then - status = pio_put_att(File,varid,'bounds','time_bounds') - endif - - ! 
Define attributes for time_bounds if hist_avg is true - if (hist_avg) then - dimid2(1) = boundid - dimid2(2) = timid - status = pio_def_var(File,'time_bounds',pio_double,dimid2,varid) - status = pio_put_att(File,varid,'long_name', & - 'boundaries for time-averaging interval') - write(cdate,'(i8.8)') idate0 - write(title,'(a,a4,a1,a2,a1,a2,a1,i2.2,a1,i2.2,a1,i2.2)') 'days since ', & - cdate(1:4),'-',cdate(5:6),'-',cdate(7:8),' ', & - hh_init,':',mm_init,':',ss_init - status = pio_put_att(File,varid,'units',trim(title)) - endif - - !----------------------------------------------------------------- - ! define information for required time-invariant variables - !----------------------------------------------------------------- - - ind = 0 - ind = ind + 1 - var_coord(ind) = coord_attributes('TLON', & - 'T grid center longitude', 'degrees_east') - coord_bounds(ind) = 'lont_bounds' - ind = ind + 1 - var_coord(ind) = coord_attributes('TLAT', & - 'T grid center latitude', 'degrees_north') - coord_bounds(ind) = 'latt_bounds' - ind = ind + 1 - var_coord(ind) = coord_attributes('ULON', & - 'U grid center longitude', 'degrees_east') - coord_bounds(ind) = 'lonu_bounds' - ind = ind + 1 - var_coord(ind) = coord_attributes('ULAT', & - 'U grid center latitude', 'degrees_north') - coord_bounds(ind) = 'latu_bounds' - ind = ind + 1 - var_coord(ind) = coord_attributes('NLON', & - 'N grid center longitude', 'degrees_east') - coord_bounds(ind) = 'lonn_bounds' - ind = ind + 1 - var_coord(ind) = coord_attributes('NLAT', & - 'N grid center latitude', 'degrees_north') - coord_bounds(ind) = 'latn_bounds' - ind = ind + 1 - var_coord(ind) = coord_attributes('ELON', & - 'E grid center longitude', 'degrees_east') - coord_bounds(ind) = 'lone_bounds' - ind = ind + 1 - var_coord(ind) = coord_attributes('ELAT', & - 'E grid center latitude', 'degrees_north') - coord_bounds(ind) = 'late_bounds' - - var_grdz(1) = coord_attributes('NCAT', 'category maximum thickness', 'm') - var_grdz(2) = 
coord_attributes('VGRDi', 'vertical ice levels', '1') - var_grdz(3) = coord_attributes('VGRDs', 'vertical snow levels', '1') - var_grdz(4) = coord_attributes('VGRDb', 'vertical ice-bio levels', '1') - var_grdz(5) = coord_attributes('VGRDa', 'vertical snow-ice-bio levels', '1') - var_grdz(6) = coord_attributes('NFSD', 'category floe size (center)', 'm') - - !----------------------------------------------------------------- - ! define information for optional time-invariant variables - !----------------------------------------------------------------- - - var_grd(n_tmask)%req = coord_attributes('tmask', & - 'mask of T grid cells, 0 = land, 1 = ocean', 'unitless') - var_grd(n_tmask)%coordinates = 'TLON TLAT' - var_grd(n_umask)%req = coord_attributes('umask', & - 'mask of U grid cells, 0 = land, 1 = ocean', 'unitless') - var_grd(n_umask)%coordinates = 'ULON ULAT' - var_grd(n_nmask)%req = coord_attributes('nmask', & - 'mask of N grid cells, 0 = land, 1 = ocean', 'unitless') - var_grd(n_nmask)%coordinates = 'NLON NLAT' - var_grd(n_emask)%req = coord_attributes('emask', & - 'mask of E grid cells, 0 = land, 1 = ocean', 'unitless') - var_grd(n_emask)%coordinates = 'ELON ELAT' - - var_grd(n_blkmask)%req = coord_attributes('blkmask', & - 'ice grid block mask, mytask + iblk/100', 'unitless') - var_grd(n_blkmask)%coordinates = 'TLON TLAT' - - var_grd(n_tarea)%req = coord_attributes('tarea', & - 'area of T grid cells', 'm^2') - var_grd(n_tarea)%coordinates = 'TLON TLAT' - var_grd(n_uarea)%req = coord_attributes('uarea', & - 'area of U grid cells', 'm^2') - var_grd(n_uarea)%coordinates = 'ULON ULAT' - var_grd(n_narea)%req = coord_attributes('narea', & - 'area of N grid cells', 'm^2') - var_grd(n_narea)%coordinates = 'NLON NLAT' - var_grd(n_earea)%req = coord_attributes('earea', & - 'area of E grid cells', 'm^2') - var_grd(n_earea)%coordinates = 'ELON ELAT' - - var_grd(n_dxt)%req = coord_attributes('dxt', & - 'T cell width through middle', 'm') - var_grd(n_dxt)%coordinates = 'TLON 
TLAT' - var_grd(n_dyt)%req = coord_attributes('dyt', & - 'T cell height through middle', 'm') - var_grd(n_dyt)%coordinates = 'TLON TLAT' - var_grd(n_dxu)%req = coord_attributes('dxu', & - 'U cell width through middle', 'm') - var_grd(n_dxu)%coordinates = 'ULON ULAT' - var_grd(n_dyu)%req = coord_attributes('dyu', & - 'U cell height through middle', 'm') - var_grd(n_dyu)%coordinates = 'ULON ULAT' - var_grd(n_dxn)%req = coord_attributes('dxn', & - 'N cell width through middle', 'm') - var_grd(n_dxn)%coordinates = 'NLON NLAT' - var_grd(n_dyn)%req = coord_attributes('dyn', & - 'N cell height through middle', 'm') - var_grd(n_dyn)%coordinates = 'NLON NLAT' - var_grd(n_dxe)%req = coord_attributes('dxe', & - 'E cell width through middle', 'm') - var_grd(n_dxe)%coordinates = 'ELON ELAT' - var_grd(n_dye)%req = coord_attributes('dye', & - 'E cell height through middle', 'm') - var_grd(n_dye)%coordinates = 'ELON ELAT' - - var_grd(n_HTN)%req = coord_attributes('HTN', & - 'T cell width on North side','m') - var_grd(n_HTN)%coordinates = 'TLON TLAT' - var_grd(n_HTE)%req = coord_attributes('HTE', & - 'T cell width on East side', 'm') - var_grd(n_HTE)%coordinates = 'TLON TLAT' - var_grd(n_ANGLE)%req = coord_attributes('ANGLE', & - 'angle grid makes with latitude line on U grid', & - 'radians') - var_grd(n_ANGLE)%coordinates = 'ULON ULAT' - var_grd(n_ANGLET)%req = coord_attributes('ANGLET', & - 'angle grid makes with latitude line on T grid', & - 'radians') - var_grd(n_ANGLET)%coordinates = 'TLON TLAT' - - ! These fields are required for CF compliance - ! 
dimensions (nx,ny,nverts) - var_nverts(n_lont_bnds) = coord_attributes('lont_bounds', & - 'longitude boundaries of T cells', 'degrees_east') - var_nverts(n_latt_bnds) = coord_attributes('latt_bounds', & - 'latitude boundaries of T cells', 'degrees_north') - var_nverts(n_lonu_bnds) = coord_attributes('lonu_bounds', & - 'longitude boundaries of U cells', 'degrees_east') - var_nverts(n_latu_bnds) = coord_attributes('latu_bounds', & - 'latitude boundaries of U cells', 'degrees_north') - var_nverts(n_lonn_bnds) = coord_attributes('lonn_bounds', & - 'longitude boundaries of N cells', 'degrees_east') - var_nverts(n_latn_bnds) = coord_attributes('latn_bounds', & - 'latitude boundaries of N cells', 'degrees_north') - var_nverts(n_lone_bnds) = coord_attributes('lone_bounds', & - 'longitude boundaries of E cells', 'degrees_east') - var_nverts(n_late_bnds) = coord_attributes('late_bounds', & - 'latitude boundaries of E cells', 'degrees_north') - - !----------------------------------------------------------------- - ! define attributes for time-invariant variables - !----------------------------------------------------------------- - - dimid2(1) = imtid - dimid2(2) = jmtid - - do i = 1, ncoord - status = pio_def_var(File, trim(var_coord(i)%short_name), lprecision, & - dimid2, varid) - status = pio_put_att(File,varid,'long_name',trim(var_coord(i)%long_name)) - status = pio_put_att(File, varid, 'units', trim(var_coord(i)%units)) - call ice_write_hist_fill(File,varid,var_coord(i)%short_name,history_precision) - if (var_coord(i)%short_name == 'ULAT') then - status = pio_put_att(File,varid,'comment', & - trim('Latitude of NE corner of T grid cell')) - endif - if (f_bounds) then - status = pio_put_att(File, varid, 'bounds', trim(coord_bounds(i))) - endif - enddo - - ! 
Extra dimensions (NCAT, NZILYR, NZSLYR, NZBLYR, NZALYR, NFSD) - dimidex(1)=cmtid - dimidex(2)=kmtidi - dimidex(3)=kmtids - dimidex(4)=kmtidb - dimidex(5)=kmtida - dimidex(6)=fmtid - - do i = 1, nvar_grdz - if (igrdz(i)) then - status = pio_def_var(File, trim(var_grdz(i)%short_name), lprecision, & - (/dimidex(i)/), varid) - status = pio_put_att(File, varid, 'long_name', var_grdz(i)%long_name) - status = pio_put_att(File, varid, 'units' , var_grdz(i)%units) - endif - enddo - - do i = 1, nvar_grd - if (igrd(i)) then - status = pio_def_var(File, trim(var_grd(i)%req%short_name), & - lprecision, dimid2, varid) - status = pio_put_att(File,varid, 'long_name', trim(var_grd(i)%req%long_name)) - status = pio_put_att(File, varid, 'units', trim(var_grd(i)%req%units)) - status = pio_put_att(File, varid, 'coordinates', trim(var_grd(i)%coordinates)) - call ice_write_hist_fill(File,varid,var_grd(i)%req%short_name,history_precision) - endif - enddo - - ! Fields with dimensions (nverts,nx,ny) - dimid_nverts(1) = nvertexid - dimid_nverts(2) = imtid - dimid_nverts(3) = jmtid - do i = 1, nvar_verts - if (f_bounds) then - status = pio_def_var(File, trim(var_nverts(i)%short_name), & - lprecision,dimid_nverts, varid) - status = & - pio_put_att(File,varid, 'long_name', trim(var_nverts(i)%long_name)) - status = & - pio_put_att(File, varid, 'units', trim(var_nverts(i)%units)) - call ice_write_hist_fill(File,varid,var_nverts(i)%short_name,history_precision) - endif - enddo - - !----------------------------------------------------------------- - ! define attributes for time-variant variables - !----------------------------------------------------------------- - - !----------------------------------------------------------------- - ! 2D - !----------------------------------------------------------------- - - dimid3(1) = imtid - dimid3(2) = jmtid - dimid3(3) = timid - - do n=1,num_avail_hist_fields_2D - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. 
write_ic) then - status = pio_def_var(File, trim(avail_hist_fields(n)%vname), & - lprecision, dimid3, varid) - call ice_write_hist_attrs(File,varid,avail_hist_fields(n),ns) - endif - enddo ! num_avail_hist_fields_2D - - !----------------------------------------------------------------- - ! 3D (category) - !----------------------------------------------------------------- - - dimidz(1) = imtid - dimidz(2) = jmtid - dimidz(3) = cmtid - dimidz(4) = timid - - do n = n2D + 1, n3Dccum - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - status = pio_def_var(File, trim(avail_hist_fields(n)%vname), & - lprecision, dimidz, varid) - call ice_write_hist_attrs(File,varid,avail_hist_fields(n),ns) - endif - enddo ! num_avail_hist_fields_3Dc - - !----------------------------------------------------------------- - ! 3D (ice layers) - !----------------------------------------------------------------- - - dimidz(1) = imtid - dimidz(2) = jmtid - dimidz(3) = kmtidi - dimidz(4) = timid - - do n = n3Dccum + 1, n3Dzcum - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - status = pio_def_var(File, trim(avail_hist_fields(n)%vname), & - lprecision, dimidz, varid) - call ice_write_hist_attrs(File,varid,avail_hist_fields(n),ns) - endif - enddo ! num_avail_hist_fields_3Dz - - !----------------------------------------------------------------- - ! 3D (biology ice layers) - !----------------------------------------------------------------- - - dimidz(1) = imtid - dimidz(2) = jmtid - dimidz(3) = kmtidb - dimidz(4) = timid - - do n = n3Dzcum + 1, n3Dbcum - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - status = pio_def_var(File, trim(avail_hist_fields(n)%vname), & - lprecision, dimidz, varid) - call ice_write_hist_attrs(File,varid,avail_hist_fields(n),ns) - endif - enddo ! num_avail_hist_fields_3Db - - !----------------------------------------------------------------- - ! 
3D (biology snow layers) - !----------------------------------------------------------------- - - dimidz(1) = imtid - dimidz(2) = jmtid - dimidz(3) = kmtida - dimidz(4) = timid - - do n = n3Dbcum + 1, n3Dacum - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - status = pio_def_var(File, trim(avail_hist_fields(n)%vname), & - lprecision, dimidz, varid) - call ice_write_hist_attrs(File,varid,avail_hist_fields(n),ns) - endif - enddo ! num_avail_hist_fields_3Da - - !----------------------------------------------------------------- - ! 3D (fsd) - !----------------------------------------------------------------- - - dimidz(1) = imtid - dimidz(2) = jmtid - dimidz(3) = fmtid - dimidz(4) = timid - - do n = n3Dacum + 1, n3Dfcum - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - status = pio_def_var(File, trim(avail_hist_fields(n)%vname), & - lprecision, dimidz, varid) - call ice_write_hist_attrs(File,varid,avail_hist_fields(n),ns) - endif - enddo ! num_avail_hist_fields_3Df - - !----------------------------------------------------------------- - ! define attributes for 4D variables - ! time coordinate is dropped - !----------------------------------------------------------------- - - !----------------------------------------------------------------- - ! 4D (ice categories) - !----------------------------------------------------------------- - - dimidcz(1) = imtid - dimidcz(2) = jmtid - dimidcz(3) = kmtidi - dimidcz(4) = cmtid - dimidcz(5) = timid - - do n = n3Dfcum + 1, n4Dicum - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - status = pio_def_var(File, trim(avail_hist_fields(n)%vname), & - lprecision, dimidcz, varid) - call ice_write_hist_attrs(File,varid,avail_hist_fields(n),ns) - endif - enddo ! num_avail_hist_fields_4Di - - !----------------------------------------------------------------- - ! 
4D (snow layers) - !----------------------------------------------------------------- - - dimidcz(1) = imtid - dimidcz(2) = jmtid - dimidcz(3) = kmtids - dimidcz(4) = cmtid - dimidcz(5) = timid - - do n = n4Dicum + 1, n4Dscum - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - status = pio_def_var(File, trim(avail_hist_fields(n)%vname), & - lprecision, dimidcz, varid) - call ice_write_hist_attrs(File,varid,avail_hist_fields(n),ns) - endif - enddo ! num_avail_hist_fields_4Ds - - - !----------------------------------------------------------------- - ! 4D (fsd layers) - !----------------------------------------------------------------- - - dimidcz(1) = imtid - dimidcz(2) = jmtid - dimidcz(3) = fmtid - dimidcz(4) = cmtid - dimidcz(5) = timid - - do n = n4Dscum + 1, n4Dfcum - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - status = pio_def_var(File, trim(avail_hist_fields(n)%vname), & - lprecision, dimidcz, varid) - call ice_write_hist_attrs(File,varid,avail_hist_fields(n),ns) - endif - enddo ! num_avail_hist_fields_4Df - - !----------------------------------------------------------------- - ! global attributes - !----------------------------------------------------------------- - ! ... the user should change these to something useful ... 
- !----------------------------------------------------------------- -#ifdef CESMCOUPLED - status = pio_put_att(File,pio_global,'title',runid) -#else - title = 'sea ice model output for CICE' - status = pio_put_att(File,pio_global,'title',trim(title)) -#endif - title = 'Diagnostic and Prognostic Variables' - status = pio_put_att(File,pio_global,'contents',trim(title)) - - write(title,'(2a)') 'Los Alamos Sea Ice Model, ', trim(version_name) - status = pio_put_att(File,pio_global,'source',trim(title)) - - if (use_leap_years) then - write(title,'(a,i3,a)') 'This year has ',dayyr,' days' - else - write(title,'(a,i3,a)') 'All years have exactly ',dayyr,' days' - endif - status = pio_put_att(File,pio_global,'comment',trim(title)) - - write(title,'(a,i8.8)') 'File written on model date ',idate - status = pio_put_att(File,pio_global,'comment2',trim(title)) - - write(title,'(a,i6)') 'seconds elapsed into model date: ',msec - status = pio_put_att(File,pio_global,'comment3',trim(title)) - - title = 'CF-1.0' - status = & - pio_put_att(File,pio_global,'conventions',trim(title)) - - call date_and_time(date=current_date, time=current_time) - write(start_time,1000) current_date(1:4), current_date(5:6), & - current_date(7:8), current_time(1:2), & - current_time(3:4) -1000 format('This dataset was created on ', & - a,'-',a,'-',a,' at ',a,':',a) - status = pio_put_att(File,pio_global,'history',trim(start_time)) - - if (history_format == 'pio_pnetcdf') then - status = pio_put_att(File,pio_global,'io_flavor','io_pio pnetcdf') - else - status = pio_put_att(File,pio_global,'io_flavor','io_pio netcdf') - endif - - !----------------------------------------------------------------- - ! end define mode - !----------------------------------------------------------------- - - status = pio_enddef(File) - - !----------------------------------------------------------------- - ! 
write time variable - !----------------------------------------------------------------- - - status = pio_inq_varid(File,'time',varid) - status = pio_put_var(File,varid,(/1/),ltime2) - - !----------------------------------------------------------------- - ! write time_bounds info - !----------------------------------------------------------------- - - if (hist_avg) then - status = pio_inq_varid(File,'time_bounds',varid) - time_bounds=(/time_beg(ns),time_end(ns)/) - bnd_start = (/1,1/) - bnd_length = (/2,1/) - status = pio_put_var(File,varid,ival=time_bounds, & - start=bnd_start(:),count=bnd_length(:)) - endif - - !----------------------------------------------------------------- - ! write coordinate variables - !----------------------------------------------------------------- - - allocate(workd2(nx_block,ny_block,nblocks)) - allocate(workr2(nx_block,ny_block,nblocks)) - - do i = 1,ncoord - status = pio_inq_varid(File, var_coord(i)%short_name, varid) - SELECT CASE (var_coord(i)%short_name) - CASE ('TLON') - ! Convert T grid longitude from -180 -> 180 to 0 to 360 - workd2(:,:,:) = mod(tlon(:,:,1:nblocks)*rad_to_deg + c360, c360) - CASE ('TLAT') - workd2(:,:,:) = tlat(:,:,1:nblocks)*rad_to_deg - CASE ('ULON') - workd2(:,:,:) = ulon(:,:,1:nblocks)*rad_to_deg - CASE ('ULAT') - workd2(:,:,:) = ulat(:,:,1:nblocks)*rad_to_deg - CASE ('NLON') - workd2(:,:,:) = nlon(:,:,1:nblocks)*rad_to_deg - CASE ('NLAT') - workd2(:,:,:) = nlat(:,:,1:nblocks)*rad_to_deg - CASE ('ELON') - workd2(:,:,:) = elon(:,:,1:nblocks)*rad_to_deg - CASE ('ELAT') - workd2(:,:,:) = elat(:,:,1:nblocks)*rad_to_deg - END SELECT - if (history_precision == 8) then - call pio_write_darray(File, varid, iodesc2d, & - workd2, status, fillval=spval_dbl) - else - workr2 = workd2 - call pio_write_darray(File, varid, iodesc2d, & - workr2, status, fillval=spval) - endif - enddo - - ! 
Extra dimensions (NCAT, NFSD, VGRD*) - - do i = 1, nvar_grdz - if (igrdz(i)) then - status = pio_inq_varid(File, var_grdz(i)%short_name, varid) - SELECT CASE (var_grdz(i)%short_name) - CASE ('NCAT') - status = pio_put_var(File, varid, hin_max(1:ncat_hist)) - CASE ('NFSD') - status = pio_put_var(File, varid, floe_rad_c(1:nfsd_hist)) - CASE ('VGRDi') - status = pio_put_var(File, varid, (/(k, k=1,nzilyr)/)) - CASE ('VGRDs') - status = pio_put_var(File, varid, (/(k, k=1,nzslyr)/)) - CASE ('VGRDb') - status = pio_put_var(File, varid, (/(k, k=1,nzblyr)/)) - CASE ('VGRDa') - status = pio_put_var(File, varid, (/(k, k=1,nzalyr)/)) - END SELECT - endif - enddo - - !----------------------------------------------------------------- - ! write grid masks, area and rotation angle - !----------------------------------------------------------------- - - do i = 1, nvar_grd - if (igrd(i)) then - SELECT CASE (var_grd(i)%req%short_name) - CASE ('tmask') - workd2 = hm(:,:,1:nblocks) - CASE ('umask') - workd2 = uvm(:,:,1:nblocks) - CASE ('nmask') - workd2 = npm(:,:,1:nblocks) - CASE ('emask') - workd2 = epm(:,:,1:nblocks) - CASE ('blkmask') - workd2 = bm(:,:,1:nblocks) - CASE ('tarea') - workd2 = tarea(:,:,1:nblocks) - CASE ('uarea') - workd2 = uarea(:,:,1:nblocks) - CASE ('narea') - workd2 = narea(:,:,1:nblocks) - CASE ('earea') - workd2 = earea(:,:,1:nblocks) - CASE ('dxt') - workd2 = dxT(:,:,1:nblocks) - CASE ('dyt') - workd2 = dyT(:,:,1:nblocks) - CASE ('dxu') - workd2 = dxU(:,:,1:nblocks) - CASE ('dyu') - workd2 = dyU(:,:,1:nblocks) - CASE ('dxn') - workd2 = dxN(:,:,1:nblocks) - CASE ('dyn') - workd2 = dyN(:,:,1:nblocks) - CASE ('dxe') - workd2 = dxE(:,:,1:nblocks) - CASE ('dye') - workd2 = dyE(:,:,1:nblocks) - CASE ('HTN') - workd2 = HTN(:,:,1:nblocks) - CASE ('HTE') - workd2 = HTE(:,:,1:nblocks) - CASE ('ANGLE') - workd2 = ANGLE(:,:,1:nblocks) - CASE ('ANGLET') - workd2 = ANGLET(:,:,1:nblocks) - END SELECT - status = pio_inq_varid(File, var_grd(i)%req%short_name, varid) - if 
(history_precision == 8) then - call pio_write_darray(File, varid, iodesc2d, & - workd2, status, fillval=spval_dbl) - else - workr2 = workd2 - call pio_write_darray(File, varid, iodesc2d, & - workr2, status, fillval=spval) - endif - endif - enddo - - !---------------------------------------------------------------- - ! Write coordinates of grid box vertices - !---------------------------------------------------------------- - - if (f_bounds) then - allocate(workd3v(nverts,nx_block,ny_block,nblocks)) - allocate(workr3v(nverts,nx_block,ny_block,nblocks)) - workd3v (:,:,:,:) = c0 - do i = 1, nvar_verts - SELECT CASE (var_nverts(i)%short_name) - CASE ('lont_bounds') - do ivertex = 1, nverts - workd3v(ivertex,:,:,:) = lont_bounds(ivertex,:,:,1:nblocks) - enddo - CASE ('latt_bounds') - do ivertex = 1, nverts - workd3v(ivertex,:,:,:) = latt_bounds(ivertex,:,:,1:nblocks) - enddo - CASE ('lonu_bounds') - do ivertex = 1, nverts - workd3v(ivertex,:,:,:) = lonu_bounds(ivertex,:,:,1:nblocks) - enddo - CASE ('latu_bounds') - do ivertex = 1, nverts - workd3v(ivertex,:,:,:) = latu_bounds(ivertex,:,:,1:nblocks) - enddo - CASE ('lonn_bounds') - do ivertex = 1, nverts - workd3v(ivertex,:,:,:) = lonn_bounds(ivertex,:,:,1:nblocks) - enddo - CASE ('latn_bounds') - do ivertex = 1, nverts - workd3v(ivertex,:,:,:) = latn_bounds(ivertex,:,:,1:nblocks) - enddo - CASE ('lone_bounds') - do ivertex = 1, nverts - workd3v(ivertex,:,:,:) = lone_bounds(ivertex,:,:,1:nblocks) - enddo - CASE ('late_bounds') - do ivertex = 1, nverts - workd3v(ivertex,:,:,:) = late_bounds(ivertex,:,:,1:nblocks) - enddo - END SELECT - - status = pio_inq_varid(File, var_nverts(i)%short_name, varid) - if (history_precision == 8) then - call pio_write_darray(File, varid, iodesc3dv, & - workd3v, status, fillval=spval_dbl) - else - workr3v = workd3v - call pio_write_darray(File, varid, iodesc3dv, & - workr3v, status, fillval=spval) - endif - enddo - deallocate(workd3v) - deallocate(workr3v) - endif ! 
f_bounds - - - !----------------------------------------------------------------- - ! write variable data - !----------------------------------------------------------------- - - ! 2D - do n=1,num_avail_hist_fields_2D - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - status = pio_inq_varid(File,avail_hist_fields(n)%vname,varid) - if (status /= pio_noerr) call abort_ice(subname// & - 'ERROR getting varid for '//avail_hist_fields(n)%vname) - workd2(:,:,:) = a2D(:,:,n,1:nblocks) -#ifdef CESM1_PIO - call pio_setframe(varid, int(1,kind=PIO_OFFSET)) -#else - call pio_setframe(File, varid, int(1,kind=PIO_OFFSET_KIND)) -#endif - if (history_precision == 8) then - call pio_write_darray(File, varid, iodesc2d,& - workd2, status, fillval=spval_dbl) - else - workr2 = workd2 - call pio_write_darray(File, varid, iodesc2d,& - workr2, status, fillval=spval) - endif - endif - enddo ! num_avail_hist_fields_2D - - deallocate(workd2) - deallocate(workr2) - - ! 3D (category) - allocate(workd3(nx_block,ny_block,nblocks,ncat_hist)) - allocate(workr3(nx_block,ny_block,nblocks,ncat_hist)) - do n = n2D + 1, n3Dccum - nn = n - n2D - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - status = pio_inq_varid(File,avail_hist_fields(n)%vname,varid) - if (status /= pio_noerr) call abort_ice(subname// & - 'ERROR: getting varid for '//avail_hist_fields(n)%vname) - do j = 1, nblocks - do i = 1, ncat_hist - workd3(:,:,j,i) = a3Dc(:,:,i,nn,j) - enddo - enddo -#ifdef CESM1_PIO - call pio_setframe(varid, int(1,kind=PIO_OFFSET)) -#else - call pio_setframe(File, varid, int(1,kind=PIO_OFFSET_KIND)) -#endif - if (history_precision == 8) then - call pio_write_darray(File, varid, iodesc3dc,& - workd3, status, fillval=spval_dbl) - else - workr3 = workd3 - call pio_write_darray(File, varid, iodesc3dc,& - workr3, status, fillval=spval) - endif - endif - enddo ! num_avail_hist_fields_3Dc - deallocate(workd3) - deallocate(workr3) - - ! 
3D (vertical ice) - allocate(workd3(nx_block,ny_block,nblocks,nzilyr)) - allocate(workr3(nx_block,ny_block,nblocks,nzilyr)) - do n = n3Dccum+1, n3Dzcum - nn = n - n3Dccum - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - status = pio_inq_varid(File,avail_hist_fields(n)%vname,varid) - if (status /= pio_noerr) call abort_ice(subname// & - 'ERROR: getting varid for '//avail_hist_fields(n)%vname) - do j = 1, nblocks - do i = 1, nzilyr - workd3(:,:,j,i) = a3Dz(:,:,i,nn,j) - enddo - enddo -#ifdef CESM1_PIO - call pio_setframe(varid, int(1,kind=PIO_OFFSET)) -#else - call pio_setframe(File, varid, int(1,kind=PIO_OFFSET_KIND)) -#endif - if (history_precision == 8) then - call pio_write_darray(File, varid, iodesc3di,& - workd3, status, fillval=spval_dbl) - else - workr3 = workd3 - call pio_write_darray(File, varid, iodesc3di,& - workr3, status, fillval=spval) - endif - endif - enddo ! num_avail_hist_fields_3Dz - deallocate(workd3) - deallocate(workr3) - - ! 3D (vertical ice biology) - allocate(workd3(nx_block,ny_block,nblocks,nzblyr)) - allocate(workr3(nx_block,ny_block,nblocks,nzblyr)) - do n = n3Dzcum+1, n3Dbcum - nn = n - n3Dzcum - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - status = pio_inq_varid(File,avail_hist_fields(n)%vname,varid) - if (status /= pio_noerr) call abort_ice(subname// & - 'ERROR: getting varid for '//avail_hist_fields(n)%vname) - do j = 1, nblocks - do i = 1, nzblyr - workd3(:,:,j,i) = a3Db(:,:,i,nn,j) - enddo - enddo -#ifdef CESM1_PIO - call pio_setframe(varid, int(1,kind=PIO_OFFSET)) -#else - call pio_setframe(File, varid, int(1,kind=PIO_OFFSET_KIND)) -#endif - if (history_precision == 8) then - call pio_write_darray(File, varid, iodesc3db,& - workd3, status, fillval=spval_dbl) - else - workr3 = workd3 - call pio_write_darray(File, varid, iodesc3db,& - workr3, status, fillval=spval) - endif - endif - enddo ! num_avail_hist_fields_3Db - deallocate(workd3) - deallocate(workr3) - - ! 
3D (vertical snow biology) - allocate(workd3(nx_block,ny_block,nblocks,nzalyr)) - allocate(workr3(nx_block,ny_block,nblocks,nzalyr)) - do n = n3Dbcum+1, n3Dacum - nn = n - n3Dbcum - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - status = pio_inq_varid(File,avail_hist_fields(n)%vname,varid) - if (status /= pio_noerr) call abort_ice(subname// & - 'ERROR: getting varid for '//avail_hist_fields(n)%vname) - do j = 1, nblocks - do i = 1, nzalyr - workd3(:,:,j,i) = a3Da(:,:,i,nn,j) - enddo - enddo -#ifdef CESM1_PIO - call pio_setframe(varid, int(1,kind=PIO_OFFSET)) -#else - call pio_setframe(File, varid, int(1,kind=PIO_OFFSET_KIND)) -#endif - if (history_precision == 8) then - call pio_write_darray(File, varid, iodesc3da,& - workd3, status, fillval=spval_dbl) - else - workr3 = workd3 - call pio_write_darray(File, varid, iodesc3da,& - workr3, status, fillval=spval) - endif - endif - enddo ! num_avail_hist_fields_3Db - deallocate(workd3) - deallocate(workr3) - - ! 3D (fsd) - allocate(workd3(nx_block,ny_block,nblocks,nfsd_hist)) - allocate(workr3(nx_block,ny_block,nblocks,nfsd_hist)) - do n = n3Dacum+1, n3Dfcum - nn = n - n3Dacum - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - status = pio_inq_varid(File,avail_hist_fields(n)%vname,varid) - if (status /= pio_noerr) call abort_ice(subname// & - 'ERROR: getting varid for '//avail_hist_fields(n)%vname) - do j = 1, nblocks - do i = 1, nfsd_hist - workd3(:,:,j,i) = a3Df(:,:,i,nn,j) - enddo - enddo -#ifdef CESM1_PIO - call pio_setframe(varid, int(1,kind=PIO_OFFSET)) -#else - call pio_setframe(File, varid, int(1,kind=PIO_OFFSET_KIND)) -#endif - if (history_precision == 8) then - call pio_write_darray(File, varid, iodesc3df,& - workd3, status, fillval=spval_dbl) - else - workr3 = workd3 - call pio_write_darray(File, varid, iodesc3df,& - workr3, status, fillval=spval) - endif - endif - enddo ! 
num_avail_hist_fields_3Df - deallocate(workd3) - deallocate(workr3) - - allocate(workd4(nx_block,ny_block,nblocks,ncat_hist,nzilyr)) - allocate(workr4(nx_block,ny_block,nblocks,ncat_hist,nzilyr)) - ! 4D (categories, fsd) - do n = n3Dfcum+1, n4Dicum - nn = n - n3Dfcum - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - status = pio_inq_varid(File,avail_hist_fields(n)%vname,varid) - if (status /= pio_noerr) call abort_ice(subname// & - 'ERROR: getting varid for '//avail_hist_fields(n)%vname) - do j = 1, nblocks - do i = 1, ncat_hist - do k = 1, nzilyr - workd4(:,:,j,i,k) = a4Di(:,:,k,i,nn,j) - enddo ! k - enddo ! i - enddo ! j -#ifdef CESM1_PIO - call pio_setframe(varid, int(1,kind=PIO_OFFSET)) -#else - call pio_setframe(File, varid, int(1,kind=PIO_OFFSET_KIND)) -#endif - if (history_precision == 8) then - call pio_write_darray(File, varid, iodesc4di,& - workd4, status, fillval=spval_dbl) - else - workr4 = workd4 - call pio_write_darray(File, varid, iodesc4di,& - workr4, status, fillval=spval) - endif - endif - enddo ! num_avail_hist_fields_4Di - deallocate(workd4) - deallocate(workr4) - - allocate(workd4(nx_block,ny_block,nblocks,ncat_hist,nzslyr)) - allocate(workr4(nx_block,ny_block,nblocks,ncat_hist,nzslyr)) - ! 4D (categories, vertical ice) - do n = n4Dicum+1, n4Dscum - nn = n - n4Dicum - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - status = pio_inq_varid(File,avail_hist_fields(n)%vname,varid) - if (status /= pio_noerr) call abort_ice(subname// & - 'ERROR: getting varid for '//avail_hist_fields(n)%vname) - do j = 1, nblocks - do i = 1, ncat_hist - do k = 1, nzslyr - workd4(:,:,j,i,k) = a4Ds(:,:,k,i,nn,j) - enddo ! k - enddo ! i - enddo ! 
j -#ifdef CESM1_PIO - call pio_setframe(varid, int(1,kind=PIO_OFFSET)) -#else - call pio_setframe(File, varid, int(1,kind=PIO_OFFSET_KIND)) -#endif - if (history_precision == 8) then - call pio_write_darray(File, varid, iodesc4ds,& - workd4, status, fillval=spval_dbl) - else - workr4 = workd4 - call pio_write_darray(File, varid, iodesc4ds,& - workr4, status, fillval=spval) - endif - endif - enddo ! num_avail_hist_fields_4Ds - deallocate(workd4) - deallocate(workr4) - - allocate(workd4(nx_block,ny_block,nblocks,ncat_hist,nfsd_hist)) - allocate(workr4(nx_block,ny_block,nblocks,ncat_hist,nfsd_hist)) - ! 4D (categories, vertical ice) - do n = n4Dscum+1, n4Dfcum - nn = n - n4Dscum - if (avail_hist_fields(n)%vhistfreq == histfreq(ns) .or. write_ic) then - status = pio_inq_varid(File,avail_hist_fields(n)%vname,varid) - if (status /= pio_noerr) call abort_ice(subname// & - 'ERROR: getting varid for '//avail_hist_fields(n)%vname) - do j = 1, nblocks - do i = 1, ncat_hist - do k = 1, nfsd_hist - workd4(:,:,j,i,k) = a4Df(:,:,k,i,nn,j) - enddo ! k - enddo ! i - enddo ! j -#ifdef CESM1_PIO - call pio_setframe(varid, int(1,kind=PIO_OFFSET)) -#else - call pio_setframe(File, varid, int(1,kind=PIO_OFFSET_KIND)) -#endif - if (history_precision == 8) then - call pio_write_darray(File, varid, iodesc4df,& - workd4, status, fillval=spval_dbl) - else - workr4 = workd4 - call pio_write_darray(File, varid, iodesc4df,& - workr4, status, fillval=spval) - endif - endif - enddo ! num_avail_hist_fields_4Df - deallocate(workd4) - deallocate(workr4) - -! similarly for num_avail_hist_fields_4Db (define workd4b, iodesc4db) - - - !----------------------------------------------------------------- - ! 
clean-up PIO descriptors - !----------------------------------------------------------------- - - call pio_freedecomp(File,iodesc2d) - call pio_freedecomp(File,iodesc3dv) - call pio_freedecomp(File,iodesc3dc) - call pio_freedecomp(File,iodesc3di) - call pio_freedecomp(File,iodesc3db) - call pio_freedecomp(File,iodesc3da) - call pio_freedecomp(File,iodesc3df) - call pio_freedecomp(File,iodesc4di) - call pio_freedecomp(File,iodesc4ds) - call pio_freedecomp(File,iodesc4df) - - !----------------------------------------------------------------- - ! close output dataset - !----------------------------------------------------------------- - - call pio_closefile(File) - if (my_task == master_task) then - write(nu_diag,*) ' ' - write(nu_diag,*) 'Finished writing ',trim(ncfile(ns)) - endif - - end subroutine ice_write_hist - -!======================================================================= - - subroutine ice_write_hist_attrs(File, varid, hfield, ns) - - use ice_kinds_mod - use ice_calendar, only: histfreq, histfreq_n - use ice_history_shared, only: ice_hist_field, history_precision, & - hist_avg - use ice_pio - use pio - - type(file_desc_t) :: File ! file id - type(var_desc_t) :: varid ! variable id - type (ice_hist_field), intent(in) :: hfield ! history file info - integer (kind=int_kind), intent(in) :: ns - - ! local variables - - integer (kind=int_kind) :: status - character(len=*), parameter :: subname = '(ice_write_hist_attrs)' - - status = pio_put_att(File,varid,'units', trim(hfield%vunit)) - - status = pio_put_att(File,varid, 'long_name', trim(hfield%vdesc)) - - status = pio_put_att(File,varid,'coordinates', trim(hfield%vcoord)) - - status = pio_put_att(File,varid,'cell_measures', trim(hfield%vcellmeas)) - - if (hfield%vcomment /= "none") then - status = pio_put_att(File,varid,'comment', trim(hfield%vcomment)) - endif - - call ice_write_hist_fill(File,varid,hfield%vname,history_precision) - - ! 
Add cell_methods attribute to variables if averaged - if (hist_avg) then - if (TRIM(hfield%vname(1:4))/='sig1' & - .and.TRIM(hfield%vname(1:4))/='sig2' & - .and.TRIM(hfield%vname(1:9))/='sistreave' & - .and.TRIM(hfield%vname(1:9))/='sistremax' & - .and.TRIM(hfield%vname(1:4))/='sigP') then - status = pio_put_att(File,varid,'cell_methods','time: mean') - endif - endif - - if ((histfreq(ns) == '1' .and. histfreq_n(ns) == 1) & - .or..not. hist_avg & - .or.TRIM(hfield%vname(1:4))=='divu' & - .or.TRIM(hfield%vname(1:5))=='shear' & - .or.TRIM(hfield%vname(1:4))=='sig1' & - .or.TRIM(hfield%vname(1:4))=='sig2' & - .or.TRIM(hfield%vname(1:4))=='sigP' & - .or.TRIM(hfield%vname(1:5))=='trsig' & - .or.TRIM(hfield%vname(1:9))=='sistreave' & - .or.TRIM(hfield%vname(1:9))=='sistremax' & - .or.TRIM(hfield%vname(1:9))=='mlt_onset' & - .or.TRIM(hfield%vname(1:9))=='frz_onset' & - .or.TRIM(hfield%vname(1:6))=='hisnap' & - .or.TRIM(hfield%vname(1:6))=='aisnap') then - status = pio_put_att(File,varid,'time_rep','instantaneous') - else - status = pio_put_att(File,varid,'time_rep','averaged') - endif - - end subroutine ice_write_hist_attrs - -!======================================================================= - - subroutine ice_write_hist_fill(File,varid,vname,precision) - - use ice_kinds_mod - use ice_pio - use pio - - type(file_desc_t) , intent(inout) :: File - type(var_desc_t) , intent(in) :: varid - character(len=*), intent(in) :: vname ! var name - integer (kind=int_kind), intent(in) :: precision ! precision - - ! 
local variables - - integer (kind=int_kind) :: status - character(len=*), parameter :: subname = '(ice_write_hist_fill)' - - if (precision == 8) then - status = pio_put_att(File, varid, 'missing_value', spval_dbl) - status = pio_put_att(File, varid,'_FillValue',spval_dbl) - else - status = pio_put_att(File, varid, 'missing_value', spval) - status = pio_put_att(File, varid,'_FillValue',spval) - endif - - end subroutine ice_write_hist_fill - -!======================================================================= - - end module ice_history_write - -!======================================================================= diff --git a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 b/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 deleted file mode 100644 index 29477973a..000000000 --- a/cicecore/cicedynB/infrastructure/io/io_pio2/ice_restart.F90 +++ /dev/null @@ -1,914 +0,0 @@ -!======================================================================= -! -! Read and write ice model restart files using pio interfaces. -! 
authors David A Bailey, NCAR - - module ice_restart - - use ice_broadcast - use ice_exit, only: abort_ice - use ice_fileunits, only: nu_diag, nu_restart, nu_rst_pointer - use ice_kinds_mod - use ice_restart_shared, only: & - restart, restart_ext, restart_dir, restart_file, pointer_file, & - runid, runtype, use_restart_time, restart_format, lcdf64, lenstr, & - restart_coszen - use ice_pio - use pio - use icepack_intfc, only: icepack_warnings_flush, icepack_warnings_aborted - use icepack_intfc, only: icepack_query_tracer_flags, icepack_query_tracer_indices - use icepack_intfc, only: icepack_query_parameters, icepack_query_tracer_sizes - - implicit none - private - public :: init_restart_write, init_restart_read, & - read_restart_field, write_restart_field, final_restart - - type(file_desc_t) :: File - type(var_desc_t) :: vardesc - - type(io_desc_t) :: iodesc2d - type(io_desc_t) :: iodesc3d_ncat - -!======================================================================= - - contains - -!======================================================================= - -! Sets up restart file for reading. -! author David A Bailey, NCAR - - subroutine init_restart_read(ice_ic) - - use ice_calendar, only: istep0, istep1, myear, mmonth, & - mday, msec, npt - use ice_communicate, only: my_task, master_task - use ice_domain_size, only: ncat - use ice_read_write, only: ice_open - - character(len=char_len_long), intent(in), optional :: ice_ic - - ! 
local variables - - character(len=char_len_long) :: & - filename, filename0 - - integer (kind=int_kind) :: status, status1 - - integer (kind=int_kind) :: iotype - - character(len=*), parameter :: subname = '(init_restart_read)' - - if (present(ice_ic)) then - filename = trim(ice_ic) - else - if (my_task == master_task) then - open(nu_rst_pointer,file=pointer_file) - read(nu_rst_pointer,'(a)') filename0 - filename = trim(filename0) - close(nu_rst_pointer) - write(nu_diag,*) 'Read ',pointer_file(1:lenstr(pointer_file)) - endif - call broadcast_scalar(filename, master_task) - endif - - if (my_task == master_task) then - write(nu_diag,*) 'Using restart dump=', trim(filename) - end if - -! if (restart_format(1:3) == 'pio') then - iotype = PIO_IOTYPE_NETCDF - if (restart_format == 'pio_pnetcdf') iotype = PIO_IOTYPE_PNETCDF - File%fh=-1 - call ice_pio_init(mode='read', filename=trim(filename), File=File, iotype=iotype) - - call ice_pio_initdecomp(iodesc=iodesc2d, precision=8) - call ice_pio_initdecomp(ndim3=ncat , iodesc=iodesc3d_ncat,remap=.true., precision=8) - - if (use_restart_time) then - status1 = PIO_noerr - status = pio_get_att(File, pio_global, 'istep1', istep0) -! status = pio_get_att(File, pio_global, 'time', time) -! 
status = pio_get_att(File, pio_global, 'time_forc', time_forc) - call pio_seterrorhandling(File, PIO_BCAST_ERROR) - status = pio_get_att(File, pio_global, 'myear', myear) - if (status /= PIO_noerr) status = pio_get_att(File, pio_global, 'nyr', myear) - if (status /= PIO_noerr) status1 = status - status = pio_get_att(File, pio_global, 'mmonth', mmonth) - if (status /= PIO_noerr) status = pio_get_att(File, pio_global, 'month', mmonth) - if (status /= PIO_noerr) status1 = status - status = pio_get_att(File, pio_global, 'mday', mday) - if (status /= PIO_noerr) status1 = status - status = pio_get_att(File, pio_global, 'msec', msec) - if (status /= PIO_noerr) status = pio_get_att(File, pio_global, 'sec', msec) - if (status /= PIO_noerr) status1 = status - if (status1 /= PIO_noerr) & - call abort_ice(subname//"ERROR: reading restart time ") - call pio_seterrorhandling(File, PIO_INTERNAL_ERROR) - endif ! use namelist values if use_restart_time = F -! endif - - if (my_task == master_task) then - write(nu_diag,*) 'Restart read at istep=',istep0,myear,mmonth,mday,msec - endif - - call broadcast_scalar(istep0,master_task) - call broadcast_scalar(myear,master_task) - call broadcast_scalar(mmonth,master_task) - call broadcast_scalar(mday,master_task) - call broadcast_scalar(msec,master_task) -! call broadcast_scalar(time,master_task) -! call broadcast_scalar(time_forc,master_task) - call broadcast_scalar(myear,master_task) - - istep1 = istep0 - - ! if runid is bering then need to correct npt for istep0 - if (trim(runid) == 'bering') then - npt = npt - istep0 - endif - - end subroutine init_restart_read - -!======================================================================= - -! Sets up restart file for writing. -! 
author David A Bailey, NCAR - - subroutine init_restart_write(filename_spec) - - use ice_calendar, only: msec, mmonth, mday, myear, istep1 - use ice_communicate, only: my_task, master_task - use ice_domain_size, only: nx_global, ny_global, ncat, nilyr, nslyr, & - n_iso, n_aero, nblyr, n_zaero, n_algae, n_doc, & - n_dic, n_don, n_fed, n_fep, nfsd - use ice_dyn_shared, only: kdyn - use ice_arrays_column, only: oceanmixed_ice - - logical (kind=log_kind) :: & - solve_zsal, skl_bgc, z_tracers - - logical (kind=log_kind) :: & - tr_iage, tr_FY, tr_lvl, tr_iso, tr_aero, & - tr_pond_topo, tr_pond_lvl, tr_brine, tr_snow, & - tr_bgc_N, tr_bgc_C, tr_bgc_Nit, & - tr_bgc_Sil, tr_bgc_DMS, & - tr_bgc_chl, tr_bgc_Am, & - tr_bgc_PON, tr_bgc_DON, & - tr_zaero, tr_bgc_Fe, & - tr_bgc_hum, tr_fsd - - integer (kind=int_kind) :: & - nbtrcr - - character(len=char_len_long), intent(in), optional :: filename_spec - - ! local variables - - character(len=char_len_long) :: filename - - integer (kind=int_kind) :: dimid_ni, dimid_nj, dimid_ncat, & - dimid_nilyr, dimid_nslyr, dimid_naero - - integer (kind=int_kind), allocatable :: dims(:) - - integer (kind=int_kind) :: iotype - - integer (kind=int_kind) :: & - k, n, & ! loop index - status ! 
status variable from netCDF routine - - character (len=3) :: nchar, ncharb - - character(len=*), parameter :: subname = '(init_restart_write)' - - call icepack_query_tracer_sizes(nbtrcr_out=nbtrcr) - call icepack_query_tracer_flags( & - tr_iage_out=tr_iage, tr_FY_out=tr_FY, tr_lvl_out=tr_lvl, & - tr_iso_out=tr_iso, tr_aero_out=tr_aero, & - tr_pond_topo_out=tr_pond_topo, tr_pond_lvl_out=tr_pond_lvl, & - tr_snow_out=tr_snow, tr_brine_out=tr_brine, & - tr_bgc_N_out=tr_bgc_N, tr_bgc_C_out=tr_bgc_C, tr_bgc_Nit_out=tr_bgc_Nit, & - tr_bgc_Sil_out=tr_bgc_Sil, tr_bgc_DMS_out=tr_bgc_DMS, & - tr_bgc_chl_out=tr_bgc_chl, tr_bgc_Am_out=tr_bgc_Am, & - tr_bgc_PON_out=tr_bgc_PON, tr_bgc_DON_out=tr_bgc_DON, & - tr_zaero_out=tr_zaero, tr_bgc_Fe_out=tr_bgc_Fe, & - tr_bgc_hum_out=tr_bgc_hum, tr_fsd_out=tr_fsd) - call icepack_query_parameters(solve_zsal_out=solve_zsal, skl_bgc_out=skl_bgc, & - z_tracers_out=z_tracers) - call icepack_warnings_flush(nu_diag) - if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & - file=__FILE__, line=__LINE__) - - ! construct path/file - if (present(filename_spec)) then - filename = trim(filename_spec) - else - write(filename,'(a,a,a,i4.4,a,i2.2,a,i2.2,a,i5.5)') & - restart_dir(1:lenstr(restart_dir)), & - restart_file(1:lenstr(restart_file)),'.', & - myear,'-',mmonth,'-',mday,'-',msec - end if - - if (restart_format(1:3) /= 'bin') filename = trim(filename) // '.nc' - - ! write pointer (path/file) - if (my_task == master_task) then - open(nu_rst_pointer,file=pointer_file) - write(nu_rst_pointer,'(a)') filename - close(nu_rst_pointer) - endif - -! if (restart_format(1:3) == 'pio') then - - iotype = PIO_IOTYPE_NETCDF - if (restart_format == 'pio_pnetcdf') iotype = PIO_IOTYPE_PNETCDF - File%fh=-1 - call ice_pio_init(mode='write',filename=trim(filename), File=File, & - clobber=.true., cdf64=lcdf64, iotype=iotype) - - status = pio_put_att(File,pio_global,'istep1',istep1) -! status = pio_put_att(File,pio_global,'time',time) -! 
status = pio_put_att(File,pio_global,'time_forc',time_forc) - status = pio_put_att(File,pio_global,'myear',myear) - status = pio_put_att(File,pio_global,'mmonth',mmonth) - status = pio_put_att(File,pio_global,'mday',mday) - status = pio_put_att(File,pio_global,'msec',msec) - - status = pio_def_dim(File,'ni',nx_global,dimid_ni) - status = pio_def_dim(File,'nj',ny_global,dimid_nj) - status = pio_def_dim(File,'ncat',ncat,dimid_ncat) - - !----------------------------------------------------------------- - ! 2D restart fields - !----------------------------------------------------------------- - - allocate(dims(2)) - - dims(1) = dimid_ni - dims(2) = dimid_nj - - call define_rest_field(File,'uvel',dims) - call define_rest_field(File,'vvel',dims) - if (restart_coszen) call define_rest_field(File,'coszen',dims) - call define_rest_field(File,'scale_factor',dims) - call define_rest_field(File,'swvdr',dims) - call define_rest_field(File,'swvdf',dims) - call define_rest_field(File,'swidr',dims) - call define_rest_field(File,'swidf',dims) - - call define_rest_field(File,'strocnxT',dims) - call define_rest_field(File,'strocnyT',dims) - - call define_rest_field(File,'stressp_1',dims) - call define_rest_field(File,'stressp_2',dims) - call define_rest_field(File,'stressp_3',dims) - call define_rest_field(File,'stressp_4',dims) - - call define_rest_field(File,'stressm_1',dims) - call define_rest_field(File,'stressm_2',dims) - call define_rest_field(File,'stressm_3',dims) - call define_rest_field(File,'stressm_4',dims) - - call define_rest_field(File,'stress12_1',dims) - call define_rest_field(File,'stress12_2',dims) - call define_rest_field(File,'stress12_3',dims) - call define_rest_field(File,'stress12_4',dims) - - call define_rest_field(File,'iceumask',dims) - - if (oceanmixed_ice) then - call define_rest_field(File,'sst',dims) - call define_rest_field(File,'frzmlt',dims) - endif - - if (tr_FY) then - call define_rest_field(File,'frz_onset',dims) - end if - - if (kdyn == 2) then - 
call define_rest_field(File,'a11_1',dims) - call define_rest_field(File,'a11_2',dims) - call define_rest_field(File,'a11_3',dims) - call define_rest_field(File,'a11_4',dims) - call define_rest_field(File,'a12_1',dims) - call define_rest_field(File,'a12_2',dims) - call define_rest_field(File,'a12_3',dims) - call define_rest_field(File,'a12_4',dims) - endif - - if (tr_pond_lvl) then - call define_rest_field(File,'fsnow',dims) - endif - - if (nbtrcr > 0) then - if (tr_bgc_N) then - do k=1,n_algae - write(nchar,'(i3.3)') k - call define_rest_field(File,'algalN'//trim(nchar),dims) - enddo - endif - if (tr_bgc_C) then - do k=1,n_doc - write(nchar,'(i3.3)') k - call define_rest_field(File,'doc'//trim(nchar),dims) - enddo - do k=1,n_dic - write(nchar,'(i3.3)') k - call define_rest_field(File,'dic'//trim(nchar),dims) - enddo - endif - call define_rest_field(File,'nit' ,dims) - if (tr_bgc_Am) & - call define_rest_field(File,'amm' ,dims) - if (tr_bgc_Sil) & - call define_rest_field(File,'sil' ,dims) - if (tr_bgc_hum) & - call define_rest_field(File,'hum' ,dims) - if (tr_bgc_DMS) then - call define_rest_field(File,'dmsp' ,dims) - call define_rest_field(File,'dms' ,dims) - endif - if (tr_bgc_DON) then - do k=1,n_don - write(nchar,'(i3.3)') k - call define_rest_field(File,'don'//trim(nchar),dims) - enddo - endif - if (tr_bgc_Fe ) then - do k=1,n_fed - write(nchar,'(i3.3)') k - call define_rest_field(File,'fed'//trim(nchar),dims) - enddo - do k=1,n_fep - write(nchar,'(i3.3)') k - call define_rest_field(File,'fep'//trim(nchar),dims) - enddo - endif - if (tr_zaero) then - do k=1,n_zaero - write(nchar,'(i3.3)') k - call define_rest_field(File,'zaeros'//trim(nchar),dims) - enddo - endif - endif !nbtrcr - - if (solve_zsal) call define_rest_field(File,'sss',dims) - - deallocate(dims) - - !----------------------------------------------------------------- - ! 
3D restart fields (ncat) - !----------------------------------------------------------------- - - allocate(dims(3)) - - dims(1) = dimid_ni - dims(2) = dimid_nj - dims(3) = dimid_ncat - - call define_rest_field(File,'aicen',dims) - call define_rest_field(File,'vicen',dims) - call define_rest_field(File,'vsnon',dims) - call define_rest_field(File,'Tsfcn',dims) - - if (tr_iage) then - call define_rest_field(File,'iage',dims) - end if - - if (tr_FY) then - call define_rest_field(File,'FY',dims) - end if - - if (tr_lvl) then - call define_rest_field(File,'alvl',dims) - call define_rest_field(File,'vlvl',dims) - end if - - if (tr_pond_topo) then - call define_rest_field(File,'apnd',dims) - call define_rest_field(File,'hpnd',dims) - call define_rest_field(File,'ipnd',dims) - end if - - if (tr_pond_lvl) then - call define_rest_field(File,'apnd',dims) - call define_rest_field(File,'hpnd',dims) - call define_rest_field(File,'ipnd',dims) - call define_rest_field(File,'dhs',dims) - call define_rest_field(File,'ffrac',dims) - end if - - if (tr_brine) then - call define_rest_field(File,'fbrn',dims) - call define_rest_field(File,'first_ice',dims) - endif - - if (skl_bgc) then - do k = 1, n_algae - write(nchar,'(i3.3)') k - call define_rest_field(File,'bgc_N'//trim(nchar) ,dims) - enddo - if (tr_bgc_C) then - ! do k = 1, n_algae - ! write(nchar,'(i3.3)') k - ! call define_rest_field(File,'bgc_C'//trim(nchar) ,dims) - ! 
enddo - do k = 1, n_doc - write(nchar,'(i3.3)') k - call define_rest_field(File,'bgc_DOC'//trim(nchar) ,dims) - enddo - do k = 1, n_dic - write(nchar,'(i3.3)') k - call define_rest_field(File,'bgc_DIC'//trim(nchar) ,dims) - enddo - endif - if (tr_bgc_chl) then - do k = 1, n_algae - write(nchar,'(i3.3)') k - call define_rest_field(File,'bgc_chl'//trim(nchar) ,dims) - enddo - endif - call define_rest_field(File,'bgc_Nit' ,dims) - if (tr_bgc_Am) & - call define_rest_field(File,'bgc_Am' ,dims) - if (tr_bgc_Sil) & - call define_rest_field(File,'bgc_Sil' ,dims) - if (tr_bgc_hum) & - call define_rest_field(File,'bgc_hum' ,dims) - if (tr_bgc_DMS) then - call define_rest_field(File,'bgc_DMSPp',dims) - call define_rest_field(File,'bgc_DMSPd',dims) - call define_rest_field(File,'bgc_DMS' ,dims) - endif - if (tr_bgc_PON) & - call define_rest_field(File,'bgc_PON' ,dims) - if (tr_bgc_DON) then - do k = 1, n_don - write(nchar,'(i3.3)') k - call define_rest_field(File,'bgc_DON'//trim(nchar) ,dims) - enddo - endif - if (tr_bgc_Fe ) then - do k = 1, n_fed - write(nchar,'(i3.3)') k - call define_rest_field(File,'bgc_Fed'//trim(nchar) ,dims) - enddo - do k = 1, n_fep - write(nchar,'(i3.3)') k - call define_rest_field(File,'bgc_Fep'//trim(nchar) ,dims) - enddo - endif - endif !skl_bgc - if (solve_zsal) & - call define_rest_field(File,'Rayleigh',dims) - - !----------------------------------------------------------------- - ! 
4D restart fields, written as layers of 3D - !----------------------------------------------------------------- - - do k=1,nilyr - write(nchar,'(i3.3)') k - call define_rest_field(File,'sice'//trim(nchar),dims) - call define_rest_field(File,'qice'//trim(nchar),dims) - enddo - - do k=1,nslyr - write(nchar,'(i3.3)') k - call define_rest_field(File,'qsno'//trim(nchar),dims) - enddo - - if (tr_snow) then - do k=1,nslyr - write(nchar,'(i3.3)') k - call define_rest_field(File,'smice'//trim(nchar),dims) - call define_rest_field(File,'smliq'//trim(nchar),dims) - call define_rest_field(File, 'rhos'//trim(nchar),dims) - call define_rest_field(File, 'rsnw'//trim(nchar),dims) - enddo - endif - - if (tr_fsd) then - do k=1,nfsd - write(nchar,'(i3.3)') k - call define_rest_field(File,'fsd'//trim(nchar),dims) - enddo - endif - - if (tr_iso) then - do k=1,n_iso - write(nchar,'(i3.3)') k - call define_rest_field(File,'isosno'//nchar, dims) - call define_rest_field(File,'isoice'//nchar, dims) - enddo - endif - - if (tr_aero) then - do k=1,n_aero - write(nchar,'(i3.3)') k - call define_rest_field(File,'aerosnossl'//nchar, dims) - call define_rest_field(File,'aerosnoint'//nchar, dims) - call define_rest_field(File,'aeroicessl'//nchar, dims) - call define_rest_field(File,'aeroiceint'//nchar, dims) - enddo - endif - - if (solve_zsal) then - do k = 1, nblyr - write(nchar,'(i3.3)') k - call define_rest_field(File,'zSalinity'//trim(nchar),dims) - enddo - endif - if (z_tracers) then - if (tr_zaero) then - do n = 1, n_zaero - write(ncharb,'(i3.3)') n - do k = 1, nblyr+3 - write(nchar,'(i3.3)') k - call define_rest_field(File,'zaero'//trim(ncharb)//trim(nchar),dims) - enddo !k - enddo !n - endif !tr_zaero - if (tr_bgc_Nit) then - do k = 1, nblyr+3 - write(nchar,'(i3.3)') k - call define_rest_field(File,'bgc_Nit'//trim(nchar),dims) - enddo - endif - if (tr_bgc_N) then - do n = 1, n_algae - write(ncharb,'(i3.3)') n - do k = 1, nblyr+3 - write(nchar,'(i3.3)') k - call 
define_rest_field(File,'bgc_N'//trim(ncharb)//trim(nchar),dims) - enddo - enddo - endif - if (tr_bgc_C) then - ! do n = 1, n_algae - ! write(ncharb,'(i3.3)') n - ! do k = 1, nblyr+3 - ! write(nchar,'(i3.3)') k - ! call - ! define_rest_field(File,'bgc_C'//trim(ncharb)//trim(nchar),dims) - ! enddo - ! enddo - do n = 1, n_doc - write(ncharb,'(i3.3)') n - do k = 1, nblyr+3 - write(nchar,'(i3.3)') k - call define_rest_field(File,'bgc_DOC'//trim(ncharb)//trim(nchar),dims) - enddo - enddo - do n = 1, n_dic - write(ncharb,'(i3.3)') n - do k = 1, nblyr+3 - write(nchar,'(i3.3)') k - call define_rest_field(File,'bgc_DIC'//trim(ncharb)//trim(nchar),dims) - enddo - enddo - endif - if (tr_bgc_chl) then - do n = 1, n_algae - write(ncharb,'(i3.3)') n - do k = 1, nblyr+3 - write(nchar,'(i3.3)') k - call define_rest_field(File,'bgc_chl'//trim(ncharb)//trim(nchar),dims) - enddo - enddo - endif - if (tr_bgc_Am) then - do k = 1, nblyr+3 - write(nchar,'(i3.3)') k - call define_rest_field(File,'bgc_Am'//trim(nchar),dims) - enddo - endif - if (tr_bgc_Sil) then - do k = 1, nblyr+3 - write(nchar,'(i3.3)') k - call define_rest_field(File,'bgc_Sil'//trim(nchar),dims) - enddo - endif - if (tr_bgc_hum) then - do k = 1, nblyr+3 - write(nchar,'(i3.3)') k - call define_rest_field(File,'bgc_hum'//trim(nchar),dims) - enddo - endif - if (tr_bgc_DMS) then - do k = 1, nblyr+3 - write(nchar,'(i3.3)') k - call define_rest_field(File,'bgc_DMSPp'//trim(nchar),dims) - call define_rest_field(File,'bgc_DMSPd'//trim(nchar),dims) - call define_rest_field(File,'bgc_DMS'//trim(nchar),dims) - enddo - endif - if (tr_bgc_PON) then - do k = 1, nblyr+3 - write(nchar,'(i3.3)') k - call define_rest_field(File,'bgc_PON'//trim(nchar),dims) - enddo - endif - if (tr_bgc_DON) then - do n = 1, n_don - write(ncharb,'(i3.3)') n - do k = 1, nblyr+3 - write(nchar,'(i3.3)') k - call define_rest_field(File,'bgc_DON'//trim(ncharb)//trim(nchar),dims) - enddo - enddo - endif - if (tr_bgc_Fe ) then - do n = 1, n_fed - 
write(ncharb,'(i3.3)') n - do k = 1, nblyr+3 - write(nchar,'(i3.3)') k - call define_rest_field(File,'bgc_Fed'//trim(ncharb)//trim(nchar),dims) - enddo - enddo - do n = 1, n_fep - write(ncharb,'(i3.3)') n - do k = 1, nblyr+3 - write(nchar,'(i3.3)') k - call define_rest_field(File,'bgc_Fep'//trim(ncharb)//trim(nchar),dims) - enddo - enddo - endif - do k = 1, nbtrcr - write(nchar,'(i3.3)') k - call define_rest_field(File,'zbgc_frac'//trim(nchar),dims) - enddo - endif !z_tracers - - deallocate(dims) - status = pio_enddef(File) - - call ice_pio_initdecomp(iodesc=iodesc2d, precision=8) - call ice_pio_initdecomp(ndim3=ncat , iodesc=iodesc3d_ncat, remap=.true., precision=8) - -! endif ! restart_format - - if (my_task == master_task) then - write(nu_diag,*) 'Writing ',filename(1:lenstr(filename)) - endif - - end subroutine init_restart_write - -!======================================================================= - -! Reads a single restart field -! author David A Bailey, NCAR - - subroutine read_restart_field(nu,nrec,work,atype,vname,ndim3,diag, & - field_loc, field_type) - - use ice_blocks, only: nx_block, ny_block - use ice_communicate, only: my_task, master_task - use ice_constants, only: c0, field_loc_center - use ice_boundary, only: ice_HaloUpdate - use ice_domain, only: halo_info, distrb_info, nblocks - use ice_domain_size, only: max_blocks, ncat - use ice_global_reductions, only: global_minval, global_maxval, global_sum - - integer (kind=int_kind), intent(in) :: & - nu , & ! unit number - ndim3 , & ! third dimension - nrec ! record number (0 for sequential access) - - real (kind=dbl_kind), dimension(nx_block,ny_block,ndim3,max_blocks), intent(inout) :: & - work ! input array (real, 8-byte) - - character (len=4), intent(in) :: & - atype ! format for output array - ! (real/integer, 4-byte/8-byte) - - logical (kind=log_kind), intent(in) :: & - diag ! 
if true, write diagnostic output - - character (len=*), intent(in) :: vname - - integer (kind=int_kind), optional, intent(in) :: & - field_loc, & ! location of field on staggered grid - field_type ! type of field (scalar, vector, angle) - - ! local variables - - integer (kind=int_kind) :: & - j, & ! dimension counter - n, & ! number of dimensions for variable - ndims, & ! number of variable dimensions - status ! status variable from netCDF routine - - real (kind=dbl_kind) :: amin,amax,asum - - character(len=*), parameter :: subname = '(read_restart_field)' - -! if (restart_format(1:3) == "pio") then - if (my_task == master_task) & - write(nu_diag,*)'Parallel restart file read: ',vname - - call pio_seterrorhandling(File, PIO_BCAST_ERROR) - - status = pio_inq_varid(File,trim(vname),vardesc) - - if (status /= PIO_noerr) then - call abort_ice(subname// & - "ERROR: CICE restart? Missing variable: "//trim(vname)) - endif - - status = pio_inq_varndims(File, vardesc, ndims) - - call pio_seterrorhandling(File, PIO_INTERNAL_ERROR) - -! if (ndim3 == ncat .and. ncat>1) then - if (ndim3 == ncat .and. ndims == 3) then - call pio_read_darray(File, vardesc, iodesc3d_ncat, work, status) -!#ifndef CESM1_PIO -!! This only works for PIO2 -! where (work == PIO_FILL_DOUBLE) work = c0 -!#endif - if (present(field_loc)) then - do n=1,ndim3 - call ice_HaloUpdate (work(:,:,n,:), halo_info, & - field_loc, field_type) - enddo - endif -! elseif (ndim3 == 1) then - elseif (ndim3 == 1 .and. ndims == 2) then - call pio_read_darray(File, vardesc, iodesc2d, work, status) -!#ifndef CESM1_PIO -!! This only works for PIO2 -! 
where (work == PIO_FILL_DOUBLE) work = c0 -!#endif - if (present(field_loc)) then - call ice_HaloUpdate (work(:,:,1,:), halo_info, & - field_loc, field_type) - endif - else - write(nu_diag,*) "ndim3 not supported ",ndim3 - endif - - if (diag) then - if (ndim3 > 1) then - do n=1,ndim3 - amin = global_minval(work(:,:,n,:),distrb_info) - amax = global_maxval(work(:,:,n,:),distrb_info) - asum = global_sum(work(:,:,n,:), distrb_info, field_loc_center) - if (my_task == master_task) then - write(nu_diag,*) ' min and max =', amin, amax - write(nu_diag,*) ' sum =',asum - endif - enddo - else - amin = global_minval(work(:,:,1,:),distrb_info) - amax = global_maxval(work(:,:,1,:),distrb_info) - asum = global_sum(work(:,:,1,:), distrb_info, field_loc_center) - if (my_task == master_task) then - write(nu_diag,*) ' min and max =', amin, amax - write(nu_diag,*) ' sum =',asum - write(nu_diag,*) '' - endif - endif - - endif -! else -! call abort_ice(subname//"ERROR: Invalid restart_format: "//trim(restart_format)) -! endif ! restart_format - - end subroutine read_restart_field - -!======================================================================= - -! Writes a single restart field. -! author David A Bailey, NCAR - - subroutine write_restart_field(nu,nrec,work,atype,vname,ndim3,diag) - - use ice_blocks, only: nx_block, ny_block - use ice_communicate, only: my_task, master_task - use ice_constants, only: c0, field_loc_center - use ice_domain, only: distrb_info, nblocks - use ice_domain_size, only: max_blocks, ncat - use ice_global_reductions, only: global_minval, global_maxval, global_sum - - integer (kind=int_kind), intent(in) :: & - nu , & ! unit number - ndim3 , & ! third dimension - nrec ! record number (0 for sequential access) - - real (kind=dbl_kind), dimension(nx_block,ny_block,ndim3,max_blocks), intent(in) :: & - work ! input array (real, 8-byte) - - character (len=4), intent(in) :: & - atype ! format for output array - ! 
(real/integer, 4-byte/8-byte) - - logical (kind=log_kind), intent(in) :: & - diag ! if true, write diagnostic output - - character (len=*), intent(in) :: vname - - ! local variables - - integer (kind=int_kind) :: & - j, & ! dimension counter - n, & ! dimension counter - ndims, & ! number of variable dimensions - status ! status variable from netCDF routine - - real (kind=dbl_kind) :: amin,amax,asum - - character(len=*), parameter :: subname = '(write_restart_field)' - -! if (restart_format(1:3) == "pio") then - if (my_task == master_task) & - write(nu_diag,*)'Parallel restart file write: ',vname - - status = pio_inq_varid(File,trim(vname),vardesc) - - status = pio_inq_varndims(File, vardesc, ndims) - - if (ndims==3) then - call pio_write_darray(File, vardesc, iodesc3d_ncat,work(:,:,:,1:nblocks), & - status, fillval=c0) - elseif (ndims == 2) then - call pio_write_darray(File, vardesc, iodesc2d, work(:,:,1,1:nblocks), & - status, fillval=c0) - else - write(nu_diag,*) "ndims not supported",ndims,ndim3 - endif - - if (diag) then - if (ndim3 > 1) then - do n=1,ndim3 - amin = global_minval(work(:,:,n,:),distrb_info) - amax = global_maxval(work(:,:,n,:),distrb_info) - asum = global_sum(work(:,:,n,:), distrb_info, field_loc_center) - if (my_task == master_task) then - write(nu_diag,*) ' min and max =', amin, amax - write(nu_diag,*) ' sum =',asum - endif - enddo - else - amin = global_minval(work(:,:,1,:),distrb_info) - amax = global_maxval(work(:,:,1,:),distrb_info) - asum = global_sum(work(:,:,1,:), distrb_info, field_loc_center) - if (my_task == master_task) then - write(nu_diag,*) ' min and max =', amin, amax - write(nu_diag,*) ' sum =',asum - endif - endif - endif -! else -! call abort_ice(subname//"ERROR: Invalid restart_format: "//trim(restart_format)) -! endif - - end subroutine write_restart_field - -!======================================================================= - -! Finalize the restart file. -! 
author David A Bailey, NCAR - - subroutine final_restart() - - use ice_calendar, only: istep1, idate, msec - use ice_communicate, only: my_task, master_task - - character(len=*), parameter :: subname = '(final_restart)' - - call PIO_freeDecomp(File,iodesc2d) - call PIO_freeDecomp(File,iodesc3d_ncat) - call pio_closefile(File) - - if (my_task == master_task) & - write(nu_diag,*) 'Restart read/written ',istep1,idate,msec - - end subroutine final_restart - -!======================================================================= - -! Defines a restart field -! author David A Bailey, NCAR - - subroutine define_rest_field(File, vname, dims) - - type(file_desc_t) , intent(in) :: File - character (len=*) , intent(in) :: vname - integer (kind=int_kind), intent(in) :: dims(:) - - integer (kind=int_kind) :: & - status ! status variable from netCDF routine - - character(len=*), parameter :: subname = '(define_rest_field)' - - status = pio_def_var(File,trim(vname),pio_double,dims,vardesc) - - end subroutine define_rest_field - -!======================================================================= - - end module ice_restart - -!======================================================================= diff --git a/cicecore/cicedynB~e628a9a (Update CICE for latest Consortium_main (#56)) b/cicecore/cicedynB~e628a9a (Update CICE for latest Consortium_main (#56)) new file mode 120000 index 000000000..70695ca4b --- /dev/null +++ b/cicecore/cicedynB~e628a9a (Update CICE for latest Consortium_main (#56)) @@ -0,0 +1 @@ +cicedyn \ No newline at end of file diff --git a/cicecore/drivers/direct/hadgem3/CICE_InitMod.F90 b/cicecore/drivers/direct/hadgem3/CICE_InitMod.F90 index 0b8ed689e..85050d8c9 100644 --- a/cicecore/drivers/direct/hadgem3/CICE_InitMod.F90 +++ b/cicecore/drivers/direct/hadgem3/CICE_InitMod.F90 @@ -70,9 +70,10 @@ subroutine cice_init use ice_diagnostics, only: init_diags use ice_domain, only: init_domain_blocks use ice_domain_size, only: ncat, nfsd - use ice_dyn_eap, only: 
init_eap, alloc_dyn_eap - use ice_dyn_shared, only: kdyn, init_dyn, alloc_dyn_shared + use ice_dyn_eap, only: init_eap + use ice_dyn_evp, only: init_evp use ice_dyn_vp, only: init_vp + use ice_dyn_shared, only: kdyn use ice_flux, only: init_coupler_flux, init_history_therm, & init_history_dyn, init_flux_atm, init_flux_ocn, alloc_flux use ice_forcing, only: init_forcing_ocn, init_forcing_atmo, & @@ -111,7 +112,6 @@ subroutine cice_init call alloc_grid ! allocate grid call alloc_arrays_column ! allocate column arrays call alloc_state ! allocate state - call alloc_dyn_shared ! allocate dyn shared (init_uvel,init_vvel) call alloc_flux_bgc ! allocate flux_bgc call alloc_flux ! allocate flux call init_ice_timers ! initialize all timers @@ -122,9 +122,9 @@ subroutine cice_init call init_calendar ! initialize some calendar stuff call init_hist (dt) ! initialize output history file - call init_dyn (dt_dyn) ! define dynamics parameters, variables - if (kdyn == 2) then - call alloc_dyn_eap ! allocate dyn_eap arrays + if (kdyn == 1) then + call init_evp + else if (kdyn == 2) then call init_eap ! define eap dynamics parameters, variables else if (kdyn == 3) then call init_vp ! 
define vp dynamics parameters, variables @@ -262,7 +262,7 @@ subroutine init_restart nt_alvl, nt_vlvl, nt_apnd, nt_hpnd, nt_ipnd, & nt_iage, nt_FY, nt_aero, nt_fsd - character(len=*),parameter :: subname = '(init_restart)' + character(len=*), parameter :: subname = '(init_restart)' call icepack_query_tracer_sizes(ntrcr_out=ntrcr) call icepack_warnings_flush(nu_diag) diff --git a/cicecore/drivers/direct/nemo_concepts/CICE_InitMod.F90 b/cicecore/drivers/direct/nemo_concepts/CICE_InitMod.F90 index 0b8ed689e..85050d8c9 100644 --- a/cicecore/drivers/direct/nemo_concepts/CICE_InitMod.F90 +++ b/cicecore/drivers/direct/nemo_concepts/CICE_InitMod.F90 @@ -70,9 +70,10 @@ subroutine cice_init use ice_diagnostics, only: init_diags use ice_domain, only: init_domain_blocks use ice_domain_size, only: ncat, nfsd - use ice_dyn_eap, only: init_eap, alloc_dyn_eap - use ice_dyn_shared, only: kdyn, init_dyn, alloc_dyn_shared + use ice_dyn_eap, only: init_eap + use ice_dyn_evp, only: init_evp use ice_dyn_vp, only: init_vp + use ice_dyn_shared, only: kdyn use ice_flux, only: init_coupler_flux, init_history_therm, & init_history_dyn, init_flux_atm, init_flux_ocn, alloc_flux use ice_forcing, only: init_forcing_ocn, init_forcing_atmo, & @@ -111,7 +112,6 @@ subroutine cice_init call alloc_grid ! allocate grid call alloc_arrays_column ! allocate column arrays call alloc_state ! allocate state - call alloc_dyn_shared ! allocate dyn shared (init_uvel,init_vvel) call alloc_flux_bgc ! allocate flux_bgc call alloc_flux ! allocate flux call init_ice_timers ! initialize all timers @@ -122,9 +122,9 @@ subroutine cice_init call init_calendar ! initialize some calendar stuff call init_hist (dt) ! initialize output history file - call init_dyn (dt_dyn) ! define dynamics parameters, variables - if (kdyn == 2) then - call alloc_dyn_eap ! allocate dyn_eap arrays + if (kdyn == 1) then + call init_evp + else if (kdyn == 2) then call init_eap ! 
define eap dynamics parameters, variables else if (kdyn == 3) then call init_vp ! define vp dynamics parameters, variables @@ -262,7 +262,7 @@ subroutine init_restart nt_alvl, nt_vlvl, nt_apnd, nt_hpnd, nt_ipnd, & nt_iage, nt_FY, nt_aero, nt_fsd - character(len=*),parameter :: subname = '(init_restart)' + character(len=*), parameter :: subname = '(init_restart)' call icepack_query_tracer_sizes(ntrcr_out=ntrcr) call icepack_warnings_flush(nu_diag) diff --git a/cicecore/drivers/mct/cesm1/CICE_InitMod.F90 b/cicecore/drivers/mct/cesm1/CICE_InitMod.F90 index a8bf96ad2..5efa18a28 100644 --- a/cicecore/drivers/mct/cesm1/CICE_InitMod.F90 +++ b/cicecore/drivers/mct/cesm1/CICE_InitMod.F90 @@ -72,9 +72,10 @@ subroutine cice_init(mpicom_ice) use ice_diagnostics, only: init_diags use ice_domain, only: init_domain_blocks use ice_domain_size, only: ncat, nfsd - use ice_dyn_eap, only: init_eap, alloc_dyn_eap - use ice_dyn_shared, only: kdyn, init_dyn, alloc_dyn_shared + use ice_dyn_eap, only: init_eap + use ice_dyn_evp, only: init_evp use ice_dyn_vp, only: init_vp + use ice_dyn_shared, only: kdyn use ice_flux, only: init_coupler_flux, init_history_therm, & init_history_dyn, init_flux_atm, init_flux_ocn, alloc_flux use ice_forcing, only: init_forcing_ocn, init_forcing_atmo, & @@ -125,7 +126,6 @@ subroutine cice_init(mpicom_ice) call alloc_grid ! allocate grid arrays call alloc_arrays_column ! allocate column arrays call alloc_state ! allocate state arrays - call alloc_dyn_shared ! allocate dyn shared arrays call alloc_flux_bgc ! allocate flux_bgc arrays call alloc_flux ! allocate flux arrays call init_ice_timers ! initialize all timers @@ -135,9 +135,9 @@ subroutine cice_init(mpicom_ice) call init_calendar ! initialize some calendar stuff call init_hist (dt) ! initialize output history file - call init_dyn (dt_dyn) ! define dynamics parameters, variables - if (kdyn == 2) then - call alloc_dyn_eap ! 
allocate dyn_eap arrays + if (kdyn == 1) then + call init_evp + else if (kdyn == 2) then call init_eap ! define eap dynamics parameters, variables else if (kdyn == 3) then call init_vp ! define vp dynamics parameters, variables diff --git a/cicecore/drivers/nuopc/cmeps/CICE_InitMod.F90 b/cicecore/drivers/nuopc/cmeps/CICE_InitMod.F90 index 5fbde9cce..0ba672f3d 100644 --- a/cicecore/drivers/nuopc/cmeps/CICE_InitMod.F90 +++ b/cicecore/drivers/nuopc/cmeps/CICE_InitMod.F90 @@ -36,7 +36,6 @@ subroutine cice_init1() use ice_domain , only: init_domain_blocks use ice_arrays_column , only: alloc_arrays_column use ice_state , only: alloc_state - use ice_dyn_shared , only: alloc_dyn_shared use ice_flux_bgc , only: alloc_flux_bgc use ice_flux , only: alloc_flux use ice_timers , only: timer_total, init_ice_timers, ice_timer_start @@ -59,7 +58,6 @@ subroutine cice_init1() call alloc_grid ! allocate grid arrays call alloc_arrays_column ! allocate column arrays call alloc_state ! allocate state arrays - call alloc_dyn_shared ! allocate dyn shared arrays call alloc_flux_bgc ! allocate flux_bgc arrays call alloc_flux ! allocate flux arrays call init_ice_timers ! initialize all timers @@ -79,9 +77,10 @@ subroutine cice_init2() use ice_communicate , only: my_task, master_task use ice_diagnostics , only: init_diags use ice_domain_size , only: ncat, nfsd, nfreq - use ice_dyn_eap , only: init_eap, alloc_dyn_eap - use ice_dyn_shared , only: kdyn, init_dyn + use ice_dyn_eap , only: init_eap + use ice_dyn_evp , only: init_evp use ice_dyn_vp , only: init_vp + use ice_dyn_shared , only: kdyn use ice_flux , only: init_coupler_flux, init_history_therm use ice_flux , only: init_history_dyn, init_flux_atm, init_flux_ocn use ice_forcing , only: init_snowtable @@ -107,9 +106,9 @@ subroutine cice_init2() call init_calendar ! initialize some calendar stuff call init_hist (dt) ! initialize output history file - call init_dyn (dt_dyn) ! 
define dynamics parameters, variables - if (kdyn == 2) then - call alloc_dyn_eap ! allocate dyn_eap arrays + if (kdyn == 1) then + call init_evp ! define evp dynamics parameters, variables + elseif (kdyn == 2) then call init_eap ! define eap dynamics parameters, variables else if (kdyn == 3) then call init_vp ! define vp dynamics parameters, variables diff --git a/cicecore/drivers/nuopc/cmeps/ice_comp_nuopc.F90 b/cicecore/drivers/nuopc/cmeps/ice_comp_nuopc.F90 index afdee5590..b94fcff05 100644 --- a/cicecore/drivers/nuopc/cmeps/ice_comp_nuopc.F90 +++ b/cicecore/drivers/nuopc/cmeps/ice_comp_nuopc.F90 @@ -21,7 +21,7 @@ module ice_comp_nuopc use ice_import_export , only : ice_import, ice_export, ice_advertise_fields, ice_realize_fields use ice_domain_size , only : nx_global, ny_global - use ice_grid , only : grid_type, init_grid2 + use ice_grid , only : grid_format, init_grid2 use ice_communicate , only : init_communicate, my_task, master_task, mpi_comm_ice use ice_calendar , only : force_restart_now, write_ic, init_calendar use ice_calendar , only : idate, mday, mmonth, myear, year_init @@ -576,7 +576,7 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) call t_startf ('cice_init1') call cice_init1 call t_stopf ('cice_init1') - + !----------------------------------------------------------------- ! Advertise fields !----------------------------------------------------------------- @@ -684,7 +684,7 @@ subroutine InitializeAdvertise(gcomp, importState, exportState, clock, rc) if (ChkErr(rc,__LINE__,u_FILE_u)) return ! Initialize the cice mesh and the cice mask - if (trim(grid_type) == 'setmask') then + if (trim(grid_format) == 'meshnc') then ! 
In this case cap code determines the mask file call ice_mesh_setmask_from_maskfile(ice_maskfile, ice_mesh, rc=rc) if (ChkErr(rc,__LINE__,u_FILE_u)) return diff --git a/cicecore/drivers/nuopc/cmeps/ice_import_export.F90 b/cicecore/drivers/nuopc/cmeps/ice_import_export.F90 index 2597dd88c..d991f9a64 100644 --- a/cicecore/drivers/nuopc/cmeps/ice_import_export.F90 +++ b/cicecore/drivers/nuopc/cmeps/ice_import_export.F90 @@ -27,7 +27,7 @@ module ice_import_export use ice_arrays_column , only : floe_rad_c, wave_spectrum use ice_state , only : vice, vsno, aice, aicen_init, trcr, trcrn use ice_grid , only : tlon, tlat, tarea, tmask, anglet, hm - use ice_grid , only : grid_type + use ice_grid , only : grid_format use ice_mesh_mod , only : ocn_gridcell_frac use ice_boundary , only : ice_HaloUpdate use ice_fileunits , only : nu_diag, flush_fileunit @@ -1067,7 +1067,7 @@ subroutine ice_export( exportState, rc ) call state_setexport(exportState, 'ice_fraction', input=ailohi, rc=rc) if (ChkErr(rc,__LINE__,u_FILE_u)) return - if (trim(grid_type) == 'setmask') then + if (trim(grid_format) == 'meshnc') then call state_setexport(exportState, 'ice_mask', input=ocn_gridcell_frac, rc=rc) if (ChkErr(rc,__LINE__,u_FILE_u)) return else diff --git a/cicecore/drivers/nuopc/dmi/CICE_InitMod.F90 b/cicecore/drivers/nuopc/dmi/CICE_InitMod.F90 index 22596429d..dc83c7703 100644 --- a/cicecore/drivers/nuopc/dmi/CICE_InitMod.F90 +++ b/cicecore/drivers/nuopc/dmi/CICE_InitMod.F90 @@ -15,6 +15,7 @@ module CICE_InitMod use ice_kinds_mod use ice_exit, only: abort_ice use ice_fileunits, only: init_fileunits, nu_diag + use ice_memusage, only: ice_memusage_init, ice_memusage_print use icepack_intfc, only: icepack_aggregate use icepack_intfc, only: icepack_init_itd, icepack_init_itd_hist use icepack_intfc, only: icepack_init_fsd_bounds, icepack_init_wave @@ -47,9 +48,9 @@ subroutine CICE_Initialize(mpi_comm) integer (kind=int_kind), optional, intent(in) :: mpi_comm ! 
communicator from nuopc character(len=*), parameter :: subname='(CICE_Initialize)' - !-------------------------------------------------------------------- - ! model initialization - !-------------------------------------------------------------------- + !-------------------------------------------------------------------- + ! model initialization + !-------------------------------------------------------------------- if (present(mpi_comm)) then call cice_init(mpi_comm) @@ -70,15 +71,16 @@ subroutine cice_init(mpi_comm) floe_binwidth, c_fsd_range use ice_state, only: alloc_state use ice_flux_bgc, only: alloc_flux_bgc - use ice_calendar, only: dt, dt_dyn, istep, istep1, write_ic, & + use ice_calendar, only: dt, dt_dyn, write_ic, & init_calendar, advance_timestep, calc_timesteps use ice_communicate, only: init_communicate, my_task, master_task use ice_diagnostics, only: init_diags use ice_domain, only: init_domain_blocks use ice_domain_size, only: ncat, nfsd - use ice_dyn_eap, only: init_eap, alloc_dyn_eap - use ice_dyn_shared, only: kdyn, init_dyn, alloc_dyn_shared + use ice_dyn_eap, only: init_eap + use ice_dyn_evp, only: init_evp use ice_dyn_vp, only: init_vp + use ice_dyn_shared, only: kdyn use ice_flux, only: init_coupler_flux, init_history_therm, & init_history_dyn, init_flux_atm, init_flux_ocn, alloc_flux use ice_forcing, only: init_forcing_ocn, init_forcing_atmo, & @@ -122,12 +124,17 @@ subroutine cice_init(mpi_comm) call input_zbgc ! vertical biogeochemistry namelist call count_tracers ! count tracers + ! Call this as early as possible, must be after memory_stats is read + if (my_task == master_task) then + call ice_memusage_init(nu_diag) + call ice_memusage_print(nu_diag,subname//':start') + endif + call init_domain_blocks ! set up block decomposition call init_grid1 ! domain distribution call alloc_grid ! allocate grid arrays call alloc_arrays_column ! allocate column arrays call alloc_state ! allocate state arrays - call alloc_dyn_shared ! 
allocate dyn shared arrays call alloc_flux_bgc ! allocate flux_bgc arrays call alloc_flux ! allocate flux arrays call init_ice_timers ! initialize all timers @@ -137,9 +144,9 @@ subroutine cice_init(mpi_comm) call init_calendar ! initialize some calendar stuff call init_hist (dt) ! initialize output history file - call init_dyn (dt_dyn) ! define dynamics parameters, variables - if (kdyn == 2) then - call alloc_dyn_eap ! allocate dyn_eap arrays + if (kdyn == 1) then + call init_evp + else if (kdyn == 2) then call init_eap ! define eap dynamics parameters, variables else if (kdyn == 3) then call init_vp ! define vp dynamics parameters, variables @@ -254,6 +261,10 @@ subroutine cice_init(mpi_comm) if (write_ic) call accum_hist(dt) ! write initial conditions + if (my_task == master_task) then + call ice_memusage_print(nu_diag,subname//':end') + endif + end subroutine cice_init !======================================================================= diff --git a/cicecore/drivers/standalone/cice/CICE_InitMod.F90 b/cicecore/drivers/standalone/cice/CICE_InitMod.F90 index f4a7a2ef1..22c16d8b0 100644 --- a/cicecore/drivers/standalone/cice/CICE_InitMod.F90 +++ b/cicecore/drivers/standalone/cice/CICE_InitMod.F90 @@ -71,9 +71,10 @@ subroutine cice_init use ice_diagnostics, only: init_diags use ice_domain, only: init_domain_blocks use ice_domain_size, only: ncat, nfsd - use ice_dyn_eap, only: init_eap, alloc_dyn_eap - use ice_dyn_shared, only: kdyn, init_dyn, alloc_dyn_shared + use ice_dyn_eap, only: init_eap + use ice_dyn_evp, only: init_evp use ice_dyn_vp, only: init_vp + use ice_dyn_shared, only: kdyn use ice_flux, only: init_coupler_flux, init_history_therm, & init_history_dyn, init_flux_atm, init_flux_ocn, alloc_flux use ice_forcing, only: init_forcing_ocn, init_forcing_atmo, & @@ -115,7 +116,6 @@ subroutine cice_init call alloc_grid ! allocate grid arrays call alloc_arrays_column ! allocate column arrays call alloc_state ! allocate state arrays - call alloc_dyn_shared ! 
allocate dyn shared arrays call alloc_flux_bgc ! allocate flux_bgc arrays call alloc_flux ! allocate flux arrays call init_ice_timers ! initialize all timers @@ -125,9 +125,9 @@ subroutine cice_init call init_calendar ! initialize some calendar stuff call init_hist (dt) ! initialize output history file - call init_dyn (dt_dyn) ! define dynamics parameters, variables - if (kdyn == 2) then - call alloc_dyn_eap ! allocate dyn_eap arrays + if (kdyn == 1) then + call init_evp + else if (kdyn == 2) then call init_eap ! define eap dynamics parameters, variables else if (kdyn == 3) then call init_vp ! define vp dynamics parameters, variables diff --git a/cicecore/drivers/unittest/gridavgchk/CICE_InitMod.F90 b/cicecore/drivers/unittest/gridavgchk/CICE_InitMod.F90 index 84d1a3a60..9ed1c5cbc 100644 --- a/cicecore/drivers/unittest/gridavgchk/CICE_InitMod.F90 +++ b/cicecore/drivers/unittest/gridavgchk/CICE_InitMod.F90 @@ -70,9 +70,10 @@ subroutine cice_init use ice_diagnostics, only: init_diags use ice_domain, only: init_domain_blocks use ice_domain_size, only: ncat, nfsd - use ice_dyn_eap, only: init_eap, alloc_dyn_eap - use ice_dyn_shared, only: kdyn, init_dyn, alloc_dyn_shared + use ice_dyn_eap, only: init_eap + use ice_dyn_evp, only: init_evp use ice_dyn_vp, only: init_vp + use ice_dyn_shared, only: kdyn use ice_flux, only: init_coupler_flux, init_history_therm, & init_history_dyn, init_flux_atm, init_flux_ocn, alloc_flux use ice_forcing, only: init_forcing_ocn, init_forcing_atmo, & @@ -113,7 +114,6 @@ subroutine cice_init call alloc_grid ! allocate grid arrays call alloc_arrays_column ! allocate column arrays call alloc_state ! allocate state arrays - call alloc_dyn_shared ! allocate dyn shared arrays call alloc_flux_bgc ! allocate flux_bgc arrays call alloc_flux ! allocate flux arrays call init_ice_timers ! initialize all timers @@ -123,9 +123,9 @@ subroutine cice_init call init_calendar ! initialize some calendar stuff call init_hist (dt) ! 
initialize output history file - call init_dyn (dt_dyn) ! define dynamics parameters, variables - if (kdyn == 2) then - call alloc_dyn_eap ! allocate dyn_eap arrays + if (kdyn == 1) then + call init_evp + else if (kdyn == 2) then call init_eap ! define eap dynamics parameters, variables else if (kdyn == 3) then call init_vp ! define vp dynamics parameters, variables diff --git a/cicecore/drivers/unittest/sumchk/CICE_InitMod.F90 b/cicecore/drivers/unittest/sumchk/CICE_InitMod.F90 index 84d1a3a60..8a5070d25 100644 --- a/cicecore/drivers/unittest/sumchk/CICE_InitMod.F90 +++ b/cicecore/drivers/unittest/sumchk/CICE_InitMod.F90 @@ -70,8 +70,10 @@ subroutine cice_init use ice_diagnostics, only: init_diags use ice_domain, only: init_domain_blocks use ice_domain_size, only: ncat, nfsd - use ice_dyn_eap, only: init_eap, alloc_dyn_eap - use ice_dyn_shared, only: kdyn, init_dyn, alloc_dyn_shared + use ice_dyn_eap, only: init_eap + use ice_dyn_evp, only: init_evp + use ice_dyn_vp, only: init_vp + use ice_dyn_shared, only: kdyn use ice_dyn_vp, only: init_vp use ice_flux, only: init_coupler_flux, init_history_therm, & init_history_dyn, init_flux_atm, init_flux_ocn, alloc_flux @@ -113,7 +115,6 @@ subroutine cice_init call alloc_grid ! allocate grid arrays call alloc_arrays_column ! allocate column arrays call alloc_state ! allocate state arrays - call alloc_dyn_shared ! allocate dyn shared arrays call alloc_flux_bgc ! allocate flux_bgc arrays call alloc_flux ! allocate flux arrays call init_ice_timers ! initialize all timers @@ -123,9 +124,9 @@ subroutine cice_init call init_calendar ! initialize some calendar stuff call init_hist (dt) ! initialize output history file - call init_dyn (dt_dyn) ! define dynamics parameters, variables - if (kdyn == 2) then - call alloc_dyn_eap ! allocate dyn_eap arrays + if (kdyn == 1) then + call init_evp + else if (kdyn == 2) then call init_eap ! define eap dynamics parameters, variables else if (kdyn == 3) then call init_vp ! 
define vp dynamics parameters, variables diff --git a/cicecore/shared/ice_fileunits.F90 b/cicecore/shared/ice_fileunits.F90 index d4823d175..72a40f513 100644 --- a/cicecore/shared/ice_fileunits.F90 +++ b/cicecore/shared/ice_fileunits.F90 @@ -328,32 +328,32 @@ end subroutine flush_fileunit subroutine goto_nml(iunit, nml, status) ! Search to namelist group within ice_in file. ! for compilers that do not allow optional namelists - + ! passed variables integer(kind=int_kind), intent(in) :: & iunit ! namelist file unit - + character(len=*), intent(in) :: & nml ! namelist to search for - + integer(kind=int_kind), intent(out) :: & status ! status of subrouine - + ! local variables character(len=char_len) :: & file_str, & ! string in file nml_str ! namelist string to test - + integer(kind=int_kind) :: & i, n ! dummy integers - - + + ! rewind file rewind(iunit) - + ! define test string with ampersand nml_str = '&' // trim(adjustl(nml)) - + ! search for the record containing the namelist group we're looking for do read(iunit, '(a)', iostat=status) file_str @@ -365,10 +365,10 @@ subroutine goto_nml(iunit, nml, status) end if end if end do - + ! backspace to namelist name in file backspace(iunit) - + end subroutine goto_nml !======================================================================= diff --git a/cicecore/shared/ice_init_column.F90 b/cicecore/shared/ice_init_column.F90 index 06ab79cdb..0d06b0aac 100644 --- a/cicecore/shared/ice_init_column.F90 +++ b/cicecore/shared/ice_init_column.F90 @@ -1477,6 +1477,14 @@ subroutine input_zbgc restart_zsal = .false. endif + if (solve_zsal) then + if (my_task == master_task) then + write(nu_diag,*) subname,' ERROR: solve_zsal=T deprecated' + endif + abort_flag = 101 + endif + +#ifdef UNDEPRECATE_ZSAL if (solve_zsal .and. 
nblyr < 1) then if (my_task == master_task) then write(nu_diag,*) subname,' ERROR: solve_zsal=T but 0 zsalinity tracers' @@ -1490,6 +1498,7 @@ subroutine input_zbgc endif abort_flag = 102 endif +#endif if (tr_brine .and. nblyr < 1 ) then if (my_task == master_task) then diff --git a/cicecore/version.txt b/cicecore/version.txt index 154cda3d7..953395fa1 100644 --- a/cicecore/version.txt +++ b/cicecore/version.txt @@ -1 +1 @@ -CICE 6.4.0 +CICE 6.4.1 diff --git a/configuration/scripts/cice.build b/configuration/scripts/cice.build index d75d74253..66b7b1321 100755 --- a/configuration/scripts/cice.build +++ b/configuration/scripts/cice.build @@ -128,12 +128,12 @@ endif ### List of source code directories (in order of importance). cat >! Filepath << EOF ${ICE_SANDBOX}/cicecore/drivers/${ICE_DRVOPT} -${ICE_SANDBOX}/cicecore/cicedynB/dynamics -${ICE_SANDBOX}/cicecore/cicedynB/general -${ICE_SANDBOX}/cicecore/cicedynB/analysis -${ICE_SANDBOX}/cicecore/cicedynB/infrastructure -${ICE_SANDBOX}/cicecore/cicedynB/infrastructure/io/$IODIR -${ICE_SANDBOX}/cicecore/cicedynB/infrastructure/comm/${ICE_COMMDIR} +${ICE_SANDBOX}/cicecore/cicedyn/dynamics +${ICE_SANDBOX}/cicecore/cicedyn/general +${ICE_SANDBOX}/cicecore/cicedyn/analysis +${ICE_SANDBOX}/cicecore/cicedyn/infrastructure +${ICE_SANDBOX}/cicecore/cicedyn/infrastructure/io/$IODIR +${ICE_SANDBOX}/cicecore/cicedyn/infrastructure/comm/${ICE_COMMDIR} ${ICE_SANDBOX}/cicecore/shared ${ICE_SANDBOX}/icepack/columnphysics EOF diff --git a/configuration/scripts/ice_in b/configuration/scripts/ice_in index 8262f34ec..32db0270b 100644 --- a/configuration/scripts/ice_in +++ b/configuration/scripts/ice_in @@ -254,6 +254,8 @@ update_ocn_f = .false. l_mpond_fresh = .false. tfrz_option = 'mushy' + saltflux_option = 'constant' + ice_ref_salinity = 4.0 oceanmixed_ice = .true. 
wave_spec_type = 'none' wave_spec_file = 'unknown_wave_spec_file' diff --git a/configuration/scripts/tests/base_suite.ts b/configuration/scripts/tests/base_suite.ts index 3007380ab..8685ab9a8 100644 --- a/configuration/scripts/tests/base_suite.ts +++ b/configuration/scripts/tests/base_suite.ts @@ -22,6 +22,7 @@ restart gx3 4x4 alt04 restart gx3 4x4 alt05 restart gx3 8x2 alt06 restart gx3 8x3 alt07 +restart gx3 8x3 saltflux restart gx3 18x2 debug,maskhalo restart gx3 6x2 alt01,debug,short restart gx3 8x2 alt02,debug,short @@ -70,8 +71,6 @@ restart gx3 4x4 histall,precision8,cdf64 smoke gx3 30x1 bgcz,histall smoke gx3 14x2 fsd12,histall smoke gx3 4x1 dynpicard -smoke gx3 8x2 diag24,run5day,zsal,debug -restart gx3 8x2 zsal restart gx3 8x2 gx3ncarbulk,debug restart gx3 4x4 gx3ncarbulk,diag1 smoke gx3 4x1 calcdragio diff --git a/configuration/scripts/tests/decomp_suite.ts b/configuration/scripts/tests/decomp_suite.ts index 9c82c5d27..8d47506d6 100644 --- a/configuration/scripts/tests/decomp_suite.ts +++ b/configuration/scripts/tests/decomp_suite.ts @@ -3,7 +3,8 @@ restart gx3 4x2x25x29x4 dslenderX2 restart gx1 64x1x16x16x10 dwghtfile restart gbox180 16x1x6x6x60 dspacecurve,debugblocks decomp gx3 4x2x25x29x5 none -sleep 30 +decomp gx3 4x2x25x29x5 dynpicard,reprosum +decomp gx3 4x2x25x29x5 dyneap restart gx3 1x1x50x58x4 droundrobin,thread restart_gx3_4x2x25x29x4_dslenderX2 restart gx3 4x1x25x116x1 dslenderX1,thread restart_gx3_4x2x25x29x4_dslenderX2 restart gx3 6x2x4x29x18 dspacecurve restart_gx3_4x2x25x29x4_dslenderX2 @@ -27,7 +28,6 @@ restart gx3 8x1x25x29x4 drakeX2,thread restart_gx3_4x2x25x2 smoke gx3 4x2x25x29x4 debug,run2day,dslenderX2 smoke gx1 64x1x16x16x10 debug,run2day,dwghtfile smoke gbox180 16x1x6x6x60 debug,run2day,dspacecurve,debugblocks -sleep 30 smoke gx3 1x1x25x58x8 debug,run2day,droundrobin,thread smoke_gx3_4x2x25x29x4_debug_dslenderX2_run2day smoke gx3 20x1x5x116x1 debug,run2day,dslenderX1,thread smoke_gx3_4x2x25x29x4_debug_dslenderX2_run2day smoke gx3 
6x2x4x29x18 debug,run2day,dspacecurve smoke_gx3_4x2x25x29x4_debug_dslenderX2_run2day diff --git a/configuration/scripts/tests/first_suite.ts b/configuration/scripts/tests/first_suite.ts index b42d917ea..bef24d9eb 100644 --- a/configuration/scripts/tests/first_suite.ts +++ b/configuration/scripts/tests/first_suite.ts @@ -1,6 +1,19 @@ # Test Grid PEs Sets BFB-compare smoke gx3 8x2 diag1,run5day +# decomp_suite restart gx3 4x2x25x29x4 dslenderX2 smoke gx3 4x2x25x29x4 debug,run2day,dslenderX2 -smoke gx3 4x2x25x29x4 dslenderX2,diag1,reprosum,cmplog +# reprosum_suite +smoke gx3 4x2x25x29x4 dslenderX2,diag1,reprosum +# travis_suite smoke gx3 1x2 run2day +# gridsys_suite +smoke gx3 1x1x100x116x1 reprosum,run10day +smoke gx1 32x1x16x16x32 reprosum,run10day +smoke gx3 1x1x100x116x1 reprosum,run10day,gridcd +smoke gx1 32x1x16x16x32 reprosum,run10day,gridcd +smoke gx3 1x1x100x116x1 reprosum,run10day,gridc +smoke gx1 32x1x16x16x32 reprosum,run10day,gridc +# perf_suite +smoke gx1 32x1x16x16x15 run2day,droundrobin +smoke gx1 64x1x16x16x8 run2day,droundrobin,thread diff --git a/configuration/scripts/tests/gridsys_suite.ts b/configuration/scripts/tests/gridsys_suite.ts index d9752073f..faf01344a 100644 --- a/configuration/scripts/tests/gridsys_suite.ts +++ b/configuration/scripts/tests/gridsys_suite.ts @@ -1,4 +1,11 @@ # Test Grid PEs Sets BFB-compare +smoke gx3 1x1x100x116x1 reprosum,run10day +smoke gx1 32x1x16x16x32 reprosum,run10day +smoke gx3 1x1x100x116x1 reprosum,run10day,gridcd +smoke gx1 32x1x16x16x32 reprosum,run10day,gridcd +smoke gx3 1x1x100x116x1 reprosum,run10day,gridc +smoke gx1 32x1x16x16x32 reprosum,run10day,gridc + smoke gx3 8x2 diag1,run5day smoke gx3 8x4 diag1,run5day,debug restart gx3 4x2 debug,diag1 @@ -12,11 +19,9 @@ smoke gbox80 4x2 boxclosed,boxforcee,run1day smoke gbox80 4x1 boxclosed,boxforcene,run1day,kmtislands smoke gbox80 4x2 boxopen,kmtislands,boxforcee,run1day smoke gbox80 2x2 boxclosed,boxforcen,run1day,vargrid -smoke gx3 1x1x100x116x1 
reprosum,run10day smoke gx3 1x1x25x29x16 reprosum,run10day,dwblockall smoke_gx3_1x1x100x116x1_reprosum_run10day smoke gx3 1x1x5x4x580 reprosum,run10day,dwblockall smoke_gx3_1x1x100x116x1_reprosum_run10day smoke gx3 1x1x5x4x580 reprosum,run10day smoke_gx3_1x1x100x116x1_reprosum_run10day -smoke gx1 32x1x16x16x32 reprosum,run10day smoke gx1 32x1x16x16x32 reprosum,run10day,cmplogrest,dwblockall smoke_gx1_32x1x16x16x32_reprosum_run10day smoke gx1 32x1x16x12x40 reprosum,run10day,cmplogrest,dwblockall smoke_gx1_32x1x16x16x32_reprosum_run10day smoke gx1 32x1x16x12x40 reprosum,run10day,cmplogrest smoke_gx1_32x1x16x16x32_reprosum_run10day @@ -34,11 +39,9 @@ smoke gbox80 4x2 boxclosed,boxforcee,run1day,gridcd smoke gbox80 4x1 boxclosed,boxforcene,run1day,kmtislands,gridcd smoke gbox80 4x2 boxopen,kmtislands,boxforcee,run1day,gridcd smoke gbox80 2x2 boxclosed,boxforcen,run1day,vargrid,gridcd -smoke gx3 1x1x100x116x1 reprosum,run10day,gridcd smoke gx3 1x1x25x29x16 reprosum,run10day,dwblockall,gridcd smoke_gx3_1x1x100x116x1_gridcd_reprosum_run10day smoke gx3 1x1x5x4x580 reprosum,run10day,dwblockall,gridcd smoke_gx3_1x1x100x116x1_gridcd_reprosum_run10day smoke gx3 1x1x5x4x580 reprosum,run10day,gridcd smoke_gx3_1x1x100x116x1_gridcd_reprosum_run10day -smoke gx1 32x1x16x16x32 reprosum,run10day,gridcd smoke gx1 32x1x16x16x32 reprosum,run10day,cmplogrest,dwblockall,gridcd smoke_gx1_32x1x16x16x32_gridcd_reprosum_run10day smoke gx1 32x1x16x12x40 reprosum,run10day,cmplogrest,dwblockall,gridcd smoke_gx1_32x1x16x16x32_gridcd_reprosum_run10day smoke gx1 32x1x16x12x40 reprosum,run10day,cmplogrest,gridcd smoke_gx1_32x1x16x16x32_gridcd_reprosum_run10day @@ -56,11 +59,9 @@ smoke gbox80 4x2 boxclosed,boxforcee,run1day,gridc smoke gbox80 4x1 boxclosed,boxforcene,run1day,kmtislands,gridc smoke gbox80 4x2 boxopen,kmtislands,boxforcee,run1day,gridc smoke gbox80 2x2 boxclosed,boxforcen,run1day,vargrid,gridc -smoke gx3 1x1x100x116x1 reprosum,run10day,gridc smoke gx3 1x1x25x29x16 
reprosum,run10day,dwblockall,gridc smoke_gx3_1x1x100x116x1_gridc_reprosum_run10day smoke gx3 1x1x5x4x580 reprosum,run10day,dwblockall,gridc smoke_gx3_1x1x100x116x1_gridc_reprosum_run10day smoke gx3 1x1x5x4x580 reprosum,run10day,gridc smoke_gx3_1x1x100x116x1_gridc_reprosum_run10day -smoke gx1 32x1x16x16x32 reprosum,run10day,gridc smoke gx1 32x1x16x16x32 reprosum,run10day,cmplogrest,dwblockall,gridc smoke_gx1_32x1x16x16x32_gridc_reprosum_run10day smoke gx1 32x1x16x12x40 reprosum,run10day,cmplogrest,dwblockall,gridc smoke_gx1_32x1x16x16x32_gridc_reprosum_run10day smoke gx1 32x1x16x12x40 reprosum,run10day,cmplogrest,gridc smoke_gx1_32x1x16x16x32_gridc_reprosum_run10day diff --git a/configuration/scripts/tests/omp_suite.ts b/configuration/scripts/tests/omp_suite.ts index 686fa72db..62630e874 100644 --- a/configuration/scripts/tests/omp_suite.ts +++ b/configuration/scripts/tests/omp_suite.ts @@ -1,5 +1,7 @@ # Test Grid PEs Sets BFB-compare +#gridB + smoke gx3 8x4 diag1,reprosum,run10day smoke gx3 6x2 alt01,reprosum,run10day smoke gx3 8x2 alt02,reprosum,run10day @@ -15,7 +17,6 @@ smoke gx3 14x2 fsd12,reprosum,run10day smoke gx3 11x2 isotope,reprosum,run10day smoke gx3 8x4 snwitdrdg,snwgrain,icdefault,reprosum,run10day smoke gx3 6x4 dynpicard,reprosum,run10day -smoke gx3 8x3 zsal,reprosum,run10day smoke gx3 1x1x100x116x1 reprosum,run10day,thread smoke gbox128 8x2 reprosum,run10day @@ -25,6 +26,62 @@ smoke gbox128 14x2 boxrestore,reprosum,run10day smoke gbox80 4x5 box2001,reprosum,run10day smoke gbox80 11x3 boxslotcyl,reprosum,run10day +#gridC + +smoke gx3 8x4 diag1,reprosum,run10day,gridc +smoke gx3 6x2 alt01,reprosum,run10day,gridc +smoke gx3 8x2 alt02,reprosum,run10day,gridc +#smoke gx3 12x2 alt03,droundrobin,reprosum,run10day,gridc +smoke gx3 4x4 alt04,reprosum,run10day,gridc +smoke gx3 4x4 alt05,reprosum,run10day,gridc +smoke gx3 8x2 alt06,reprosum,run10day,gridc +smoke gx3 7x2 alt07,reprosum,run10day,gridc +smoke gx3 8x2 bgczm,reprosum,run10day,gridc +smoke gx1 15x2 
reprosum,run10day,gridc +smoke gx1 15x2 seabedprob,reprosum,run10day,gridc +smoke gx3 14x2 fsd12,reprosum,run10day,gridc +smoke gx3 11x2 isotope,reprosum,run10day,gridc +smoke gx3 8x4 snwitdrdg,snwgrain,icdefault,reprosum,run10day,gridc +#smoke gx3 6x4 dynpicard,reprosum,run10day,gridc +smoke gx3 1x1x100x116x1 reprosum,run10day,gridc,thread + +smoke gbox128 8x2 reprosum,run10day,gridc +smoke gbox128 12x2 boxnodyn,reprosum,run10day,gridc +#smoke gbox128 9x2 boxadv,reprosum,run10day,gridc +smoke gbox128 14x2 boxrestore,reprosum,run10day,gridc +smoke gbox80 4x5 box2001,reprosum,run10day,gridc +smoke gbox80 11x3 boxslotcyl,reprosum,run10day,gridc + +#gridCD + +smoke gx3 8x4 diag1,reprosum,run10day,gridcd +smoke gx3 6x2 alt01,reprosum,run10day,gridcd +smoke gx3 8x2 alt02,reprosum,run10day,gridcd +#smoke gx3 12x2 alt03,droundrobin,reprosum,run10day,gridcd +smoke gx3 4x4 alt04,reprosum,run10day,gridcd +smoke gx3 4x4 alt05,reprosum,run10day,gridcd +smoke gx3 8x2 alt06,reprosum,run10day,gridcd +smoke gx3 7x2 alt07,reprosum,run10day,gridcd +smoke gx3 8x2 bgczm,reprosum,run10day,gridcd +smoke gx1 15x2 reprosum,run10day,gridcd +smoke gx1 15x2 seabedprob,reprosum,run10day,gridcd +smoke gx3 14x2 fsd12,reprosum,run10day,gridcd +smoke gx3 11x2 isotope,reprosum,run10day,gridcd +smoke gx3 8x4 snwitdrdg,snwgrain,icdefault,reprosum,run10day,gridcd +#smoke gx3 6x4 dynpicard,reprosum,run10day,gridcd +smoke gx3 1x1x100x116x1 reprosum,run10day,gridcd,thread + +smoke gbox128 8x2 reprosum,run10day,gridcd +smoke gbox128 12x2 boxnodyn,reprosum,run10day,gridcd +#smoke gbox128 9x2 boxadv,reprosum,run10day,gridcd +smoke gbox128 14x2 boxrestore,reprosum,run10day,gridcd +smoke gbox80 4x5 box2001,reprosum,run10day,gridcd +smoke gbox80 11x3 boxslotcyl,reprosum,run10day,gridcd + +sleep 180 + +#gridB + smoke gx3 4x2 diag1,reprosum,run10day,cmplogrest smoke_gx3_8x4_diag1_reprosum_run10day smoke gx3 4x1 diag1,reprosum,run10day,cmplogrest,thread smoke_gx3_8x4_diag1_reprosum_run10day smoke gx3 8x1 
alt01,reprosum,run10day,cmplogrest,thread smoke_gx3_6x2_alt01_reprosum_run10day @@ -41,7 +98,6 @@ smoke gx3 8x1 fsd12,reprosum,run10day,cmplogrest,thread smoke gx3 8x1 isotope,reprosum,run10day,cmplogrest,thread smoke_gx3_11x2_isotope_reprosum_run10day smoke gx3 8x1 snwitdrdg,snwgrain,icdefault,reprosum,run10day,cmplogrest,thread smoke_gx3_8x4_icdefault_reprosum_run10day_snwgrain_snwitdrdg smoke gx3 8x1 dynpicard,reprosum,run10day,cmplogrest,thread smoke_gx3_6x4_dynpicard_reprosum_run10day -smoke gx3 8x1 zsal,reprosum,run10day,cmplogrest,thread smoke_gx3_8x3_reprosum_run10day_zsal smoke gx3 4x2x25x29x4 reprosum,run10day smoke_gx3_1x1x100x116x1_reprosum_run10day_thread smoke gx3 8x4x5x4x80 reprosum,run10day smoke_gx3_1x1x100x116x1_reprosum_run10day_thread @@ -54,31 +110,6 @@ smoke gbox80 8x1 boxslotcyl,reprosum,run10day,cmplogrest,thread #gridC -smoke gx3 8x4 diag1,reprosum,run10day,gridc -smoke gx3 6x2 alt01,reprosum,run10day,gridc -smoke gx3 8x2 alt02,reprosum,run10day,gridc -#smoke gx3 12x2 alt03,droundrobin,reprosum,run10day,gridc -smoke gx3 4x4 alt04,reprosum,run10day,gridc -smoke gx3 4x4 alt05,reprosum,run10day,gridc -smoke gx3 8x2 alt06,reprosum,run10day,gridc -smoke gx3 7x2 alt07,reprosum,run10day,gridc -smoke gx3 8x2 bgczm,reprosum,run10day,gridc -smoke gx1 15x2 reprosum,run10day,gridc -smoke gx1 15x2 seabedprob,reprosum,run10day,gridc -smoke gx3 14x2 fsd12,reprosum,run10day,gridc -smoke gx3 11x2 isotope,reprosum,run10day,gridc -smoke gx3 8x4 snwitdrdg,snwgrain,icdefault,reprosum,run10day,gridc -#smoke gx3 6x4 dynpicard,reprosum,run10day,gridc -smoke gx3 8x3 zsal,reprosum,run10day,gridc -smoke gx3 1x1x100x116x1 reprosum,run10day,gridc,thread - -smoke gbox128 8x2 reprosum,run10day,gridc -smoke gbox128 12x2 boxnodyn,reprosum,run10day,gridc -#smoke gbox128 9x2 boxadv,reprosum,run10day,gridc -smoke gbox128 14x2 boxrestore,reprosum,run10day,gridc -smoke gbox80 4x5 box2001,reprosum,run10day,gridc -smoke gbox80 11x3 boxslotcyl,reprosum,run10day,gridc - smoke gx3 
4x2 diag1,reprosum,run10day,cmplogrest,gridc smoke_gx3_8x4_diag1_gridc_reprosum_run10day smoke gx3 4x1 diag1,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_8x4_diag1_gridc_reprosum_run10day smoke gx3 8x1 alt01,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_6x2_alt01_gridc_reprosum_run10day @@ -95,7 +126,6 @@ smoke gx3 8x1 fsd12,reprosum,run10day,cmplogrest,thread,grid smoke gx3 8x1 isotope,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_11x2_gridc_isotope_reprosum_run10day smoke gx3 8x1 snwitdrdg,snwgrain,icdefault,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_8x4_gridc_icdefault_reprosum_run10day_snwgrain_snwitdrdg #smoke gx3 8x1 dynpicard,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_6x4_dynpicard_gridc_reprosum_run10day -smoke gx3 8x1 zsal,reprosum,run10day,cmplogrest,thread,gridc smoke_gx3_8x3_gridc_reprosum_run10day_zsal smoke gx3 4x2x25x29x4 reprosum,run10day,gridc smoke_gx3_1x1x100x116x1_gridc_reprosum_run10day_thread smoke gx3 8x4x5x4x80 reprosum,run10day,gridc smoke_gx3_1x1x100x116x1_gridc_reprosum_run10day_thread @@ -108,31 +138,6 @@ smoke gbox80 8x1 boxslotcyl,reprosum,run10day,cmplogrest,thread #gridCD -smoke gx3 8x4 diag1,reprosum,run10day,gridcd -smoke gx3 6x2 alt01,reprosum,run10day,gridcd -smoke gx3 8x2 alt02,reprosum,run10day,gridcd -#smoke gx3 12x2 alt03,droundrobin,reprosum,run10day,gridcd -smoke gx3 4x4 alt04,reprosum,run10day,gridcd -smoke gx3 4x4 alt05,reprosum,run10day,gridcd -smoke gx3 8x2 alt06,reprosum,run10day,gridcd -smoke gx3 7x2 alt07,reprosum,run10day,gridcd -smoke gx3 8x2 bgczm,reprosum,run10day,gridcd -smoke gx1 15x2 reprosum,run10day,gridcd -smoke gx1 15x2 seabedprob,reprosum,run10day,gridcd -smoke gx3 14x2 fsd12,reprosum,run10day,gridcd -smoke gx3 11x2 isotope,reprosum,run10day,gridcd -smoke gx3 8x4 snwitdrdg,snwgrain,icdefault,reprosum,run10day,gridcd -#smoke gx3 6x4 dynpicard,reprosum,run10day,gridcd -smoke gx3 8x3 zsal,reprosum,run10day,gridcd -smoke gx3 1x1x100x116x1 reprosum,run10day,gridcd,thread - 
-smoke gbox128 8x2 reprosum,run10day,gridcd -smoke gbox128 12x2 boxnodyn,reprosum,run10day,gridcd -#smoke gbox128 9x2 boxadv,reprosum,run10day,gridcd -smoke gbox128 14x2 boxrestore,reprosum,run10day,gridcd -smoke gbox80 4x5 box2001,reprosum,run10day,gridcd -smoke gbox80 11x3 boxslotcyl,reprosum,run10day,gridcd - smoke gx3 4x2 diag1,reprosum,run10day,cmplogrest,gridcd smoke_gx3_8x4_diag1_gridcd_reprosum_run10day smoke gx3 4x1 diag1,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_8x4_diag1_gridcd_reprosum_run10day smoke gx3 8x1 alt01,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_6x2_alt01_gridcd_reprosum_run10day @@ -149,7 +154,6 @@ smoke gx3 8x1 fsd12,reprosum,run10day,cmplogrest,thread,grid smoke gx3 8x1 isotope,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_11x2_gridcd_isotope_reprosum_run10day smoke gx3 8x1 snwitdrdg,snwgrain,icdefault,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_8x4_gridcd_icdefault_reprosum_run10day_snwgrain_snwitdrdg #smoke gx3 8x1 dynpicard,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_6x4_dynpicard_gridcd_reprosum_run10day -smoke gx3 8x1 zsal,reprosum,run10day,cmplogrest,thread,gridcd smoke_gx3_8x3_gridcd_reprosum_run10day_zsal smoke gx3 4x2x25x29x4 reprosum,run10day,gridcd smoke_gx3_1x1x100x116x1_gridcd_reprosum_run10day_thread smoke gx3 8x4x5x4x80 reprosum,run10day,gridcd smoke_gx3_1x1x100x116x1_gridcd_reprosum_run10day_thread diff --git a/configuration/scripts/tests/perf_suite.ts b/configuration/scripts/tests/perf_suite.ts index 9a17d8a55..a4d8ef588 100644 --- a/configuration/scripts/tests/perf_suite.ts +++ b/configuration/scripts/tests/perf_suite.ts @@ -1,25 +1,24 @@ # Test Grid PEs Sets BFB-compare -smoke gx1 1x1x320x384x1 run2day,droundrobin +smoke gx1 32x1x16x16x15 run2day,droundrobin smoke gx1 64x1x16x16x8 run2day,droundrobin,thread -sleep 180 # -smoke gx1 1x1x320x384x1 run2day,droundrobin -smoke gx1 1x1x160x192x4 run2day,droundrobin smoke_gx1_1x1x320x384x1_droundrobin_run2day -smoke gx1 1x1x80x96x16 
run2day,droundrobin smoke_gx1_1x1x320x384x1_droundrobin_run2day -smoke gx1 1x1x40x48x64 run2day,droundrobin smoke_gx1_1x1x320x384x1_droundrobin_run2day -smoke gx1 1x1x20x24x256 run2day,droundrobin smoke_gx1_1x1x320x384x1_droundrobin_run2day +smoke gx1 1x1x320x384x1 run2day,droundrobin smoke_gx1_32x1x16x16x15_droundrobin_run2day +smoke gx1 1x1x160x192x4 run2day,droundrobin smoke_gx1_32x1x16x16x15_droundrobin_run2day +smoke gx1 1x1x80x96x16 run2day,droundrobin smoke_gx1_32x1x16x16x15_droundrobin_run2day +smoke gx1 1x1x40x48x64 run2day,droundrobin smoke_gx1_32x1x16x16x15_droundrobin_run2day +smoke gx1 1x1x20x24x256 run2day,droundrobin smoke_gx1_32x1x16x16x15_droundrobin_run2day # -smoke gx1 1x1x16x16x480 run2day,droundrobin smoke_gx1_1x1x320x384x1_droundrobin_run2day -smoke gx1 2x1x16x16x240 run2day,droundrobin smoke_gx1_1x1x320x384x1_droundrobin_run2day -smoke gx1 4x1x16x16x120 run2day,droundrobin smoke_gx1_1x1x320x384x1_droundrobin_run2day -smoke gx1 8x1x16x16x60 run2day,droundrobin smoke_gx1_1x1x320x384x1_droundrobin_run2day -smoke gx1 16x1x16x16x30 run2day,droundrobin smoke_gx1_1x1x320x384x1_droundrobin_run2day -smoke gx1 32x1x16x16x15 run2day,droundrobin smoke_gx1_1x1x320x384x1_droundrobin_run2day -smoke gx1 64x1x16x16x8 run2day,droundrobin smoke_gx1_1x1x320x384x1_droundrobin_run2day -smoke gx1 128x1x16x16x4 run2day,droundrobin smoke_gx1_1x1x320x384x1_droundrobin_run2day +smoke gx1 1x1x16x16x480 run2day,droundrobin smoke_gx1_32x1x16x16x15_droundrobin_run2day +smoke gx1 2x1x16x16x240 run2day,droundrobin smoke_gx1_32x1x16x16x15_droundrobin_run2day +smoke gx1 4x1x16x16x120 run2day,droundrobin smoke_gx1_32x1x16x16x15_droundrobin_run2day +smoke gx1 8x1x16x16x60 run2day,droundrobin smoke_gx1_32x1x16x16x15_droundrobin_run2day +smoke gx1 16x1x16x16x30 run2day,droundrobin smoke_gx1_32x1x16x16x15_droundrobin_run2day +#smoke gx1 32x1x16x16x15 run2day,droundrobin +smoke gx1 64x1x16x16x8 run2day,droundrobin smoke_gx1_32x1x16x16x15_droundrobin_run2day +smoke gx1 128x1x16x16x4 
run2day,droundrobin smoke_gx1_32x1x16x16x15_droundrobin_run2day # -smoke gx1 64x1x16x16x8 run2day,droundrobin smoke_gx1_1x1x320x384x1_droundrobin_run2day -smoke gx1 64x1x16x16x8 run2day,droundrobin,thread +smoke gx1 64x1x16x16x8 run2day,droundrobin smoke_gx1_32x1x16x16x15_droundrobin_run2day +#smoke gx1 64x1x16x16x8 run2day,droundrobin,thread smoke gx1 32x2x16x16x16 run2day,droundrobin smoke_gx1_64x1x16x16x8_droundrobin_run2day_thread smoke gx1 16x4x16x16x32 run2day,droundrobin smoke_gx1_64x1x16x16x8_droundrobin_run2day_thread smoke gx1 8x8x16x16x64 run2day,droundrobin smoke_gx1_64x1x16x16x8_droundrobin_run2day_thread diff --git a/doc/source/cice_index.rst b/doc/source/cice_index.rst index a8b9d08f1..0e9d21517 100644 --- a/doc/source/cice_index.rst +++ b/doc/source/cice_index.rst @@ -350,7 +350,7 @@ either Celsius or Kelvin units). Deprecated parameters are listed at the end. "ice_ic", "choice of initial conditions (see :ref:`tab-ic`)", "" "ice_stdout", "unit number for standard output", "" "ice_stderr", "unit number for standard error output", "" - "ice_ref_salinity", "reference salinity for ice–ocean exchanges", "4. ppt" + "ice_ref_salinity", "reference salinity for ice–ocean exchanges", "" "icells", "number of grid cells with specified property (for vectorization)", "" "iceruf", "ice surface roughness at atmosphere interface", "5.\ :math:`\times`\ 10\ :math:`^{-4}` m" "iceruf_ocn", "under-ice roughness (at ocean interface)", "0.03 m" @@ -677,6 +677,7 @@ either Celsius or Kelvin units). Deprecated parameters are listed at the end. "Tf", "freezing temperature", "C" "Tffresh", "freezing temp of fresh ice", "273.15 K" "tfrz_option", "form of ocean freezing temperature", "" + "saltflux_option", "form of coupled salt flux ", "" "thinS", "minimum ice thickness for brine tracer", "" "timer_stats", "logical to turn on extra timer statistics", ".false." 
"timesecs", "total elapsed time in seconds", "s" diff --git a/doc/source/conf.py b/doc/source/conf.py index a1b2871ae..88b98bc09 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -62,9 +62,9 @@ # built documents. # # The short X.Y version. -version = u'6.4.0' +version = u'6.4.1' # The full version, including alpha/beta/rc tags. -version = u'6.4.0' +version = u'6.4.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/doc/source/developer_guide/dg_dynamics.rst b/doc/source/developer_guide/dg_dynamics.rst index 48dead1cb..1f1430e71 100644 --- a/doc/source/developer_guide/dg_dynamics.rst +++ b/doc/source/developer_guide/dg_dynamics.rst @@ -9,14 +9,14 @@ Dynamics The CICE **cicecore/** directory consists of the non icepack source code. Within that directory there are the following subdirectories -**cicecore/cicedynB/analysis** contains higher level history and diagnostic routines. +**cicecore/cicedyn/analysis** contains higher level history and diagnostic routines. -**cicecore/cicedynB/dynamics** contains all the dynamical evp, eap, and transport routines. +**cicecore/cicedyn/dynamics** contains all the dynamical evp, eap, and transport routines. -**cicecore/cicedynB/general** contains routines associated with forcing, flux calculation, +**cicecore/cicedyn/general** contains routines associated with forcing, flux calculation, initialization, and model timestepping. -**cicecore/cicedynB/infrastructure** contains most of the low-level infrastructure associated +**cicecore/cicedyn/infrastructure** contains most of the low-level infrastructure associated with communication (halo updates, gather, scatter, global sums, etc) and I/O reading and writing binary and netcdf files. @@ -29,7 +29,7 @@ coupling layers. Dynamical Solvers -------------------- -The dynamics solvers are found in **cicecore/cicedynB/dynamics/**. 
A couple of different solvers are +The dynamics solvers are found in **cicecore/cicedyn/dynamics/**. A couple of different solvers are available including EVP, EAP and VP. The dynamics solver is specified in namelist with the ``kdyn`` variable. ``kdyn=1`` is evp, ``kdyn=2`` is eap, ``kdyn=3`` is VP. @@ -41,7 +41,7 @@ with the tripole grid. Transport ----------------- -The transport (advection) methods are found in **cicecore/cicedynB/dynamics/**. Two methods are supported, +The transport (advection) methods are found in **cicecore/cicedyn/dynamics/**. Two methods are supported, upwind and remap. These are set in namelist via the ``advection`` variable. Transport can be disabled with the ``ktransport`` namelist variable. @@ -94,11 +94,11 @@ Two low-level communications packages, mpi and serial, are provided as part of C provides a middle layer between the model and the underlying libraries. Only the CICE mpi or serial directories are compiled with CICE, not both. -**cicedynB/infrastructure/comm/mpi/** +**cicedyn/infrastructure/comm/mpi/** is based on MPI and provides various methods to do halo updates, global sums, gather/scatter, broadcasts and similar using some fairly generic interfaces to isolate the MPI calls in the code. -**cicedynB/infrastructure/comm/serial/** support the same interfaces, but operates +**cicedyn/infrastructure/comm/serial/** support the same interfaces, but operates in shared memory mode with no MPI. The serial library will be used, by default in the CICE scripts, if the number of MPI tasks is set to 1. The serial library allows the model to be run on a single core or with OpenMP parallelism only without requiring an MPI library. @@ -112,15 +112,15 @@ Only one of the three IO directories can be built with CICE. The CICE scripts w by default, but other options can be selecting by setting ``ICE_IOTYPE`` in **cice.settings** in the case. This has to be set before CICE is built. 
-**cicedynB/infrastructure/io/io_netcdf/** is the +**cicedyn/infrastructure/io/io_netcdf/** is the default for the standalone CICE model, and it supports writing history and restart files in netcdf format using standard netcdf calls. It does this by writing from and reading to the root task and gathering and scattering fields from the root task to support model parallelism. -**cicedynB/infrastructure/io/io_binary/** supports files in binary format using a gather/scatter +**cicedyn/infrastructure/io/io_binary/** supports files in binary format using a gather/scatter approach and reading to and writing from the root task. -**cicedynB/infrastructure/io/io_pio/** support reading and writing through the pio interface. pio +**cicedyn/infrastructure/io/io_pio/** support reading and writing through the pio interface. pio is a parallel io library (https://github.com/NCAR/ParallelIO) that supports reading and writing of binary and netcdf file through various interfaces including netcdf and pnetcdf. pio is generally more parallel in memory even when using serial netcdf than the standard gather/scatter methods, diff --git a/doc/source/science_guide/sg_horiztrans.rst b/doc/source/science_guide/sg_horiztrans.rst index 2cca81469..d66046465 100644 --- a/doc/source/science_guide/sg_horiztrans.rst +++ b/doc/source/science_guide/sg_horiztrans.rst @@ -11,7 +11,7 @@ thickness category :math:`n`. Equation :eq:`transport-ai` describes the conservation of ice area under horizontal transport. It is obtained from Equation :eq:`transport-g` by discretizing :math:`g` and neglecting the second and third terms on the right-hand side, which are treated -separately (As described in the `Icepack Documentation `_). +separately (As described in the `Icepack Documentation `_). There are similar conservation equations for ice volume (Equation :eq:`transport-vi`), snow volume (Equation :eq:`transport-vs`), ice @@ -33,13 +33,13 @@ introductory comments in **ice\_transport\_remap.F90**. 
Prognostic equations for ice and/or snow density may be included in future model versions but have not yet been implemented. -One transport scheme is available, the incremental +Two transport schemes are available: upwind and the incremental remapping scheme of :cite:`Dukowicz00` as modified for sea ice by :cite:`Lipscomb04`. - The upwind scheme uses velocity points at the East and North face (i.e. :math:`uvelE=u` at the E point and :math:`vvelN=v` at the N point) of a T gridcell. As such, the prognostic C grid velocity components (:math:`uvelE` and :math:`vvelN`) can be passed directly to the upwind transport scheme. If the upwind scheme is used with the B grid, the B grid velocities, :math:`uvelU` and :math:`vvelU` (respectively :math:`u` and :math:`v` at the U point) are interpolated to the E and N points first. (Note however that the upwind scheme does not transport all potentially available tracers.) -- Remapping is naturally a B-grid transport scheme as the corner (U point) velocity components :math:`uvelU` and :math:`vvelU` are used to calculate departure points. Nevertheless, the remapping scheme can also be used with the C grid by first interpolating :math:`uvelE` and :math:`vvelN` to the U points. +- The remapping scheme uses :math:`uvelU` and :math:`vvelU` if l_fixed_area is false and :math:`uvelE` and :math:`vvelN` if l_fixed_area is true. l_fixed_area is hardcoded to false by default and further described below. As such, the B grid velocities (:math:`uvelU` and :math:`vvelU`) are used directly in the remapping scheme, while the C grid velocities (:math:`uvelE` and :math:`vvelN`) are interpolated to U points first. If l_fixed_area is changed to true, then the reverse is true. The C grid velocities are used directly and the B grid velocities are interpolated. The remapping scheme has several desirable features: @@ -98,7 +98,7 @@ below. After the transport calculation, the sum of ice and open water areas within a grid cell may not add up to 1. 
The mechanical deformation parameterization in -`Icepack `_ +`Icepack `_ corrects this issue by ridging the ice and creating open water such that the ice and open water areas again add up to 1. @@ -477,16 +477,9 @@ Remote Sensing Center (Norway), who applied an earlier version of the CICE remapping scheme to an ocean model. The implementation in CICE is somewhat more general, allowing for departure regions lying on both sides of a cell edge. The extra triangle is constrained to lie in one -but not both of the grid cells that share the edge. - -The default value for the B grid is `l\_fixed\_area` = false. However, -idealized tests with the C grid have shown that prognostic fields such -as sea ice concentration exhibit a checkerboard pattern with -`l\_fixed\_area` = false. The logical `l\_fixed\_area` is therefore set -to true when using the C grid. The edge areas `edgearea\_e` and `edgearea\_n` -are in this case calculated with the C grid velocity components :math:`uvelE` -and :math:`vvelN`. - +but not both of the grid cells that share the edge. Since this option +has yet to be fully tested in CICE, the current default is +`l\_fixed\_area` = false. We made one other change in the scheme of :cite:`Dukowicz00` for locating triangles. In their paper, departure points are defined by diff --git a/doc/source/science_guide/sg_tracers.rst b/doc/source/science_guide/sg_tracers.rst index b75edfb00..cbecb9310 100644 --- a/doc/source/science_guide/sg_tracers.rst +++ b/doc/source/science_guide/sg_tracers.rst @@ -92,7 +92,6 @@ is not in use. " ","nslyr","vsno","nt_rhos"," " " ","nslyr","vsno","nt_smice"," " " ","nslyr","vsno","nt_smliq"," " - "solve_zsal", "n_trzs", "fbri or (a,v)ice", "nt_bgc_S", " " "tr_bgc_N", "n_algae", "fbri or (a,v)ice", "nt_bgc_N", "nlt_bgc_N" "tr_bgc_Nit", " ", "fbri or (a,v)ice", "nt_bgc_Nit", "nlt_bgc_Nit" "tr_bgc_C", "n_doc", "fbri or (a,v)ice", "nt_bgc_DOC", "nlt_bgc_DOC" @@ -112,6 +111,7 @@ is not in use. " ", "1", "fbri", "nt_zbgc_frac", " " .. 
+ "solve_zsal", "n_trzs", "fbri or (a,v)ice", "nt_bgc_S", " " "tr_pond_cesm", "2", "aice", "nt_apnd", " " " ", " ", "apnd", "nt_vpnd", " " diff --git a/doc/source/user_guide/ug_case_settings.rst b/doc/source/user_guide/ug_case_settings.rst index a34c69822..587adcd56 100644 --- a/doc/source/user_guide/ug_case_settings.rst +++ b/doc/source/user_guide/ug_case_settings.rst @@ -644,6 +644,7 @@ forcing_nml "", "``eastblock``", "ice block covering about 25 percent of domain at the east edge of the domain", "" "", "``latsst``", "ice dependent on latitude and ocean temperature", "" "", "``uniform``", "ice defined at all grid points", "" + "``ice_ref_salinity``", "real", "sea ice salinity for coupling fluxes (ppt)", "4.0" "``iceruf``", "real", "ice surface roughness at atmosphere interface in meters", "0.0005" "``l_mpond_fresh``", "``.false.``", "release pond water immediately to ocean", "``.false.``" "", "``true``", "retain (topo) pond water until ponds drain", "" @@ -666,6 +667,8 @@ forcing_nml "``restore_ocn``", "logical", "restore sst to data", "``.false.``" "``restore_ice``", "logical", "restore ice state along lateral boundaries", "``.false.``" "``rotate_wind``", "logical", "rotate wind from east/north to computation grid", "``.true.``" + "``saltflux_option``", "``constant``", "computed using ice_ref_salinity", "``constant``" + "", "``prognostic``", "computed using prognostic salinity", "" "``tfrz_option``", "``linear_salt``", "linear function of salinity (ktherm=1)", "``mushy``" "", "``minus1p8``", "constant ocean freezing temperature (:math:`-1.8^{\circ} C`)", "" "", "``mushy``", "matches mushy-layer thermo (ktherm=2)", "" @@ -804,14 +807,14 @@ zbgc_nml "``ratio_S2N_sp``", "real", "algal S to N in mol/mol small plankton", "0.03" "``restart_bgc``", "logical", "restart tracer values from file", "``.false.``" "``restart_hbrine``", "logical", "", "``.false.``" - "``restart_zsal``", "logical", "", "``.false.``" + "``restart_zsal``", "logical", "zsalinity DEPRECATED", 
"``.false.``" "``restore_bgc``", "logical", "restore bgc to data", "``.false.``" "``R_dFe2dust``", "real", "g/g :cite:`Tagliabue09`", "0.035" "``scale_bgc``", "logical", "", "``.false.``" "``silicatetype``", "real", "mobility type between stationary and mobile silicate", "-1.0" "``skl_bgc``", "logical", "biogeochemistry", "``.false.``" "``solve_zbgc``", "logical", "", "``.false.``" - "``solve_zsal``", "logical", "update salinity tracer profile", "``.false.``" + "``solve_zsal``", "logical", "zsalinity DEPRECATED, update salinity tracer profile", "``.false.``" "``tau_max``", "real", "long time mobile to stationary exchanges", "1.73e-5" "``tau_min``", "real", "rapid module to stationary exchanges", "5200." "``tr_bgc_Am``", "logical", "ammonium tracer", "``.false.``" @@ -847,13 +850,13 @@ icefields_nml There are several icefield namelist groups to control model history output. See the source code for a full list of supported output fields. -* ``icefields_nml`` is in **cicecore/cicedynB/analysis/ice_history_shared.F90** -* ``icefields_bgc_nml`` is in **cicecore/cicedynB/analysis/ice_history_bgc.F90** -* ``icefields_drag_nml`` is in **cicecore/cicedynB/analysis/ice_history_drag.F90** -* ``icefields_fsd_nml`` is in **cicecore/cicedynB/analysis/ice_history_fsd.F90** -* ``icefields_mechred_nml`` is in **cicecore/cicedynB/analysis/ice_history_mechred.F90** -* ``icefields_pond_nml`` is in **cicecore/cicedynB/analysis/ice_history_pond.F90** -* ``icefields_snow_nml`` is in **cicecore/cicedynB/analysis/ice_history_snow.F90** +* ``icefields_nml`` is in **cicecore/cicedyn/analysis/ice_history_shared.F90** +* ``icefields_bgc_nml`` is in **cicecore/cicedyn/analysis/ice_history_bgc.F90** +* ``icefields_drag_nml`` is in **cicecore/cicedyn/analysis/ice_history_drag.F90** +* ``icefields_fsd_nml`` is in **cicecore/cicedyn/analysis/ice_history_fsd.F90** +* ``icefields_mechred_nml`` is in **cicecore/cicedyn/analysis/ice_history_mechred.F90** +* ``icefields_pond_nml`` is in 
**cicecore/cicedyn/analysis/ice_history_pond.F90** +* ``icefields_snow_nml`` is in **cicecore/cicedyn/analysis/ice_history_snow.F90** .. csv-table:: **icefields_nml namelist options** :header: "variable", "options/format", "description", "default value" diff --git a/doc/source/user_guide/ug_implementation.rst b/doc/source/user_guide/ug_implementation.rst index a7cc66948..5ed2092c0 100644 --- a/doc/source/user_guide/ug_implementation.rst +++ b/doc/source/user_guide/ug_implementation.rst @@ -47,7 +47,7 @@ as follows **cicecore/** CICE source code -**cicecore/cicedynB/** +**cicecore/cicedyn/** routines associated with the dynamics core **cicecore/drivers/** diff --git a/doc/source/user_guide/ug_testing.rst b/doc/source/user_guide/ug_testing.rst index 4bb44159b..289f626a9 100644 --- a/doc/source/user_guide/ug_testing.rst +++ b/doc/source/user_guide/ug_testing.rst @@ -23,7 +23,8 @@ The testing scripts support several features - Ability to compare results to prior baselines to verify bit-for-bit (``--bcmp``) - Ability to define where baseline tests are stored (``--bdir``) - Ability to compare tests against each other (``--diff``) - - Ability to set account number (``--acct``), which is otherwise not set and may result in tests not being submitted + - Ability to set or overide the batch account number (``--acct``) and queue name (``--queue``) + - Ability to control how test suites execute (``--setup-only``, ``--setup-build``, ``--setup-build-run``, ``--setup-build-submit``) .. _indtests: @@ -229,7 +230,9 @@ boundary around the entire domain. 
It includes the following namelist modificat - ``dyrect``: ``16.e5`` cm - ``ktherm``: ``-1`` (disables thermodynamics) - ``coriolis``: ``constant`` (``f=1.46e-4`` s\ :math:`^{-1}`) -- ``ice_data_type`` : ``box2001`` (special ice concentration initialization) +- ``ice_data_type`` : ``box2001`` (special initial ice mask) +- ``ice_data_conc`` : ``p5`` +- ``ice_data_dist`` : ``box2001`` (special ice concentration initialization) - ``atm_data_type`` : ``box2001`` (special atmospheric and ocean forcing) Ocean stresses are computed as in :cite:`Hunke01` where they are circular and centered @@ -257,7 +260,9 @@ boundary around the entire domain. It includes the following namelist modificat - ``ktherm``: ``-1`` (disables thermodynamics) - ``kridge``: ``-1`` (disables ridging) - ``kdyn``: ``-1`` (disables dynamics) -- ``ice_data_type`` : ``boxslotcyl`` (special ice concentration and velocity initialization) +- ``ice_data_type`` : ``boxslotcyl`` (special initial ice mask) +- ``ice_data_conc`` : ``c1`` +- ``ice_data_dist`` : ``uniform`` Dynamics is disabled because we directly impose a constant ice velocity. The ice velocity field is circular and centered in the square domain, and such that the slotted cylinder makes a complete revolution with a period :math:`T=` 12 days : @@ -297,15 +302,6 @@ results.csh script in the testsuite.[testid]:: cd testsuite.[testid] ./results.csh -To report the test results, as is required for Pull Requests to be accepted into -the master the CICE Consortium code see :ref:`testreporting`. - -If using the ``--tdir`` option, that directory must not exist before the script is run. 
The tdir directory will be -created by the script and it will be populated by all tests as well as scripts that support the -test suite:: - - ./cice.setup --suite base_suite --mach wolf --env gnu --testid myid --tdir /scratch/$user/testsuite.myid - Multiple suites are supported on the command line as comma separated arguments:: ./cice.setup --suite base_suite,decomp_suite --mach wolf --env gnu --testid myid @@ -318,9 +314,48 @@ The option settings defined at the command line have precedence over the test su values if there are conflicts. The predefined test suites are defined under **configuration/scripts/tests** and -the files defining the suites -have a suffix of .ts in that directory. The format for the test suite file -is relatively simple. +the files defining the suites have a suffix of .ts in that directory. Some of the +available tests suites are + +``quick_suite`` + consists of a handful of basic CICE tests + +``base_suite`` + consists of a much large suite of tests covering much of the CICE functionality + +``decomp_suite`` + checks that different decompositions and pe counts produce bit-for-bit results + +``omp_suite`` + checks that OpenMP single thread and multi-thread cases are bit-for-bit identical + +``io_suite`` + tests the various IO options including binary, netcdf, and pio. PIO should be installed locally and accessible to the CICE build system to make full use of this suite. + +``perf_suite`` + runs a series of tests to evaluate model scaling and performance + +``reprosum_suite`` + verifies that CICE log files are bit-for-bit with different decompositions and pe counts when the bfbflag is set to reprosum + +``gridsys_suite`` + tests B, C, and CD grid_ice configurations + +``prod_suite`` + consists of a handful of tests running 5 to 10 model years and includes some QC testing. These tests will be relatively expensive and take more time compared to other suites. 
+ +``unittest_suite`` + runs unit tests in the CICE repository + +``travis_suite`` + consists of a small suite of tests suitable for running on low pe counts. This is the suite used with Github Actions for CI in the workflow. + +``first_suite`` + this small suite of tests is redundant with tests in other suites. It runs several of the critical baseline tests that other test compare to. It can improve testing turnaround if listed first in a series of test suites. + +When running multiple suites on the command line (i.e. ``--suite first_suite,base_suite,omp_suite``) the suites will be run in the order defined by the user and redundant tests across multiple suites will be created and executed only once. + +The format for the test suite file is relatively simple. It is a text file with white space delimited columns that define a handful of values in a specific order. The first column is the test name, the second the grid, the third the pe count, @@ -410,8 +445,26 @@ The *cice.setup** options ``--setup-only``, ``--setup-build``, and ``--setup-bui which means by default the test suite builds and submits the jobs. By defining other values for those environment variables, users can control the suite script. When using **suite.submit** manually, the string ``true`` (all lowercase) is the only string that will turn on a feature, and both SUITE_RUN and SUITE_SUBMIT cannot be true at the same time. -By leveraging the **cice.setup** command line arguments ``--setup-only``, ``--setup-build``, and ``--setup-build-run`` as well as the environment variables SUITE_BUILD, SUITE_RUN, and SUITE_SUBMIT, users can run **cice.setup** and **suite.submit** in various combinations to quickly setup, setup and build, submit, resubmit, run interactively, or rebuild and resubmit full testsuites quickly and easily. See below for an example. 
+By leveraging the **cice.setup** command line arguments ``--setup-only``, ``--setup-build``, and ``--setup-build-run`` as well as the environment variables SUITE_BUILD, SUITE_RUN, and SUITE_SUBMIT, users can run **cice.setup** and **suite.submit** in various combinations to quickly setup, setup and build, submit, resubmit, run interactively, or rebuild and resubmit full testsuites quickly and easily. See :ref:`examplesuites` for an example. +The script **create_fails.csh** will process the output from results.csh and generate a new +test suite file, **fails.ts**, from the failed tests. +**fails.ts** can then be edited and passed into ``cice.setup --suite fails.ts ...`` to rerun +subsets of failed tests to more efficiently move thru the development, testing, and +validation process. However, a full test suite should be run on the final development +version of the code. + +To report the test results, as is required for Pull Requests to be accepted into +the master the CICE Consortium code see :ref:`testreporting`. + +If using the ``--tdir`` option, that directory must not exist before the script is run. The tdir directory will be +created by the script and it will be populated by all tests as well as scripts that support the +test suite:: + + ./cice.setup --suite base_suite --mach wolf --env gnu --testid myid --tdir /scratch/$user/testsuite.myid + + +.. _examplesuites: Test Suite Examples ~~~~~~~~~~~~~~~~~~~~~~~~ @@ -682,9 +735,12 @@ The following are brief descriptions of some of the current unit tests, both sets of software are tested independently and correctness is verified. - **calchk** is a unit test that exercises the CICE calendar over 100,000 years and verifies correctness. This test does not depend on the CICE initialization. + - **gridavgchk** is a unit test that exercises the CICE grid_average_X2Y methods and verifies results. - **helloworld** is a simple test that writes out helloworld and uses no CICE infrastructure. 
This tests exists to demonstrate how to build a unit test by specifying the object files directly in the Makefile + - **optargs** is a unit test that tests passing optional arguments down a calling tree and verifying + that the optional attribute is preserved correctly. - **sumchk** is a unit test that exercises the methods in ice_global_reductions.F90. This test requires that a CICE grid and decomposition be initialized, so CICE_InitMod.F90 is leveraged to initialize the model prior to running a suite of unit validation tests to verify correctness. @@ -1128,7 +1184,7 @@ If the regression comparisons fail, then you may want to run the QC test, # From the updated sandbox # Generate the same test case(s) as the baseline using options or namelist changes to activate new code modifications - ./cice.setup -m onyx -e intel --test smoke -g gx1 -p 44x1 -testid qc_test -s qc,medium + ./cice.setup -m onyx -e intel --test smoke -g gx1 -p 44x1 --testid qc_test -s qc,medium cd onyx_intel_smoke_gx1_44x1_medium_qc.qc_test # modify ice_in to activate the namelist options that were determined above ./cice.build @@ -1137,7 +1193,8 @@ If the regression comparisons fail, then you may want to run the QC test, # Wait for runs to finish # Perform the QC test - cp configuration/scripts/tests/QC/cice.t-test.py + # From the updated sandbox + cp configuration/scripts/tests/QC/cice.t-test.py . ./cice.t-test.py /p/work/turner/CICE_RUNS/onyx_intel_smoke_gx1_44x1_medium_qc.qc_base \ /p/work/turner/CICE_RUNS/onyx_intel_smoke_gx1_44x1_medium_qc.qc_test diff --git a/doc/source/user_guide/ug_troubleshooting.rst b/doc/source/user_guide/ug_troubleshooting.rst index 3a7bff965..ac96b92af 100644 --- a/doc/source/user_guide/ug_troubleshooting.rst +++ b/doc/source/user_guide/ug_troubleshooting.rst @@ -7,6 +7,16 @@ Troubleshooting Check the FAQ: https://github.com/CICE-Consortium/CICE/wiki +.. 
_dirsetup: + +Directory Structure +--------------------- + +In November, 2022, the cicedynB directory was renamed to cicedyn. +A soft link was temporarily added to preserve the ability to use +cicedynB as a path when compiling CICE in other build systems. This +soft link will be removed in the future. + .. _setup: Initial setup @@ -209,6 +219,16 @@ be found in the `Icepack documentation `_. +VP dynamics results +---------------------------------------- + +The VP dynamics solver (`kdyn=3`) requires a global sum. This global sum +is computed by default via an efficient implementation that is not bit-for-bit +for different decompositions or pe counts. Bit-for-bit identical results +can be recovered for the VP dynamics solver by setting the namelist +`bfbflag = reprosum` or using the `-s reprosum` option when setting up a case. + + Proliferating subprocess parameterizations -------------------------------------------------------