From 0e18b2fdfd6ec7e05442c95dac2d8d21dcf5a0cc Mon Sep 17 00:00:00 2001
From: apcraig
Date: Wed, 2 Jun 2021 11:33:05 -0600
Subject: [PATCH 1/6] Update testing

- Set default debug_model_step=0 (was 999999999)
- Add debug_model_[i,j,iblk,task] to define the debug_model diagnostic point
  in local grid index space. If this point is not set and debug_model is
  turned on, it will use lonpnt(1),latpnt(1).
- Rename forcing_diag namelist/variable to debug_forcing to be more
  consistent with other "debug_" namelist variables
- Rename the local variable forcing_debug in ice_forcing.F90 to local_debug
  to avoid confusion with the global variable debug_forcing.
- Add namelist variable optics_file. Was hardwired in ice_forcing_bgc.F90
- Update the optics file variable name that is read; it is still hardwired
  in the model.
- Update setting of nbtrcr_sw and allocation of trcrn_sw. nbtrcr_sw was not
  set in icepack after it was computed and trcrn_sw was allocated before
  nbtrcr_sw was computed. This impacts the dedd_algae implementation which
  still isn't working.
- move default distribution_wgt_file for gx1 to set_nml.gx1
- update test suite, add testing of debug_model_[i,j,iblk,task], add
  additional testing of maskhalo
- update documentation
---
 .../cicedynB/analysis/ice_diagnostics.F90     |  45 +++-
 cicecore/cicedynB/general/ice_forcing.F90     | 234 +++++++++---------
 cicecore/cicedynB/general/ice_forcing_bgc.F90 |  45 ++--
 cicecore/cicedynB/general/ice_init.F90        |  26 +-
 cicecore/shared/ice_arrays_column.F90         |  10 +-
 cicecore/shared/ice_init_column.F90           |  19 +-
 configuration/scripts/cice.settings           |   2 +-
 configuration/scripts/ice_in                  |   9 +-
 configuration/scripts/options/set_nml.bigdiag |   2 +-
 configuration/scripts/options/set_nml.diagpt1 |   5 +
 .../scripts/options/set_nml.dwghtfile         |   1 -
 configuration/scripts/options/set_nml.gx1     |   1 +
 configuration/scripts/tests/base_suite.ts     |  25 +-
 configuration/scripts/tests/unittest_suite.ts |   2 +-
 doc/source/cice_index.rst                     |   9 +-
 doc/source/user_guide/ug_case_settings.rst    |   9 +-
 doc/source/user_guide/ug_implementation.rst   |  19 +-
 doc/source/user_guide/ug_troubleshooting.rst  |  12 +
 18 files changed, 281 insertions(+), 194 deletions(-)
 create mode 100644 configuration/scripts/options/set_nml.diagpt1

diff --git a/cicecore/cicedynB/analysis/ice_diagnostics.F90 b/cicecore/cicedynB/analysis/ice_diagnostics.F90
index 3eaf9d057..6b9b32301 100644
--- a/cicecore/cicedynB/analysis/ice_diagnostics.F90
+++ b/cicecore/cicedynB/analysis/ice_diagnostics.F90
@@ -39,7 +39,7 @@ module ice_diagnostics
       print_global        ! if true, print global data

    integer (kind=int_kind), public :: &
-      debug_model_step = 999999999 ! begin printing at istep1=debug_model_step
+      debug_model_step = 0 ! begin printing at istep1=debug_model_step

    integer (kind=int_kind), parameter, public :: &
       npnt = 2             ! total number of points to be printed
@@ -73,6 +73,12 @@ module ice_diagnostics
    integer (kind=int_kind), dimension(npnt), public :: &
       piloc, pjloc, pbloc, pmloc ! location of diagnostic points

+   integer (kind=int_kind), public :: &
+      debug_model_i = -1, &    ! location of debug_model point, local i index
+      debug_model_j = -1, &    ! location of debug_model point, local j index
+      debug_model_iblk = -1, & ! location of debug_model point, local block number
+      debug_model_task = -1    ! location of debug_model point, local task number
+
    ! for hemispheric water and heat budgets
    real (kind=dbl_kind) :: &
       totmn , & ! 
total ice/snow water mass (nh) @@ -1432,9 +1438,9 @@ subroutine init_diags write(nu_diag,*) ' Find indices of diagnostic points ' endif - piloc(:) = 0 - pjloc(:) = 0 - pbloc(:) = 0 + piloc(:) = -1 + pjloc(:) = -1 + pbloc(:) = -1 pmloc(:) = -999 plat(:) = -999._dbl_kind plon(:) = -999._dbl_kind @@ -1535,16 +1541,29 @@ subroutine debug_ice(iblk, plabeld) integer (kind=int_kind) :: i, j, m character(len=*), parameter :: subname='(debug_ice)' -! tcraig, do this only on one point, the first point -! do m = 1, npnt - m = 1 - if (istep1 >= debug_model_step .and. & - iblk == pbloc(m) .and. my_task == pmloc(m)) then - i = piloc(m) - j = pjloc(m) - call print_state(plabeld,i,j,iblk) + if (istep1 >= debug_model_step) then + + ! set debug point to 1st global point if not set as local values + if (debug_model_i < 0 .and. debug_model_j < 0 .and. & + debug_model_iblk < 0 .and. debug_model_task < 0) then + debug_model_i = piloc(1) + debug_model_j = pjloc(1) + debug_model_task = pmloc(1) + debug_model_iblk = pbloc(1) + endif + + ! if debug point is messed up, abort + if (debug_model_i < 0 .or. debug_model_j < 0 .or. & + debug_model_iblk < 0 .or. debug_model_task < 0) then + call abort_ice (subname//'ERROR: debug_model_[i,j,iblk,mytask] not set correctly') endif -! enddo + + ! write out debug info + if (debug_model_iblk == iblk .and. debug_model_task == my_task) then + call print_state(plabeld,debug_model_i,debug_model_j,debug_model_iblk) + endif + + endif end subroutine debug_ice diff --git a/cicecore/cicedynB/general/ice_forcing.F90 b/cicecore/cicedynB/general/ice_forcing.F90 index 200b3d00b..a71e6dd17 100755 --- a/cicecore/cicedynB/general/ice_forcing.F90 +++ b/cicecore/cicedynB/general/ice_forcing.F90 @@ -158,7 +158,7 @@ module ice_forcing trest ! restoring time scale (sec) logical (kind=log_kind), public :: & - forcing_diag ! prints forcing debugging output if true + debug_forcing ! prints forcing debugging output if true real (dbl_kind), dimension(:), allocatable, public :: & jday_atm ! jday time vector from atm forcing files @@ -173,7 +173,7 @@ module ice_forcing mixed_layer_depth_default = c20 ! default mixed layer depth in m logical (kind=log_kind), parameter :: & - forcing_debug = .false. ! local debug flag + local_debug = .false. ! local debug flag !======================================================================= @@ -187,7 +187,7 @@ subroutine alloc_forcing integer (int_kind) :: ierr character(len=*), parameter :: subname = '(alloc_forcing)' - if (forcing_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' + if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' allocate ( & cldf(nx_block,ny_block, max_blocks), & ! cloud fraction @@ -235,13 +235,13 @@ subroutine init_forcing_atmo integer (kind=int_kind) :: modadj ! adjustment for mod function character(len=*), parameter :: subname = '(init_forcing_atmo)' - if (forcing_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' + if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' modadj = abs((min(0,myear-fyear_init)/ycycle+1)*ycycle) fyear = fyear_init + mod(myear-fyear_init+modadj,ycycle) fyear_final = fyear_init + ycycle - 1 ! last year in forcing cycle - if (forcing_debug .and. my_task == master_task) then + if (local_debug .and. 
my_task == master_task) then write(nu_diag,*) subname,'fdbg fyear = ',fyear,fyear_init,fyear_final write(nu_diag,*) subname,'fdbg atm_data_type = ',trim(atm_data_type) endif @@ -344,7 +344,7 @@ subroutine init_forcing_ocn(dt) character(len=*), parameter :: subname = '(init_forcing_ocn)' - if (forcing_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' + if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' call icepack_query_parameters(secday_out=secday) call icepack_warnings_flush(nu_diag) @@ -389,7 +389,7 @@ subroutine init_forcing_ocn(dt) sss(:,:,:) = c0 do k = 1,12 ! loop over 12 months - call ice_read (nu_forcing, k, work1, 'rda8', forcing_diag, & + call ice_read (nu_forcing, k, work1, 'rda8', debug_forcing, & field_loc_center, field_type_scalar) !$OMP PARALLEL DO PRIVATE(iblk,i,j) do iblk = 1, nblocks @@ -436,7 +436,7 @@ subroutine init_forcing_ocn(dt) if (my_task == master_task) & call ice_open (nu_forcing, sst_file, nbits) - call ice_read (nu_forcing, mmonth, sst, 'rda8', forcing_diag, & + call ice_read (nu_forcing, mmonth, sst, 'rda8', debug_forcing, & field_loc_center, field_type_scalar) if (my_task == master_task) close(nu_forcing) @@ -520,7 +520,7 @@ subroutine ocn_freezing_temperature character(len=*), parameter :: subname = '(ocn_freezing_temperature)' - if (forcing_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' + if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' !$OMP PARALLEL DO PRIVATE(iblk,i,j) do iblk = 1, nblocks @@ -565,7 +565,7 @@ subroutine get_forcing_atmo character(len=*), parameter :: subname = '(get_forcing_atmo)' - if (forcing_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' + if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' call ice_timer_start(timer_forcing) @@ -588,7 +588,7 @@ subroutine get_forcing_atmo ! Read and interpolate atmospheric data !------------------------------------------------------------------- - if (forcing_debug .and. my_task == master_task) then + if (local_debug .and. my_task == master_task) then write(nu_diag,*) subname,'fdbg fyear = ',fyear write(nu_diag,*) subname,'fdbg atm_data_type = ',trim(atm_data_type) endif @@ -688,11 +688,11 @@ subroutine get_forcing_ocn (dt) character(len=*), parameter :: subname = '(get_forcing_ocn)' - if (forcing_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' + if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' call ice_timer_start(timer_forcing) - if (forcing_debug .and. my_task == master_task) then + if (local_debug .and. my_task == master_task) then write(nu_diag,*) subname,'fdbg fyear = ',fyear write(nu_diag,*) subname,'fdbg ocn_data_type = ',trim(ocn_data_type) endif @@ -770,15 +770,15 @@ subroutine read_data (flag, recd, yr, ixm, ixx, ixp, & character(len=*), parameter :: subname = '(read_data)' - if (forcing_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' + if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' call ice_timer_start(timer_readwrite) ! reading/writing nbits = 64 ! double precision data - if (istep1 > debug_model_step) forcing_diag = .true. !! debugging + if (istep1 > debug_model_step) debug_forcing = .true. !! debugging - if (my_task==master_task .and. (forcing_diag)) then + if (my_task==master_task .and. 
(debug_forcing)) then write(nu_diag,*) ' ', trim(data_file) endif @@ -816,7 +816,7 @@ subroutine read_data (flag, recd, yr, ixm, ixx, ixp, & arg = 1 nrec = recd + n2 call ice_read (nu_forcing, nrec, field_data(:,:,arg,:), & - 'rda8', forcing_diag, field_loc, field_type) + 'rda8', debug_forcing, field_loc, field_type) if (ixx==1 .and. my_task == master_task) close(nu_forcing) endif ! ixm ne -99 @@ -828,7 +828,7 @@ subroutine read_data (flag, recd, yr, ixm, ixx, ixp, & arg = arg + 1 nrec = recd + ixx call ice_read (nu_forcing, nrec, field_data(:,:,arg,:), & - 'rda8', forcing_diag, field_loc, field_type) + 'rda8', debug_forcing, field_loc, field_type) if (ixp /= -99) then ! currently in latter half of data interval @@ -853,7 +853,7 @@ subroutine read_data (flag, recd, yr, ixm, ixx, ixp, & arg = arg + 1 nrec = recd + n4 call ice_read (nu_forcing, nrec, field_data(:,:,arg,:), & - 'rda8', forcing_diag, field_loc, field_type) + 'rda8', debug_forcing, field_loc, field_type) endif ! ixp /= -99 if (my_task == master_task) close(nu_forcing) @@ -923,13 +923,13 @@ subroutine read_data_nc (flag, recd, yr, ixm, ixx, ixp, & character(len=*), parameter :: subname = '(read_data_nc)' - if (forcing_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' + if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' call ice_timer_start(timer_readwrite) ! reading/writing - if (istep1 > debug_model_step) forcing_diag = .true. !! debugging + if (istep1 > debug_model_step) debug_forcing = .true. !! debugging - if (my_task==master_task .and. (forcing_diag)) then + if (my_task==master_task .and. (debug_forcing)) then write(nu_diag,*) ' ', trim(data_file) endif @@ -968,7 +968,7 @@ subroutine read_data_nc (flag, recd, yr, ixm, ixx, ixp, & nrec = recd + n2 call ice_read_nc & - (fid, nrec, fieldname, field_data(:,:,arg,:), forcing_diag, & + (fid, nrec, fieldname, field_data(:,:,arg,:), debug_forcing, & field_loc, field_type) if (ixx==1) call ice_close_nc(fid) @@ -982,7 +982,7 @@ subroutine read_data_nc (flag, recd, yr, ixm, ixx, ixp, & nrec = recd + ixx call ice_read_nc & - (fid, nrec, fieldname, field_data(:,:,arg,:), forcing_diag, & + (fid, nrec, fieldname, field_data(:,:,arg,:), debug_forcing, & field_loc, field_type) if (ixp /= -99) then @@ -1008,7 +1008,7 @@ subroutine read_data_nc (flag, recd, yr, ixm, ixx, ixp, & nrec = recd + n4 call ice_read_nc & - (fid, nrec, fieldname, field_data(:,:,arg,:), forcing_diag, & + (fid, nrec, fieldname, field_data(:,:,arg,:), debug_forcing, & field_loc, field_type) endif ! ixp /= -99 @@ -1061,13 +1061,13 @@ subroutine read_data_nc_hycom (flag, recd, & character(len=*), parameter :: subname = '(read_data_nc_hycom)' - if (forcing_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' + if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' call ice_timer_start(timer_readwrite) ! reading/writing - if (istep1 > debug_model_step) forcing_diag = .true. !! debugging + if (istep1 > debug_model_step) debug_forcing = .true. !! debugging - if (my_task==master_task .and. (forcing_diag)) then + if (my_task==master_task .and. (debug_forcing)) then write(nu_diag,*) ' ', trim(data_file) endif @@ -1078,11 +1078,11 @@ subroutine read_data_nc_hycom (flag, recd, & ! 
read data !----------------------------------------------------------------- call ice_read_nc & - (fid, recd , fieldname, field_data(:,:,1,:), forcing_diag, & + (fid, recd , fieldname, field_data(:,:,1,:), debug_forcing, & field_loc, field_type) call ice_read_nc & - (fid, recd+1, fieldname, field_data(:,:,2,:), forcing_diag, & + (fid, recd+1, fieldname, field_data(:,:,2,:), debug_forcing, & field_loc, field_type) call ice_close_nc(fid) @@ -1131,15 +1131,15 @@ subroutine read_clim_data (readflag, recd, ixm, ixx, ixp, & character(len=*), parameter :: subname = '(read_clim_data)' - if (forcing_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' + if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' call ice_timer_start(timer_readwrite) ! reading/writing nbits = 64 ! double precision data - if (istep1 > debug_model_step) forcing_diag = .true. !! debugging + if (istep1 > debug_model_step) debug_forcing = .true. !! debugging - if (my_task==master_task .and. (forcing_diag)) & + if (my_task==master_task .and. (debug_forcing)) & write(nu_diag,*) ' ', trim(data_file) if (readflag) then @@ -1155,19 +1155,19 @@ subroutine read_clim_data (readflag, recd, ixm, ixx, ixp, & arg = 1 nrec = recd + ixm call ice_read (nu_forcing, nrec, field_data(:,:,arg,:), & - 'rda8', forcing_diag, field_loc, field_type) + 'rda8', debug_forcing, field_loc, field_type) endif arg = arg + 1 nrec = recd + ixx call ice_read (nu_forcing, nrec, field_data(:,:,arg,:), & - 'rda8', forcing_diag, field_loc, field_type) + 'rda8', debug_forcing, field_loc, field_type) if (ixp /= -99) then arg = arg + 1 nrec = recd + ixp call ice_read (nu_forcing, nrec, field_data(:,:,arg,:), & - 'rda8', forcing_diag, field_loc, field_type) + 'rda8', debug_forcing, field_loc, field_type) endif if (my_task == master_task) close (nu_forcing) @@ -1218,13 +1218,13 @@ subroutine read_clim_data_nc (readflag, recd, ixm, ixx, ixp, & character(len=*), parameter :: subname = '(read_clim_data_nc)' - if (forcing_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' + if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' call ice_timer_start(timer_readwrite) ! reading/writing - if (istep1 > debug_model_step) forcing_diag = .true. !! debugging + if (istep1 > debug_model_step) debug_forcing = .true. !! debugging - if (my_task==master_task .and. (forcing_diag)) & + if (my_task==master_task .and. (debug_forcing)) & write(nu_diag,*) ' ', trim(data_file) if (readflag) then @@ -1241,21 +1241,21 @@ subroutine read_clim_data_nc (readflag, recd, ixm, ixx, ixp, & nrec = recd + ixm call ice_read_nc & (fid, nrec, fieldname, field_data(:,:,arg,:), & - forcing_diag, field_loc, field_type) + debug_forcing, field_loc, field_type) endif arg = arg + 1 nrec = recd + ixx call ice_read_nc & (fid, nrec, fieldname, field_data(:,:,arg,:), & - forcing_diag, field_loc, field_type) + debug_forcing, field_loc, field_type) if (ixp /= -99) then arg = arg + 1 nrec = recd + ixp call ice_read_nc & (fid, nrec, fieldname, field_data(:,:,arg,:), & - forcing_diag, field_loc, field_type) + debug_forcing, field_loc, field_type) endif if (my_task == master_task) call ice_close_nc (fid) @@ -1286,7 +1286,7 @@ subroutine interp_coeff_monthly (recslot) character(len=*), parameter :: subname = '(interp_coeff_monthly)' - if (forcing_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' + if (local_debug .and. 
my_task == master_task) write(nu_diag,*) subname,'fdbg start' call icepack_query_parameters(secday_out=secday) call icepack_warnings_flush(nu_diag) @@ -1355,7 +1355,7 @@ subroutine interp_coeff (recnum, recslot, secint, dataloc) character(len=*), parameter :: subname = '(interp_coeff)' - if (forcing_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' + if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' call icepack_query_parameters(secday_out=secday) call icepack_warnings_flush(nu_diag) @@ -1387,7 +1387,7 @@ subroutine interp_coeff (recnum, recslot, secint, dataloc) c1intp = abs((t2 - tt) / (t2 - t1)) c2intp = c1 - c1intp - if (forcing_debug .and. my_task == master_task) then + if (local_debug .and. my_task == master_task) then write(nu_diag,*) subname,'fdbg yday,sec = ',yday,msec write(nu_diag,*) subname,'fdbg tt = ',tt write(nu_diag,*) subname,'fdbg c12intp = ',c1intp,c2intp @@ -1408,7 +1408,7 @@ subroutine interp_coeff2 (tt, t1, t2) t1, t2 ! first+last decimal daynumber character(len=*), parameter :: subname = '(interp_coeff2)' - if (forcing_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' + if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' ! Compute coefficients c1intp = abs((t2 - tt) / (t2 - t1)) @@ -1438,7 +1438,7 @@ subroutine interpolate_data (field_data, field) character(len=*), parameter :: subname = '(interpolate data)' - if (forcing_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' + if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' !$OMP PARALLEL DO PRIVATE(iblk,i,j) do iblk = 1, nblocks @@ -1471,7 +1471,7 @@ subroutine file_year (data_file, yr) character(len=*), parameter :: subname = '(file_year)' - if (forcing_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' + if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' if (trim(atm_data_type) == 'hadgem') then ! netcdf i = index(data_file,'.nc') - 5 @@ -1559,7 +1559,7 @@ subroutine prepare_forcing (nx_block, ny_block, & character(len=*), parameter :: subname = '(prepare_forcing)' - if (forcing_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' + if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' call icepack_query_parameters(Tffresh_out=Tffresh, puny_out=puny) call icepack_query_parameters(secday_out=secday) @@ -1779,7 +1779,7 @@ subroutine longwave_parkinson_washington(Tair, cldf, flw) character(len=*), parameter :: subname = '(longwave_parkinson_washington)' - if (forcing_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' + if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' call icepack_query_parameters(Tffresh_out=Tffresh, & stefan_boltzmann_out=stefan_boltzmann) @@ -1831,7 +1831,7 @@ subroutine longwave_rosati_miyakoda(cldf, Tsfc, & character(len=*), parameter :: subname = '(longwave_rosati_miyakoda)' - if (forcing_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' + if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' call icepack_query_parameters(Tffresh_out=Tffresh, & stefan_boltzmann_out=stefan_boltzmann, & @@ -1870,7 +1870,7 @@ subroutine ncar_files (yr) character(len=*), parameter :: subname = '(ncar_files)' - if (forcing_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' + if (local_debug .and. 
my_task == master_task) write(nu_diag,*) subname,'fdbg start' fsw_file = & trim(atm_data_dir)//'/MONTHLY/swdn.1996.dat' @@ -1943,7 +1943,7 @@ subroutine ncar_data character(len=*), parameter :: subname = '(ncar_data)' - if (forcing_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' + if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' call icepack_query_parameters(secday_out=secday) call icepack_warnings_flush(nu_diag) @@ -2097,7 +2097,7 @@ subroutine LY_files (yr) character(len=*), parameter :: subname = '(LY_files)' - if (forcing_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' + if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' flw_file = & trim(atm_data_dir)//'/MONTHLY/cldf.omip.dat' @@ -2144,7 +2144,7 @@ subroutine JRA55_gx1_files(yr) character(len=*), parameter :: subname = '(JRA55_gx1_files)' - if (forcing_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' + if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' uwind_file = & trim(atm_data_dir)//'/8XDAILY/JRA55_03hr_forcing_2005.nc' @@ -2165,7 +2165,7 @@ subroutine JRA55_tx1_files(yr) character(len=*), parameter :: subname = '(JRA55_tx1_files)' - if (forcing_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' + if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' uwind_file = & trim(atm_data_dir)//'/8XDAILY/JRA55_03hr_forcing_tx1_2005.nc' @@ -2186,7 +2186,7 @@ subroutine JRA55_gx3_files(yr) character(len=*), parameter :: subname = '(JRA55_gx3_files)' - if (forcing_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' + if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' uwind_file = & trim(atm_data_dir)//'/8XDAILY/JRA55_gx3_03hr_forcing_2005.nc' @@ -2237,7 +2237,7 @@ subroutine LY_data character(len=*), parameter :: subname = '(LY_data)' - if (forcing_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' + if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' call icepack_query_parameters(Tffresh_out=Tffresh) call icepack_query_parameters(secday_out=secday) @@ -2386,7 +2386,7 @@ subroutine LY_data ! Save record number oldrecnum = recnum - if (forcing_diag) then + if (debug_forcing) then if (my_task == master_task) write (nu_diag,*) 'LY_bulk_data' vmin = global_minval(fsw,distrb_info,tmask) @@ -2418,7 +2418,7 @@ subroutine LY_data if (my_task.eq.master_task) & write (nu_diag,*) 'Qa',vmin,vmax - endif ! forcing_diag + endif ! debug_forcing end subroutine LY_data @@ -2458,7 +2458,7 @@ subroutine JRA55_data character (char_len_long) :: uwind_file_old character(len=*), parameter :: subname = '(JRA55_data)' - if (forcing_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' + if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' call icepack_query_parameters(Tffresh_out=Tffresh) call icepack_query_parameters(secday_out=secday) @@ -2469,7 +2469,7 @@ subroutine JRA55_data sec3hr = secday/c8 ! seconds in 3 hours maxrec = days_per_year * 8 - if (forcing_debug .and. my_task == master_task) then + if (local_debug .and. my_task == master_task) then write(nu_diag,*) subname,'fdbg dpy, maxrec = ',days_per_year,maxrec endif @@ -2521,7 +2521,7 @@ subroutine JRA55_data endif endif - if (forcing_debug .and. my_task == master_task) then + if (local_debug .and. 
my_task == master_task) then write(nu_diag,*) subname,'fdbg read recnum = ',recnum,n1 endif @@ -2545,37 +2545,37 @@ subroutine JRA55_data else fieldname = 'airtmp' - call ice_read_nc(ncid,recnum,fieldname,Tair_data(:,:,n1,:),forcing_debug, & + call ice_read_nc(ncid,recnum,fieldname,Tair_data(:,:,n1,:),local_debug, & field_loc=field_loc_center, & field_type=field_type_scalar) fieldname = 'wndewd' - call ice_read_nc(ncid,recnum,fieldname,uatm_data(:,:,n1,:),forcing_debug, & + call ice_read_nc(ncid,recnum,fieldname,uatm_data(:,:,n1,:),local_debug, & field_loc=field_loc_center, & field_type=field_type_scalar) fieldname = 'wndnwd' - call ice_read_nc(ncid,recnum,fieldname,vatm_data(:,:,n1,:),forcing_debug, & + call ice_read_nc(ncid,recnum,fieldname,vatm_data(:,:,n1,:),local_debug, & field_loc=field_loc_center, & field_type=field_type_scalar) fieldname = 'spchmd' - call ice_read_nc(ncid,recnum,fieldname,Qa_data(:,:,n1,:),forcing_debug, & + call ice_read_nc(ncid,recnum,fieldname,Qa_data(:,:,n1,:),local_debug, & field_loc=field_loc_center, & field_type=field_type_scalar) fieldname = 'glbrad' - call ice_read_nc(ncid,recnum,fieldname,fsw_data(:,:,n1,:),forcing_debug, & + call ice_read_nc(ncid,recnum,fieldname,fsw_data(:,:,n1,:),local_debug, & field_loc=field_loc_center, & field_type=field_type_scalar) fieldname = 'dlwsfc' - call ice_read_nc(ncid,recnum,fieldname,flw_data(:,:,n1,:),forcing_debug, & + call ice_read_nc(ncid,recnum,fieldname,flw_data(:,:,n1,:),local_debug, & field_loc=field_loc_center, & field_type=field_type_scalar) fieldname = 'ttlpcp' - call ice_read_nc(ncid,recnum,fieldname,fsnow_data(:,:,n1,:),forcing_debug, & + call ice_read_nc(ncid,recnum,fieldname,fsnow_data(:,:,n1,:),local_debug, & field_loc=field_loc_center, & field_type=field_type_scalar) endif ! copy data from n1=2 from last timestep to n1=1 @@ -2603,7 +2603,7 @@ subroutine JRA55_data call abort_ice (error_message=subname//' ERROR: c2intp out of range', & file=__FILE__, line=__LINE__) endif - if (forcing_debug .and. my_task == master_task) then + if (local_debug .and. my_task == master_task) then write(nu_diag,*) subname,'fdbg c12intp = ',c1intp,c2intp endif @@ -2644,7 +2644,7 @@ subroutine JRA55_data enddo ! iblk !$OMP END PARALLEL DO - if (forcing_diag .or. forcing_debug) then + if (debug_forcing .or. local_debug) then if (my_task.eq.master_task) write (nu_diag,*) subname,'fdbg JRA55_bulk_data' vmin = global_minval(fsw,distrb_info,tmask) vmax = global_maxval(fsw,distrb_info,tmask) @@ -2667,7 +2667,7 @@ subroutine JRA55_data vmin = global_minval(Qa,distrb_info,tmask) vmax = global_maxval(Qa,distrb_info,tmask) if (my_task.eq.master_task) write (nu_diag,*) subname,'fdbg Qa',vmin,vmax - endif ! forcing_diag + endif ! debug_forcing end subroutine JRA55_data @@ -2714,7 +2714,7 @@ subroutine compute_shortwave(nx_block, ny_block, & character(len=*), parameter :: subname = '(compute_shortwave)' - if (forcing_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' + if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' call icepack_query_parameters(secday_out=secday, pi_out=pi) call icepack_warnings_flush(nu_diag) @@ -2778,7 +2778,7 @@ subroutine Qa_fixLY(nx_block, ny_block, Tair, Qa) character(len=*), parameter :: subname = '(Qa_fixLY)' - if (forcing_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' + if (local_debug .and. 
my_task == master_task) write(nu_diag,*) subname,'fdbg start' call icepack_query_parameters(Tffresh_out=Tffresh, puny_out=puny) call icepack_warnings_flush(nu_diag) @@ -2822,7 +2822,7 @@ subroutine hadgem_files (yr) character(len=*), parameter :: subname = '(hadgem_files)' - if (forcing_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' + if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' call icepack_query_parameters(calc_strair_out=calc_strair, & calc_Tsfc_out=calc_Tsfc) @@ -3022,7 +3022,7 @@ subroutine hadgem_data character(len=*), parameter :: subname = '(hadgem_data)' - if (forcing_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' + if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' call icepack_query_parameters(Lsub_out=Lsub) call icepack_query_parameters(calc_strair_out=calc_strair, & @@ -3253,7 +3253,7 @@ subroutine monthly_files (yr) character(len=*), parameter :: subname = '(monthly_files)' - if (forcing_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' + if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' flw_file = & trim(atm_data_dir)//'/MONTHLY/cldf.omip.dat' @@ -3326,7 +3326,7 @@ subroutine monthly_data character(len=*), parameter :: subname = '(monthly_data)' - if (forcing_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' + if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' !------------------------------------------------------------------- ! monthly data @@ -3425,7 +3425,7 @@ subroutine monthly_data enddo ! iblk !$OMP END PARALLEL DO - if (forcing_diag) then + if (debug_forcing) then if (my_task == master_task) write (nu_diag,*) 'LY_bulk_data' vmin = global_minval(fsw,distrb_info,tmask) vmax = global_maxval(fsw,distrb_info,tmask) @@ -3460,7 +3460,7 @@ subroutine monthly_data if (my_task.eq.master_task) & write (nu_diag,*) 'Qa',vmin,vmax - endif ! forcing_diag + endif ! debug_forcing end subroutine monthly_data @@ -3507,7 +3507,7 @@ subroutine oned_data character(len=*), parameter :: subname = '(oned_data)' - if (forcing_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' + if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' diag = .false. ! write diagnostic information @@ -3584,7 +3584,7 @@ subroutine oned_files character(len=*), parameter :: subname = '(oned_files)' - if (forcing_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' + if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' fsw_file = & trim(atm_data_dir)//'/hourlysolar_brw1989_5yr.nc' @@ -3651,7 +3651,7 @@ subroutine ocn_data_clim (dt) character(len=*), parameter :: subname = '(ocn_data_clim)' - if (forcing_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' + if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' if (my_task == master_task .and. istep == 1) then if (trim(ocn_data_type)=='clim') then @@ -3809,7 +3809,7 @@ subroutine ocn_data_ncar_init character(len=*), parameter :: subname = '(ocn_data_ncar_init)' - if (forcing_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' + if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' if (my_task == master_task) then @@ -3861,10 +3861,10 @@ subroutine ocn_data_ncar_init ! Note: netCDF does single to double conversion if necessary ! 
if (n >= 4 .and. n <= 7) then -! call ice_read_nc(fid, m, vname(n), work1, forcing_diag, & +! call ice_read_nc(fid, m, vname(n), work1, debug_forcing, & ! field_loc_NEcorner, field_type_vector) ! else - call ice_read_nc(fid, m, vname(n), work1, forcing_diag, & + call ice_read_nc(fid, m, vname(n), work1, debug_forcing, & field_loc_center, field_type_scalar) ! endif @@ -3889,10 +3889,10 @@ subroutine ocn_data_ncar_init do m=1,12 nrec = nrec + 1 if (n >= 4 .and. n <= 7) then - call ice_read (nu_forcing, nrec, work1, 'rda8', forcing_diag, & + call ice_read (nu_forcing, nrec, work1, 'rda8', debug_forcing, & field_loc_NEcorner, field_type_vector) else - call ice_read (nu_forcing, nrec, work1, 'rda8', forcing_diag, & + call ice_read (nu_forcing, nrec, work1, 'rda8', debug_forcing, & field_loc_center, field_type_scalar) endif ocn_frc_m(:,:,:,n,m) = work1(:,:,:) @@ -3969,7 +3969,7 @@ subroutine ocn_data_ncar_init_3D character(len=*), parameter :: subname = '(ocn_data_ncar_init_3D)' - if (forcing_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' + if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' if (my_task == master_task) then @@ -4023,10 +4023,10 @@ subroutine ocn_data_ncar_init_3D ! Note: netCDF does single to double conversion if necessary if (n == 4 .or. n == 5) then ! 3D currents nzlev = 1 ! surface currents - call ice_read_nc_uv(fid, m, nzlev, vname(n), work1, forcing_diag, & + call ice_read_nc_uv(fid, m, nzlev, vname(n), work1, debug_forcing, & field_loc_center, field_type_scalar) else - call ice_read_nc(fid, m, vname(n), work1, forcing_diag, & + call ice_read_nc(fid, m, vname(n), work1, debug_forcing, & field_loc_center, field_type_scalar) endif @@ -4108,7 +4108,7 @@ subroutine ocn_data_ncar(dt) character(len=*), parameter :: subname = '(ocn_data_ncar)' - if (forcing_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' + if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' !------------------------------------------------------------------- ! monthly data @@ -4213,7 +4213,7 @@ subroutine ocn_data_ncar(dt) !$OMP END PARALLEL DO endif - if (forcing_diag) then + if (debug_forcing) then if (my_task == master_task) & write (nu_diag,*) 'ocn_data_ncar' vmin = global_minval(Tf,distrb_info,tmask) @@ -4267,7 +4267,7 @@ subroutine ocn_data_oned character(len=*), parameter :: subname = '(ocn_data_oned)' - if (forcing_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' + if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' sss (:,:,:) = 34.0_dbl_kind ! sea surface salinity (ppt) @@ -4324,7 +4324,7 @@ subroutine ocn_data_hadgem(dt) character(len=*), parameter :: subname = '(ocn_data_hadgem)' - if (forcing_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' + if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' !------------------------------------------------------------------- ! monthly data @@ -4482,7 +4482,7 @@ subroutine ocn_data_hycom_init character(len=*), parameter :: subname = '(ocn_data_hycom_init)' - if (forcing_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' + if (local_debug .and. 
my_task == master_task) write(nu_diag,*) subname,'fdbg start' if (trim(ocn_data_type) == 'hycom') then sss_file = trim(ocn_data_dir)//'ice.restart.surf.nc' @@ -4494,7 +4494,7 @@ subroutine ocn_data_hycom_init fieldname = 'sss' call ice_open_nc (sss_file, fid) - call ice_read_nc (fid, 1 , fieldname, sss, forcing_diag, & + call ice_read_nc (fid, 1 , fieldname, sss, debug_forcing, & field_loc_center, field_type_scalar) call ice_close_nc(fid) @@ -4509,7 +4509,7 @@ subroutine ocn_data_hycom_init fieldname = 'sst' call ice_open_nc (sst_file, fid) - call ice_read_nc (fid, 1 , fieldname, sst, forcing_diag, & + call ice_read_nc (fid, 1 , fieldname, sst, debug_forcing, & field_loc_center, field_type_scalar) call ice_close_nc(fid) @@ -4539,7 +4539,7 @@ subroutine hycom_atm_files varname ! variable name in netcdf file character(len=*), parameter :: subname = '(hycom_atm_files)' - if (forcing_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' + if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' fsw_file = trim(atm_data_dir)//'/forcing.shwflx.nc' flw_file = trim(atm_data_dir)//'/forcing.radflx.nc' @@ -4602,7 +4602,7 @@ subroutine hycom_atm_data character(len=*), parameter :: subname = '(hycom_atm_data)' - if (forcing_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' + if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' call icepack_query_parameters(Tffresh_out=Tffresh) call icepack_query_parameters(secday_out=secday) @@ -4682,7 +4682,7 @@ subroutine hycom_atm_data endif ! Interpolate - if (forcing_diag) then + if (debug_forcing) then if (my_task == master_task) then write(nu_diag,*)'CICE: Atm. interpolate: = ',& hcdate,c1intp,c2intp @@ -4768,15 +4768,15 @@ subroutine read_data_nc_point (flag, recd, yr, ixm, ixx, ixp, & character(len=*), parameter :: subname = '(read_data_nc_point)' - if (forcing_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' + if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' call ice_timer_start(timer_readwrite) ! reading/writing field_data = c0 ! to satisfy intent(out) attribute - if (istep1 > debug_model_step) forcing_diag = .true. !! debugging + if (istep1 > debug_model_step) debug_forcing = .true. !! debugging - if (my_task==master_task .and. (forcing_diag)) then + if (my_task==master_task .and. (debug_forcing)) then write(nu_diag,*) ' ', trim(data_file) endif @@ -4823,7 +4823,7 @@ subroutine read_data_nc_point (flag, recd, yr, ixm, ixx, ixp, & nrec = recd + n2 call ice_read_nc & - (fid, nrec, fieldname, field_data(arg), forcing_diag, & + (fid, nrec, fieldname, field_data(arg), debug_forcing, & field_loc, field_type) !if (ixx==1) call ice_close_nc(fid) @@ -4838,7 +4838,7 @@ subroutine read_data_nc_point (flag, recd, yr, ixm, ixx, ixp, & nrec = recd + ixx call ice_read_nc & - (fid, nrec, fieldname, field_data(arg), forcing_diag, & + (fid, nrec, fieldname, field_data(arg), debug_forcing, & field_loc, field_type) if (ixp /= -99) then @@ -4864,7 +4864,7 @@ subroutine read_data_nc_point (flag, recd, yr, ixm, ixx, ixp, & nrec = recd + n4 call ice_read_nc & - (fid, nrec, fieldname, field_data(arg), forcing_diag, & + (fid, nrec, fieldname, field_data(arg), debug_forcing, & field_loc, field_type) endif ! ixp /= -99 @@ -4882,7 +4882,7 @@ subroutine ISPOL_files character(len=*), parameter :: subname = '(ISPOL_files)' - if (forcing_debug .and. 
my_task == master_task) write(nu_diag,*) subname,'fdbg start' + if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' fsw_file = & trim(atm_data_dir)//'/fsw_sfc_4Xdaily.nc' @@ -4975,7 +4975,7 @@ subroutine ISPOL_data character(len=*), parameter :: subname = '(ISPOL_data)' - if (forcing_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' + if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' call icepack_query_parameters(secday_out=secday) call icepack_warnings_flush(nu_diag) @@ -5175,7 +5175,7 @@ subroutine ocn_data_ispol_init character(len=*), parameter :: subname = '(ocn_data_ispol_init)' - if (forcing_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' + if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' if (my_task == master_task) then @@ -5202,10 +5202,10 @@ subroutine ocn_data_ispol_init do m=1,12 ! Note: netCDF does single to double conversion if necessary if (n >= 4 .and. n <= 7) then - call ice_read_nc(fid, m, vname(n), work, forcing_diag, & + call ice_read_nc(fid, m, vname(n), work, debug_forcing, & field_loc_NEcorner, field_type_vector) else - call ice_read_nc(fid, m, vname(n), work, forcing_diag, & + call ice_read_nc(fid, m, vname(n), work, debug_forcing, & field_loc_center, field_type_scalar) endif ocn_frc_m(:,:,:,n,m) = work @@ -5255,7 +5255,7 @@ subroutine box2001_data character(len=*), parameter :: subname = '(box2001_data)' - if (forcing_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' + if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' call icepack_query_parameters(pi_out=pi, pi2_out=pi2, puny_out=puny) call icepack_query_parameters(secday_out=secday) @@ -5348,7 +5348,7 @@ subroutine get_wave_spec logical (kind=log_kind) :: wave_spec character(len=*), parameter :: subname = '(get_wave_spec)' - if (forcing_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' + if (local_debug .and. my_task == master_task) write(nu_diag,*) subname,'fdbg start' call ice_timer_start(timer_fsd) @@ -5361,7 +5361,7 @@ subroutine get_wave_spec ! if no wave data is provided, wave_spectrum is zero everywhere wave_spectrum(:,:,:,:) = c0 wave_spec_dir = ocn_data_dir - forcing_diag = .false. + debug_forcing = .false. ! wave spectrum and frequencies if (wave_spec) then @@ -5379,7 +5379,7 @@ subroutine get_wave_spec else #ifdef USE_NETCDF call ice_open_nc(wave_spec_file,fid) - call ice_read_nc_xyf (fid, 1, 'efreq', wave_spectrum(:,:,:,:), forcing_diag, & + call ice_read_nc_xyf (fid, 1, 'efreq', wave_spectrum(:,:,:,:), debug_forcing, & field_loc_center, field_type_scalar) call ice_close_nc(fid) #else diff --git a/cicecore/cicedynB/general/ice_forcing_bgc.F90 b/cicecore/cicedynB/general/ice_forcing_bgc.F90 index d9408c304..b7c39ad4c 100644 --- a/cicecore/cicedynB/general/ice_forcing_bgc.F90 +++ b/cicecore/cicedynB/general/ice_forcing_bgc.F90 @@ -17,7 +17,7 @@ module ice_forcing_bgc use ice_calendar, only: dt, istep, msec, mday, mmonth use ice_fileunits, only: nu_diag use ice_arrays_column, only: restore_bgc, & - bgc_data_dir, fe_data_type + bgc_data_dir, fe_data_type, optics_file use ice_constants, only: c0, p1 use ice_constants, only: field_loc_center, field_type_scalar use ice_exit, only: abort_ice @@ -861,7 +861,7 @@ subroutine faero_optics kaer_bc_tab, & ! BC mass extinction cross section (m2/kg) waer_bc_tab, & ! BC single scatter albedo (fraction) gaer_bc_tab, & ! 
BC aerosol asymmetry parameter (cos(theta)) - bcenh ! BC absorption enhancement facto + bcenh ! BC absorption enhancement factor #ifdef USE_NETCDF use netcdf @@ -883,7 +883,6 @@ subroutine faero_optics fid ! file id for netCDF file character (char_len_long) :: & - optics_file, & ! netcdf filename fieldname ! field name in netcdf file character(len=*), parameter :: subname = '(faero_optics)' @@ -963,20 +962,16 @@ subroutine faero_optics if (modal_aero) then #ifdef USE_NETCDF - optics_file = & - '/usr/projects/climate/njeffery/DATA/CAM/snicar/snicar_optics_5bnd_mam_c140303.nc' - if (my_task == master_task) then - write (nu_diag,*) ' ' - write (nu_diag,*) 'Read optics for modal aerosol treament in' - write (nu_diag,*) trim(optics_file) - call ice_open_nc(optics_file,fid) - endif + write (nu_diag,*) ' ' + write (nu_diag,*) 'Read optics for modal aerosol treament in' + write (nu_diag,*) trim(optics_file) + call ice_open_nc(optics_file,fid) - fieldname='bcint_enh_mam_cice' - if (my_task == master_task) then +! fieldname='bcint_enh_mam_cice' + fieldname='modalBCabsorptionParameter5band' - status = nf90_inq_varid(fid, trim(fieldname), varid) + status = nf90_inq_varid(fid, trim(fieldname), varid) if (status /= nf90_noerr) then call abort_ice (subname//'ERROR: Cannot find variable '//trim(fieldname)) @@ -985,20 +980,20 @@ subroutine faero_optics start=(/1,1,1,1/), & count=(/3,10,8,1/) ) do n=1,10 - amin = minval(bcenh(:,n,:)) - amax = maxval(bcenh(:,n,:)) - asum = sum (bcenh(:,n,:)) - write(nu_diag,*) ' min, max, sum =', amin, amax, asum + amin = minval(bcenh(:,n,:)) + amax = maxval(bcenh(:,n,:)) + asum = sum (bcenh(:,n,:)) + write(nu_diag,*) ' min, max, sum =', amin, amax, asum enddo call ice_close_nc(fid) - endif !master_task - do n=1,3 - do k=1,8 - call broadcast_array(bcenh(n,:,k), master_task) - enddo - enddo + endif !master_task + do n=1,3 + do k=1,8 + call broadcast_array(bcenh(n,:,k), master_task) + enddo + enddo #else - call abort_ice(subname//'ERROR: USE_NETCDF cpp not defined', & + call abort_ice(subname//'ERROR: USE_NETCDF cpp not defined', & file=__FILE__, line=__LINE__) #endif endif ! 
modal_aero diff --git a/cicecore/cicedynB/general/ice_init.F90 b/cicecore/cicedynB/general/ice_init.F90 index 5e5fd144f..f1c0b8c19 100644 --- a/cicecore/cicedynB/general/ice_init.F90 +++ b/cicecore/cicedynB/general/ice_init.F90 @@ -59,7 +59,8 @@ subroutine input_data use ice_broadcast, only: broadcast_scalar, broadcast_array use ice_diagnostics, only: diag_file, print_global, print_points, latpnt, lonpnt, & - debug_model, debug_model_step + debug_model, debug_model_step, debug_model_task, & + debug_model_i, debug_model_j, debug_model_iblk use ice_domain, only: close_boundaries, orca_halogrid use ice_domain_size, only: ncat, nilyr, nslyr, nblyr, nfsd, nfreq, & n_iso, n_aero, n_zaero, n_algae, & @@ -84,7 +85,7 @@ subroutine input_data use ice_flux, only: default_season use ice_flux_bgc, only: cpl_bgc use ice_forcing, only: & - ycycle, fyear_init, forcing_diag, & + ycycle, fyear_init, debug_forcing, & atm_data_type, atm_data_dir, precip_units, rotate_wind, & atm_data_format, ocn_data_format, & bgc_data_type, & @@ -164,9 +165,10 @@ subroutine input_data pointer_file, dumpfreq, dumpfreq_n, dump_last, & diagfreq, diag_type, diag_file, history_format,& print_global, print_points, latpnt, lonpnt, & - forcing_diag, histfreq, histfreq_n, hist_avg, & + debug_forcing, histfreq, histfreq_n, hist_avg, & history_dir, history_file, history_precision, cpl_bgc, & conserv_check, debug_model, debug_model_step, & + debug_model_i, debug_model_j, debug_model_iblk, debug_model_task, & year_init, month_init, day_init, sec_init, & write_ic, incond_dir, incond_file, version_name @@ -267,7 +269,11 @@ subroutine input_data npt_unit = '1' ! units of npt 'y', 'm', 'd', 's', '1' diagfreq = 24 ! how often diag output is written debug_model = .false. ! debug output - debug_model_step = 999999999 ! debug model after this step number + debug_model_step = 0 ! debug model after this step number + debug_model_i = -1 ! debug model local i index + debug_model_j = -1 ! debug model local j index + debug_model_iblk = -1 ! debug model local iblk number + debug_model_task = -1 ! debug model local task number print_points = .false. ! if true, print point data print_global = .true. ! if true, print global diagnostic data bfbflag = 'off' ! off = optimized @@ -436,7 +442,7 @@ subroutine input_data restore_ocn = .false. ! restore sst if true trestore = 90 ! restoring timescale, days (0 instantaneous) restore_ice = .false. ! restore ice state on grid edges if true - forcing_diag = .false. ! true writes diagnostics for input forcing + debug_forcing = .false. ! true writes diagnostics for input forcing latpnt(1) = 90._dbl_kind ! latitude of diagnostic point 1 (deg) lonpnt(1) = 0._dbl_kind ! 
longitude of point 1 (deg)
@@ -604,6 +610,10 @@ subroutine input_data
       call broadcast_scalar(diagfreq,          master_task)
       call broadcast_scalar(debug_model,       master_task)
       call broadcast_scalar(debug_model_step,  master_task)
+      call broadcast_scalar(debug_model_i,     master_task)
+      call broadcast_scalar(debug_model_j,     master_task)
+      call broadcast_scalar(debug_model_iblk,  master_task)
+      call broadcast_scalar(debug_model_task,  master_task)
       call broadcast_scalar(print_points,      master_task)
       call broadcast_scalar(print_global,      master_task)
       call broadcast_scalar(bfbflag,           master_task)
@@ -758,7 +768,7 @@ subroutine input_data
       call broadcast_scalar(restore_ocn,       master_task)
       call broadcast_scalar(trestore,          master_task)
       call broadcast_scalar(restore_ice,       master_task)
-      call broadcast_scalar(forcing_diag,      master_task)
+      call broadcast_scalar(debug_forcing,     master_task)
       call broadcast_array (latpnt(1:2),       master_task)
       call broadcast_array (lonpnt(1:2),       master_task)
       call broadcast_scalar(runid,             master_task)
@@ -1649,6 +1659,10 @@ subroutine input_data
          write(nu_diag,1011) ' print_points     = ', print_points
          write(nu_diag,1011) ' debug_model      = ', debug_model
          write(nu_diag,1022) ' debug_model_step = ', debug_model_step
+         write(nu_diag,1021) ' debug_model_i    = ', debug_model_i
+         write(nu_diag,1021) ' debug_model_j    = ', debug_model_j
+         write(nu_diag,1021) ' debug_model_iblk = ', debug_model_iblk
+         write(nu_diag,1021) ' debug_model_task = ', debug_model_task
          write(nu_diag,1031) ' bfbflag          = ', trim(bfbflag)
          write(nu_diag,1021) ' numin            = ', numin
          write(nu_diag,1021) ' numax            = ', numax
diff --git a/cicecore/shared/ice_arrays_column.F90 b/cicecore/shared/ice_arrays_column.F90
index 06efd6e94..7b1f2ee15 100644
--- a/cicecore/shared/ice_arrays_column.F90
+++ b/cicecore/shared/ice_arrays_column.F90
@@ -267,6 +267,9 @@ module ice_arrays_column
    character(char_len_long), public :: &
       bgc_data_dir   ! directory for biogeochemistry data

+   character(char_len_long), public :: &
+      optics_file    ! modal aero optics file
+
    real (kind=dbl_kind), dimension(:), allocatable, public :: &
       R_C2N_DON      ! carbon to nitrogen mole ratio of DON pool

@@ -305,12 +308,12 @@ subroutine alloc_arrays_column
       ! Allocate column arrays
       use ice_exit, only: abort_ice

      integer (int_kind) :: max_nbtrcr, max_algae, max_aero, &
-        nmodal1, nmodal2, max_don, nbtrcr_sw
+        nmodal1, nmodal2, max_don
      integer (int_kind) :: ierr, ntrcr
      character(len=*),parameter :: subname='(alloc_arrays_column)'

-     call icepack_query_tracer_sizes(ntrcr_out=ntrcr, nbtrcr_sw_out=nbtrcr_sw)
+     call icepack_query_tracer_sizes(ntrcr_out=ntrcr)
      call icepack_query_tracer_sizes( max_nbtrcr_out=max_nbtrcr, &
         max_algae_out=max_algae, max_aero_out=max_aero, &
         nmodal1_out=nmodal1, nmodal2_out=nmodal2, max_don_out=max_don)
@@ -396,8 +399,7 @@ subroutine alloc_arrays_column
         ocean_bio_all(nx_block,ny_block,max_nbtrcr,max_blocks), & ! fixed order, all values even for tracers false
         ice_bio_net  (nx_block,ny_block,max_nbtrcr,max_blocks), & ! depth integrated tracer (mmol/m^2)
         snow_bio_net (nx_block,ny_block,max_nbtrcr,max_blocks), & ! depth integrated snow tracer (mmol/m^2)
-        trcrn_sw     (nx_block,ny_block,nbtrcr_sw,ncat,max_blocks), & ! bgc tracers active in the delta-Eddington shortwave
-        algal_peak   (nx_block,ny_block,max_algae,max_blocks), & ! vertical location of algal maximum, 0 if no maximum
+        algal_peak   (nx_block,ny_block,max_algae ,max_blocks), & ! 
vertical location of algal maximum, 0 if no maximum stat=ierr) if (ierr/=0) call abort_ice(subname//': Out of Memory2') diff --git a/cicecore/shared/ice_init_column.F90 b/cicecore/shared/ice_init_column.F90 index 1362e055e..746f42574 100644 --- a/cicecore/shared/ice_init_column.F90 +++ b/cicecore/shared/ice_init_column.F90 @@ -7,6 +7,7 @@ module ice_init_column use ice_kinds_mod + use ice_blocks, only: nx_block, ny_block use ice_constants use ice_communicate, only: my_task, master_task, ice_barrier use ice_domain_size, only: ncat, max_blocks @@ -129,7 +130,6 @@ module ice_init_column subroutine init_thermo_vertical - use ice_blocks, only: nx_block, ny_block use ice_flux, only: salinz, Tmltz integer (kind=int_kind) :: & @@ -186,7 +186,7 @@ subroutine init_shortwave fswintn, albpndn, apeffn, trcrn_sw, dhsn, ffracn, snowfracn, & kaer_tab, waer_tab, gaer_tab, kaer_bc_tab, waer_bc_tab, gaer_bc_tab, bcenh, & swgrid, igrid - use ice_blocks, only: block, get_block, nx_block, ny_block + use ice_blocks, only: block, get_block use ice_calendar, only: dt, calendar_type, & days_per_year, nextsw_cday, yday, msec use ice_diagnostics, only: npnt, print_points, pmloc, piloc, pjloc @@ -594,7 +594,6 @@ subroutine init_fsd(floesize) use ice_arrays_column, only: floe_rad_c, floe_binwidth, & wavefreq, dwavefreq, wave_sig_ht, wave_spectrum, & d_afsd_newi, d_afsd_latg, d_afsd_latm, d_afsd_wave, d_afsd_weld - use ice_blocks, only: nx_block, ny_block use ice_domain_size, only: ncat, max_blocks, nfsd use ice_init, only: ice_ic use ice_state, only: aicen @@ -1005,7 +1004,7 @@ end subroutine init_hbrine subroutine input_zbgc - use ice_arrays_column, only: restore_bgc + use ice_arrays_column, only: restore_bgc, optics_file use ice_broadcast, only: broadcast_scalar use ice_restart_column, only: restart_bgc, restart_zsal, & restart_hbrine @@ -1048,7 +1047,7 @@ subroutine input_zbgc restore_bgc, restart_bgc, scale_bgc, solve_zsal, restart_zsal, & tr_bgc_Nit, tr_bgc_C, tr_bgc_chl, tr_bgc_Am, tr_bgc_Sil, & tr_bgc_DMS, tr_bgc_PON, tr_bgc_hum, tr_bgc_DON, tr_bgc_Fe, & - grid_o, grid_o_t, l_sk, grid_oS, & + grid_o, grid_o_t, l_sk, grid_oS, optics_file, & l_skS, phi_snow, initbio_frac, frazil_scav, & ratio_Si2N_diatoms , ratio_Si2N_sp , ratio_Si2N_phaeo , & ratio_S2N_diatoms , ratio_S2N_sp , ratio_S2N_phaeo , & @@ -1105,6 +1104,7 @@ subroutine input_zbgc tr_brine = .false. ! brine height differs from ice height tr_zaero = .false. ! z aerosol tracers modal_aero = .false. ! use modal aerosol treatment of aerosols + optics_file = 'unknown_optics_file' ! modal aerosol optics file restore_bgc = .false. ! restore bgc if true solve_zsal = .false. ! update salinity tracer profile from solve_S_dt restart_bgc = .false. ! biogeochemistry restart @@ -1321,6 +1321,7 @@ subroutine input_zbgc call broadcast_scalar(tr_zaero, master_task) call broadcast_scalar(dEdd_algae, master_task) call broadcast_scalar(modal_aero, master_task) + call broadcast_scalar(optics_file, master_task) call broadcast_scalar(grid_o, master_task) call broadcast_scalar(grid_o_t, master_task) call broadcast_scalar(l_sk, master_task) @@ -1690,6 +1691,7 @@ subroutine input_zbgc write(nu_diag,1010) ' solve_zbgc = ', solve_zbgc write(nu_diag,1010) ' tr_zaero = ', tr_zaero write(nu_diag,1020) ' number of aerosols = ', n_zaero + write(nu_diag,1031) ' optics_file = ', trim(optics_file) ! bio parameters write(nu_diag,1000) ' grid_o = ', grid_o write(nu_diag,1000) ' grid_o_t = ', grid_o_t @@ -1747,6 +1749,7 @@ subroutine input_zbgc 1010 format (a30,2x,l6) ! 
logical 1020 format (a30,2x,i6) ! integer 1030 format (a30, a8) ! character + 1031 format (a30, a ) ! character end subroutine input_zbgc @@ -2280,7 +2283,7 @@ subroutine init_zbgc use ice_state, only: trcr_base, trcr_depend, n_trcr_strata, & nt_strata - use ice_arrays_column, only: R_C2N, R_chl2N, R_C2N_DON, R_Si2N + use ice_arrays_column, only: R_C2N, R_chl2N, R_C2N_DON, R_Si2N, trcrn_sw integer (kind=int_kind) :: & nbtrcr, nbtrcr_sw, nt_fbri, & @@ -2948,6 +2951,10 @@ subroutine init_zbgc endif if (.NOT. dEdd_algae) nbtrcr_sw = 1 + ! tcraig, added 6/1/21, why is nbtrcr_sw set here? + call icepack_init_tracer_sizes(nbtrcr_sw_in=nbtrcr_sw) + allocate(trcrn_sw(nx_block,ny_block,nbtrcr_sw,ncat,max_blocks)) ! bgc tracers active in the delta-Eddington shortwave + !----------------------------------------------------------------- ! spew !----------------------------------------------------------------- diff --git a/configuration/scripts/cice.settings b/configuration/scripts/cice.settings index 3bd85f5f9..1faf2c5be 100755 --- a/configuration/scripts/cice.settings +++ b/configuration/scripts/cice.settings @@ -42,5 +42,5 @@ if (${ICE_NTASKS} == 1) setenv ICE_COMMDIR serial ### Specialty code setenv ICE_BLDDEBUG false # build debug flags -setenv ICE_COVERAGE false # build debug flags +setenv ICE_COVERAGE false # build coverage flags diff --git a/configuration/scripts/ice_in b/configuration/scripts/ice_in index e5fcb9177..74fc548f0 100644 --- a/configuration/scripts/ice_in +++ b/configuration/scripts/ice_in @@ -30,8 +30,12 @@ diag_type = 'stdout' diag_file = 'ice_diag.d' debug_model = .false. - debug_model_step = 999999999 - forcing_diag = .false. + debug_model_step = 0 + debug_model_i = -1 + debug_model_j = -1 + debug_model_iblk = -1 + debug_model_task = -1 + debug_forcing = .false. print_global = .true. print_points = .true. conserv_check = .false. @@ -258,6 +262,7 @@ restart_hbrine = .false. tr_zaero = .false. modal_aero = .false. + optics_file = 'ICE_MACHINE_INPUTDATA/CICE_data/forcing/snicar_optics_5bnd_snow_and_aerosols.nc' skl_bgc = .false. z_tracers = .false. dEdd_algae = .false. diff --git a/configuration/scripts/options/set_nml.bigdiag b/configuration/scripts/options/set_nml.bigdiag index a98bc0c2b..95d752af6 100644 --- a/configuration/scripts/options/set_nml.bigdiag +++ b/configuration/scripts/options/set_nml.bigdiag @@ -1,4 +1,4 @@ -forcing_diag = .true. +debug_forcing = .true. debug_model = .true. debug_model_step = 4 print_global = .true. 
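The debug point controls exercised by set_nml.bigdiag above and set_nml.diagpt1 below are ordinary setup_nml entries, so a minimal ice_in sketch combining them might look like the following (illustrative values only; the index values assume the gx3 7x2x5x29x12 roundrobin decomposition noted in set_nml.diagpt1):

&setup_nml
    debug_model      = .true.   ! turn on extended point diagnostics
    debug_model_step = 4        ! begin printing at istep1 = 4
    debug_model_i    = 3        ! local i index of the debug point
    debug_model_j    = 22       ! local j index of the debug point
    debug_model_iblk = 11       ! local block number of the debug point
    debug_model_task = 0        ! MPI task that owns the debug point
/

If the four index settings are left at their default of -1, debug_ice falls back to the first lonpnt/latpnt diagnostic point instead.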
diff --git a/configuration/scripts/options/set_nml.diagpt1 b/configuration/scripts/options/set_nml.diagpt1 new file mode 100644 index 000000000..baaa564e6 --- /dev/null +++ b/configuration/scripts/options/set_nml.diagpt1 @@ -0,0 +1,5 @@ +# this local point is hardwired to (85,-150) for gx3, 7x2x5x29x12 roundrobin +debug_model_i = 3 +debug_model_j = 22 +debug_model_iblk = 11 +debug_model_task = 0 diff --git a/configuration/scripts/options/set_nml.dwghtfile b/configuration/scripts/options/set_nml.dwghtfile index d72b0fb8a..33bb2d29f 100644 --- a/configuration/scripts/options/set_nml.dwghtfile +++ b/configuration/scripts/options/set_nml.dwghtfile @@ -1,3 +1,2 @@ distribution_type = 'wghtfile' distribution_wght = 'file' - distribution_wght_file = 'ICE_MACHINE_INPUTDATA/CICE_data/grid/gx1/cice62_gx1_wghtmask.nc' diff --git a/configuration/scripts/options/set_nml.gx1 b/configuration/scripts/options/set_nml.gx1 index 2e8d4f5b7..50615e81e 100644 --- a/configuration/scripts/options/set_nml.gx1 +++ b/configuration/scripts/options/set_nml.gx1 @@ -19,3 +19,4 @@ atm_data_dir = 'ICE_MACHINE_INPUTDATA/CICE_data/forcing/gx1/JRA55' precip_units = 'mks' ocn_data_dir = 'ICE_MACHINE_INPUTDATA/CICE_data/forcing/gx1/CESM/MONTHLY' bgc_data_dir = 'ICE_MACHINE_INPUTDATA/CICE_data/forcing/gx1/WOA/MONTHLY' +distribution_wght_file = 'ICE_MACHINE_INPUTDATA/CICE_data/grid/gx1/cice62_gx1_wghtmask.nc' diff --git a/configuration/scripts/tests/base_suite.ts b/configuration/scripts/tests/base_suite.ts index c37750a31..9804052ad 100644 --- a/configuration/scripts/tests/base_suite.ts +++ b/configuration/scripts/tests/base_suite.ts @@ -5,27 +5,32 @@ smoke gx3 1x4 debug,diag1,run2day smoke gx3 4x1 debug,diag1,run5day restart gx3 8x2 debug smoke gx3 8x2 diag24,run1year,medium -smoke gx3 7x2 diag1,bigdiag,run1day +smoke gx3 7x2 diag1,bigdiag,run1day,diagpt1 decomp gx3 4x2x25x29x5 none smoke gx3 4x2 diag1,run5day smoke_gx3_8x2_diag1_run5day smoke gx3 4x1 diag1,run5day,thread smoke_gx3_8x2_diag1_run5day -restart gx1 40x4 droundrobin,medium -restart tx1 40x4 dsectrobin,medium +restart gx1 40x4 droundrobin +restart tx1 40x4 dsectrobin +restart tx1 60x2 droundrobin,maskhalo restart gx3 4x4 none +restart gx3 10x4 maskhalo restart gx3 6x2 alt01 restart gx3 8x2 alt02 restart gx3 4x2 alt03 +restart gx3 12x2 alt03,maskhalo,droundrobin restart gx3 4x4 alt04 restart gx3 4x4 alt05 restart gx3 8x2 alt06 +restart gx3 18x2 debug,maskhalo restart gx3 6x2 alt01,debug,short restart gx3 8x2 alt02,debug,short restart gx3 4x2 alt03,debug,short +smoke gx3 12x2 alt03,debug,short,maskhalo,droundrobin smoke gx3 4x4 alt04,debug,short smoke gx3 4x4 alt05,debug,short smoke gx3 8x2 alt06,debug,short smoke gx3 10x2 debug,diag1,run5day,gx3sep2 -smoke gx3 7x2 diag1,bigdiag,run1day +smoke gx3 7x2x5x29x12 diag1,bigdiag,run1day,debug restart gbox128 4x2 short restart gbox128 4x2 boxnodyn,short restart gbox128 4x2 boxnodyn,short,debug @@ -44,20 +49,20 @@ restart gx1 8x1 bgczclim,medium smoke gx1 24x1 medium,run90day,yi2008 smoke gx3 8x1 medium,run90day,yi2008 restart gx1 24x1 short -restart gx1 16x2 seabedLKD,gx1apr,medium,debug -restart gx1 15x2 seabedprob,medium -restart gx1 32x1 gx1prod,medium +restart gx1 16x2 seabedLKD,gx1apr,short,debug +restart gx1 15x2 seabedprob +restart gx1 32x1 gx1prod smoke gx3 4x2 fsd1,diag24,run5day,debug -smoke gx3 8x2 fsd12,diag24,run5day,short +smoke gx3 8x2 fsd12,diag24,run5day restart gx3 4x2 fsd12,debug,short -smoke gx3 8x2 fsd12ww3,diag24,run1day,medium +smoke gx3 8x2 fsd12ww3,diag24,run1day smoke gx3 4x1 isotope,debug restart 
gx3 8x2 isotope restart gx3 4x4 gx3ncarbulk,iobinary restart gx3 4x4 histall,precision8,cdf64 smoke gx3 30x1 bgcz,histall smoke gx3 14x2 fsd12,histall -smoke gx3 4x1 dynpicard,medium +smoke gx3 4x1 dynpicard smoke gx3 8x2 diag24,run5day,zsal,debug restart gx3 8x2 zsal restart gx3 8x2 gx3ncarbulk,debug diff --git a/configuration/scripts/tests/unittest_suite.ts b/configuration/scripts/tests/unittest_suite.ts index 2e9dcc7cf..55406fce8 100644 --- a/configuration/scripts/tests/unittest_suite.ts +++ b/configuration/scripts/tests/unittest_suite.ts @@ -1,4 +1,4 @@ # Test Grid PEs Sets BFB-compare unittest gx3 1x1 helloworld -unittest gx3 1x1 calchk +unittest gx3 1x1 calchk,short diff --git a/doc/source/cice_index.rst b/doc/source/cice_index.rst index 9e2868947..365385e25 100644 --- a/doc/source/cice_index.rst +++ b/doc/source/cice_index.rst @@ -141,8 +141,14 @@ either Celsius or Kelvin units). "days_per_year", ":math:`\bullet` number of days in one year", "365" "day_init", ":math:`\bullet` the initial day of the month", "" "dbl_kind", "definition of double precision", "selected_real_kind(13)" + "debug_blocks", ":math:`\bullet` write extra diagnostics for blocks and decomposition", ".false." + "debug_forcing", ":math:`\bullet` write extra diagnostics for forcing inputs", ".false." "debug_model", "Logical that controls extended model point debugging.", "" - "debug_model_step", "Initial timestep for output associated with debug_model.", "" + "debug_model_i", "Local i gridpoint that defines debug_model point output.", "" + "debug_model_iblk", "Local iblk value that defines debug_model point output.", "" + "debug_model_j", "Local j gridpoint that defines debug_model point output.", "" + "debug_model_task", "Local mpi task value that defines debug_model point output.", "" + "debug_model_step", "Initial timestep for output from the debug_model flag.", "" "Delta", "function of strain rates (see Section :ref:`dynam`)", "1/s" "default_season", "Season from which initial values of forcing are set.", "winter" "denom1", "combination of constants for stress equation", "" @@ -231,7 +237,6 @@ either Celsius or Kelvin units). "flw", "incoming longwave radiation", "W/m\ :math:`^2`" "flwout", "outgoing longwave radiation", "W/m\ :math:`^2`" "fm", "Coriolis parameter * mass in U cell", "kg/s" - "forcing_diag", ":math:`\bullet` write extra diagnostics for forcing inputs", ".false." 
"formdrag", ":math:`\bullet` calculate form drag", "" "fpond", "fresh water flux to ponds", "kg/m\ :math:`^2`/s" "fr_resp", "bgc respiration fraction", "0.05" diff --git a/doc/source/user_guide/ug_case_settings.rst b/doc/source/user_guide/ug_case_settings.rst index 44ee6f5b0..355b5665d 100644 --- a/doc/source/user_guide/ug_case_settings.rst +++ b/doc/source/user_guide/ug_case_settings.rst @@ -145,8 +145,13 @@ setup_nml "``cpl_bgc``", "logical", "couple bgc thru driver", "``.false.``" "``days_per_year``", "integer", "number of days in a model year", "365" "``day_init``", "integer", "the initial day of the month if not using restart", "1" + "``debug_forcing``", "logical", "write extra forcing diagnostics", "``.false.``" "``debug_model``", "logical", "write extended model point diagnostics", "``.false.``" - "``debug_model_step``", "logical", "initial timestep to write ``debug_model`` output", "999999999" + "``debug_model_i``", "integer", "local i index of debug_model point", "-1" + "``debug_model_iblk``", "integer", "iblk value for debug_model point", "-1" + "``debug_model_j``", "integer", "local j index of debug_model point", "-1" + "``debug_model_task``", "integer", "mpi task value for debug_model point", "-1" + "``debug_model_step``", "logical", "initial timestep to write ``debug_model`` output", "0" "``diagfreq``", "integer", "frequency of diagnostic output in timesteps", "24" "``diag_type``", "``stdout``", "write diagnostic output to stdout", "``stdout``" "", "``file``", "write diagnostic output to file", "" @@ -159,7 +164,6 @@ setup_nml "", "``1``", "write restart every ``dumpfreq_n`` time step", "" "``dumpfreq_n``", "integer", "write restart frequency with ``dumpfreq``", "1" "``dump_last``", "logical", "write restart on last time step of simulation", "``.false.``" - "``forcing_diag``", "logical", "write extra diagnostics", "``.false.``" "``hist_avg``", "logical", "write time-averaged data", "``.true.``" "``histfreq``", "``d``", "write history every ``histfreq_n`` days", "'1','h','d','m','y'" "", "``h``", "write history every ``histfreq_n`` hours", "" @@ -654,6 +658,7 @@ zbgc_nml "``mu_max_phaeo``", "real", "maximum growth rate phaeocystis per day", "0.851" "``mu_max_sp``", "real", "maximum growth rate small plankton per day", "0.851" "``nitratetype``", "real", "mobility type between stationary and mobile nitrate", "-1.0" + "``optics_file``", "string", "optics file associated with modal aersols", "unknown_optics_file" "``op_dep_min``", "real", "light attenuates for optical depths exceeding min", "0.1" "``phi_snow``", "real", "snow porosity for brine height tracer", "0.5" "``ratio_chl2N_diatoms``", "real", "algal chl to N in mg/mmol diatoms", "2.1" diff --git a/doc/source/user_guide/ug_implementation.rst b/doc/source/user_guide/ug_implementation.rst index 566d10fbc..8a733f4cc 100644 --- a/doc/source/user_guide/ug_implementation.rst +++ b/doc/source/user_guide/ug_implementation.rst @@ -916,15 +916,28 @@ output is written to a log file. The log file unit to which diagnostic output is written is set in **ice\_fileunits.F90**. If ``diag_type`` = ‘stdout’, then it is written to standard out (or to **ice.log.[ID]** if you redirect standard out as in **cice.run**); otherwise it is written -to the file given by ``diag_file``. In addition to the standard diagnostic +to the file given by ``diag_file``. 
+ +In addition to the standard diagnostic output (maximum area-averaged thickness, velocity, average albedo, total ice area, and total ice and snow volumes), the namelist options ``print_points`` and ``print_global`` cause additional diagnostic information to be computed and written. ``print_global`` outputs global sums that are useful for checking global conservation of mass and energy. -``print_points`` writes data for two specific grid points. Currently, one +``print_points`` writes data for two specific grid points defined by the +input namelist ``lonpnt`` and ``latpnt``. By default, one point is near the North Pole and the other is in the Weddell Sea; these -may be changed in **ice\_in**. +may be changed in **ice\_in**. + +The namelist ``debug_model`` prints detailed +debug diagnostics for a single point as the model advances. The point is defined +by the namelist ``debug_model_i``, ``debug_model_j``, ``debug_model_iblk``, +and ``debug_model_task``. These are the local i, j, block, and mpi task index values +of the point to be diagnosed. This point is defined in local index space +and can be values in the array halo. If the local point is not defined in +namelist, the point associated with ``lonpnt(1)`` and ``latpnt(1)`` is used. +``debug_model`` is normally used when the model aborts and needs to be debugged +in detail at a particular (usually failing) grid point. Timers are declared and initialized in **ice\_timers.F90**, and the code to be timed is wrapped with calls to *ice\_timer\_start* and diff --git a/doc/source/user_guide/ug_troubleshooting.rst b/doc/source/user_guide/ug_troubleshooting.rst index a8a9c2c4d..f400673ac 100644 --- a/doc/source/user_guide/ug_troubleshooting.rst +++ b/doc/source/user_guide/ug_troubleshooting.rst @@ -135,6 +135,18 @@ conflicts in module dependencies. `debug\_model` = true (**ice\_in**) Print extended diagnostics for the first point associated with `print\_points`. +`debug\_model\_i` = integer (**ice\_in**) + Defines the local i index for the point to be diagnosed with `debug\_model`. + +`debug\_model\_j` = integer (**ice\_in**) + Defines the local j index for the point to be diagnosed with `debug\_model`. + +`debug\_model\_iblk` = integer (**ice\_in**) + Defines the local iblk value for the point to be diagnosed with `debug\_model`. + +`debug\_model\_task` = integer (**ice\_in**) + Defines the local task value for the point to be diagnosed with `debug\_model`. + `debug\_model\_step` = true (**ice\_in**) Timestep to starting printing diagnostics associated with `debug\_model`. 
From 371c7d89334dca3e51d68d4360e39b137f183a5a Mon Sep 17 00:00:00 2001 From: apcraig Date: Fri, 4 Jun 2021 10:47:20 -0600 Subject: [PATCH 2/6] add sumchk unit test to test global reduction methods --- .../drivers/unittest/sumchk/CICE_FinalMod.F90 | 70 ++ .../drivers/unittest/sumchk/CICE_InitMod.F90 | 486 +++++++++++++ cicecore/drivers/unittest/sumchk/sumchk.F90 | 641 ++++++++++++++++++ configuration/scripts/Makefile | 8 +- configuration/scripts/options/set_env.sumchk | 2 + configuration/scripts/tests/unittest_suite.ts | 9 +- 6 files changed, 1210 insertions(+), 6 deletions(-) create mode 100644 cicecore/drivers/unittest/sumchk/CICE_FinalMod.F90 create mode 100644 cicecore/drivers/unittest/sumchk/CICE_InitMod.F90 create mode 100644 cicecore/drivers/unittest/sumchk/sumchk.F90 create mode 100644 configuration/scripts/options/set_env.sumchk diff --git a/cicecore/drivers/unittest/sumchk/CICE_FinalMod.F90 b/cicecore/drivers/unittest/sumchk/CICE_FinalMod.F90 new file mode 100644 index 000000000..a59c210aa --- /dev/null +++ b/cicecore/drivers/unittest/sumchk/CICE_FinalMod.F90 @@ -0,0 +1,70 @@ +!======================================================================= +! +! This module contains routines for the final exit of the CICE model, +! including final output and clean exit from any message passing +! environments and frameworks. +! +! authors: Philip W. Jones, LANL +! 2006: Converted to free source form (F90) by Elizabeth Hunke +! 2008: E. Hunke moved ESMF code to its own driver + + module CICE_FinalMod + + use ice_kinds_mod + use ice_communicate, only: my_task, master_task + use ice_exit, only: end_run, abort_ice + use ice_fileunits, only: nu_diag, release_all_fileunits + use icepack_intfc, only: icepack_warnings_flush, icepack_warnings_aborted + + implicit none + private + public :: CICE_Finalize + +!======================================================================= + + contains + +!======================================================================= +! +! This routine shuts down CICE by exiting all relevent environments. + + subroutine CICE_Finalize + + use ice_restart_shared, only: runid + use ice_timers, only: ice_timer_stop, ice_timer_print_all, timer_total + + character(len=*), parameter :: subname = '(CICE_Finalize)' + + !------------------------------------------------------------------- + ! stop timers and print timer info + !------------------------------------------------------------------- + + call ice_timer_stop(timer_total) ! stop timing entire run + call ice_timer_print_all(stats=.false.) ! print timing information + + call icepack_warnings_flush(nu_diag) + if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & + file=__FILE__,line= __LINE__) + + if (my_task == master_task) then + write(nu_diag, *) " " + write(nu_diag, *) "CICE COMPLETED SUCCESSFULLY " + write(nu_diag, *) " " + endif + +!echmod if (nu_diag /= 6) close (nu_diag) ! diagnostic output + call release_all_fileunits + + !------------------------------------------------------------------- + ! quit MPI + !------------------------------------------------------------------- + + call end_run ! 
quit MPI + + end subroutine CICE_Finalize + +!======================================================================= + + end module CICE_FinalMod + +!======================================================================= diff --git a/cicecore/drivers/unittest/sumchk/CICE_InitMod.F90 b/cicecore/drivers/unittest/sumchk/CICE_InitMod.F90 new file mode 100644 index 000000000..60f71fa8a --- /dev/null +++ b/cicecore/drivers/unittest/sumchk/CICE_InitMod.F90 @@ -0,0 +1,486 @@ +!======================================================================= +! +! This module contains the CICE initialization routine that sets model +! parameters and initializes the grid and CICE state variables. +! +! authors Elizabeth C. Hunke, LANL +! William H. Lipscomb, LANL +! Philip W. Jones, LANL +! +! 2006: Converted to free form source (F90) by Elizabeth Hunke +! 2008: E. Hunke moved ESMF code to its own driver + + module CICE_InitMod + + use ice_kinds_mod + use ice_exit, only: abort_ice + use ice_fileunits, only: init_fileunits, nu_diag + use icepack_intfc, only: icepack_aggregate + use icepack_intfc, only: icepack_init_itd, icepack_init_itd_hist + use icepack_intfc, only: icepack_init_fsd_bounds, icepack_init_wave + use icepack_intfc, only: icepack_configure + use icepack_intfc, only: icepack_warnings_flush, icepack_warnings_aborted + use icepack_intfc, only: icepack_query_parameters, icepack_query_tracer_flags, & + icepack_query_tracer_indices, icepack_query_tracer_sizes + + implicit none + private + public :: CICE_Initialize, cice_init + +!======================================================================= + + contains + +!======================================================================= + +! Initialize the basic state, grid and all necessary parameters for +! running the CICE model. Return the initial state in routine +! export state. +! Note: This initialization driver is designed for standalone and +! CESM-coupled applications. For other +! applications (e.g., standalone CAM), this driver would be +! replaced by a different driver that calls subroutine cice_init, +! where most of the work is done. + + subroutine CICE_Initialize + + character(len=*), parameter :: subname='(CICE_Initialize)' + !-------------------------------------------------------------------- + ! model initialization + !-------------------------------------------------------------------- + + call cice_init + + end subroutine CICE_Initialize + +!======================================================================= +! +! Initialize CICE model. 
+ + subroutine cice_init + + use ice_arrays_column, only: hin_max, c_hi_range, alloc_arrays_column + use ice_arrays_column, only: floe_rad_l, floe_rad_c, & + floe_binwidth, c_fsd_range + use ice_state, only: alloc_state + use ice_flux_bgc, only: alloc_flux_bgc + use ice_calendar, only: dt, dt_dyn, istep, istep1, write_ic, & + init_calendar, advance_timestep, calc_timesteps + use ice_communicate, only: init_communicate, my_task, master_task + use ice_diagnostics, only: init_diags + use ice_domain, only: init_domain_blocks + use ice_domain_size, only: ncat, nfsd + use ice_dyn_eap, only: init_eap, alloc_dyn_eap + use ice_dyn_shared, only: kdyn, init_dyn, alloc_dyn_shared + use ice_dyn_vp, only: init_vp + use ice_flux, only: init_coupler_flux, init_history_therm, & + init_history_dyn, init_flux_atm, init_flux_ocn, alloc_flux + use ice_forcing, only: init_forcing_ocn, init_forcing_atmo, & + get_forcing_atmo, get_forcing_ocn, get_wave_spec + use ice_forcing_bgc, only: get_forcing_bgc, get_atm_bgc, & + faero_default, faero_optics, alloc_forcing_bgc, fiso_default + use ice_grid, only: init_grid1, init_grid2, alloc_grid + use ice_history, only: init_hist, accum_hist + use ice_restart_shared, only: restart, runtype + use ice_init, only: input_data, init_state + use ice_init_column, only: init_thermo_vertical, init_shortwave, init_zbgc, input_zbgc, count_tracers + use ice_kinds_mod + use ice_restoring, only: ice_HaloRestore_init + use ice_timers, only: timer_total, init_ice_timers, ice_timer_start + use ice_transport_driver, only: init_transport + + logical(kind=log_kind) :: tr_aero, tr_zaero, skl_bgc, z_tracers, & + tr_iso, tr_fsd, wave_spec + character(len=*), parameter :: subname = '(cice_init)' + + call init_communicate ! initial setup for message passing + call init_fileunits ! unit numbers + + ! tcx debug, this will create a different logfile for each pe + ! if (my_task /= master_task) nu_diag = 100+my_task + + call icepack_configure() ! initialize icepack + call icepack_warnings_flush(nu_diag) + if (icepack_warnings_aborted()) call abort_ice(trim(subname), & + file=__FILE__,line= __LINE__) + + call input_data ! namelist variables + call input_zbgc ! vertical biogeochemistry namelist + call count_tracers ! count tracers + + call init_domain_blocks ! set up block decomposition + call init_grid1 ! domain distribution + call alloc_grid ! allocate grid arrays + call alloc_arrays_column ! allocate column arrays + call alloc_state ! allocate state arrays + call alloc_dyn_shared ! allocate dyn shared arrays + call alloc_flux_bgc ! allocate flux_bgc arrays + call alloc_flux ! allocate flux arrays + call init_ice_timers ! initialize all timers + call ice_timer_start(timer_total) ! start timing entire run + call init_grid2 ! grid variables + call init_zbgc ! vertical biogeochemistry initialization + call init_calendar ! initialize some calendar stuff + call init_hist (dt) ! initialize output history file + + call init_dyn (dt_dyn) ! define dynamics parameters, variables + if (kdyn == 2) then + call alloc_dyn_eap ! allocate dyn_eap arrays + call init_eap ! define eap dynamics parameters, variables + else if (kdyn == 3) then + call init_vp ! define vp dynamics parameters, variables + endif + + call init_coupler_flux ! initialize fluxes exchanged with coupler + + call init_thermo_vertical ! initialize vertical thermodynamics + + call icepack_init_itd(ncat=ncat, hin_max=hin_max) ! 
ice thickness distribution + if (my_task == master_task) then + call icepack_init_itd_hist(ncat=ncat, hin_max=hin_max, c_hi_range=c_hi_range) ! output + endif + + call icepack_query_tracer_flags(tr_fsd_out=tr_fsd) + call icepack_warnings_flush(nu_diag) + if (icepack_warnings_aborted()) call abort_ice(trim(subname), & + file=__FILE__,line= __LINE__) + + if (tr_fsd) call icepack_init_fsd_bounds (nfsd, & ! floe size distribution + floe_rad_l, & ! fsd size lower bound in m (radius) + floe_rad_c, & ! fsd size bin centre in m (radius) + floe_binwidth, & ! fsd size bin width in m (radius) + c_fsd_range, & ! string for history output + write_diags=(my_task == master_task)) ! write diag on master only + + call icepack_warnings_flush(nu_diag) + if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & + file=__FILE__, line=__LINE__) + + call init_forcing_ocn(dt) ! initialize sss and sst from data + call init_state ! initialize the ice state + call init_transport ! initialize horizontal transport + call ice_HaloRestore_init ! restored boundary conditions + + call icepack_query_parameters(skl_bgc_out=skl_bgc, z_tracers_out=z_tracers, & + wave_spec_out=wave_spec) + call icepack_warnings_flush(nu_diag) + if (icepack_warnings_aborted()) call abort_ice(trim(subname), & + file=__FILE__,line= __LINE__) + + if (skl_bgc .or. z_tracers) call alloc_forcing_bgc ! allocate biogeochemistry arrays + + call init_restart ! initialize restart variables + call init_diags ! initialize diagnostic output points + call init_history_therm ! initialize thermo history variables + call init_history_dyn ! initialize dynamic history variables + call calc_timesteps ! update timestep counter if not using npt_unit="1" + + call icepack_query_tracer_flags(tr_aero_out=tr_aero, tr_zaero_out=tr_zaero) + call icepack_query_tracer_flags(tr_iso_out=tr_iso) + call icepack_warnings_flush(nu_diag) + if (icepack_warnings_aborted()) call abort_ice(trim(subname), & + file=__FILE__,line= __LINE__) + + if (tr_aero .or. tr_zaero) call faero_optics !initialize aerosol optical + !property tables + + ! Initialize shortwave components using swdn from previous timestep + ! if restarting. These components will be scaled to current forcing + ! in prep_radiation. + if (trim(runtype) == 'continue' .or. restart) & + call init_shortwave ! initialize radiative transfer + +! tcraig, use advance_timestep here +! istep = istep + 1 ! update time step counters +! istep1 = istep1 + 1 +! time = time + dt ! determine the time and date +! call calendar(time) ! at the end of the first timestep + call advance_timestep() + + !-------------------------------------------------------------------- + ! coupler communication or forcing data initialization + !-------------------------------------------------------------------- + + call init_forcing_atmo ! initialize atmospheric forcing (standalone) + + if (tr_fsd .and. wave_spec) call get_wave_spec ! wave spectrum in ice + call get_forcing_atmo ! atmospheric forcing from data + call get_forcing_ocn(dt) ! ocean forcing from data + + ! isotopes + if (tr_iso) call fiso_default ! default values + ! aerosols + ! if (tr_aero) call faero_data ! data file + ! if (tr_zaero) call fzaero_data ! data file (gx1) + if (tr_aero .or. tr_zaero) call faero_default ! default values + if (skl_bgc .or. z_tracers) call get_forcing_bgc ! biogeochemistry + if (z_tracers) call get_atm_bgc ! biogeochemistry + + if (runtype == 'initial' .and. .not. restart) & + call init_shortwave ! 
initialize radiative transfer using current swdn + + call init_flux_atm ! initialize atmosphere fluxes sent to coupler + call init_flux_ocn ! initialize ocean fluxes sent to coupler + + if (write_ic) call accum_hist(dt) ! write initial conditions + + end subroutine cice_init + +!======================================================================= + + subroutine init_restart + + use ice_arrays_column, only: dhsn + use ice_blocks, only: nx_block, ny_block + use ice_calendar, only: calendar + use ice_constants, only: c0 + use ice_domain, only: nblocks + use ice_domain_size, only: ncat, n_iso, n_aero, nfsd + use ice_dyn_eap, only: read_restart_eap + use ice_dyn_shared, only: kdyn + use ice_grid, only: tmask + use ice_init, only: ice_ic + use ice_init_column, only: init_age, init_FY, init_lvl, & + init_meltponds_cesm, init_meltponds_lvl, init_meltponds_topo, & + init_isotope, init_aerosol, init_hbrine, init_bgc, init_fsd + use ice_restart_column, only: restart_age, read_restart_age, & + restart_FY, read_restart_FY, restart_lvl, read_restart_lvl, & + restart_pond_cesm, read_restart_pond_cesm, & + restart_pond_lvl, read_restart_pond_lvl, & + restart_pond_topo, read_restart_pond_topo, & + restart_fsd, read_restart_fsd, & + restart_iso, read_restart_iso, & + restart_aero, read_restart_aero, & + restart_hbrine, read_restart_hbrine, & + restart_zsal, restart_bgc + use ice_restart_driver, only: restartfile + use ice_restart_shared, only: runtype, restart + use ice_state ! almost everything + + integer(kind=int_kind) :: & + i, j , & ! horizontal indices + iblk ! block index + logical(kind=log_kind) :: & + tr_iage, tr_FY, tr_lvl, tr_pond_cesm, tr_pond_lvl, & + tr_pond_topo, tr_fsd, tr_iso, tr_aero, tr_brine, & + skl_bgc, z_tracers, solve_zsal + integer(kind=int_kind) :: & + ntrcr + integer(kind=int_kind) :: & + nt_alvl, nt_vlvl, nt_apnd, nt_hpnd, nt_ipnd, & + nt_iage, nt_FY, nt_aero, nt_fsd, nt_isosno, nt_isoice + + character(len=*), parameter :: subname = '(init_restart)' + + call icepack_query_tracer_sizes(ntrcr_out=ntrcr) + call icepack_warnings_flush(nu_diag) + if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & + file=__FILE__, line=__LINE__) + + call icepack_query_parameters(skl_bgc_out=skl_bgc, & + z_tracers_out=z_tracers, solve_zsal_out=solve_zsal) + call icepack_query_tracer_flags(tr_iage_out=tr_iage, tr_FY_out=tr_FY, & + tr_lvl_out=tr_lvl, tr_pond_cesm_out=tr_pond_cesm, tr_pond_lvl_out=tr_pond_lvl, & + tr_pond_topo_out=tr_pond_topo, tr_aero_out=tr_aero, tr_brine_out=tr_brine, & + tr_fsd_out=tr_fsd, tr_iso_out=tr_iso) + call icepack_query_tracer_indices(nt_alvl_out=nt_alvl, nt_vlvl_out=nt_vlvl, & + nt_apnd_out=nt_apnd, nt_hpnd_out=nt_hpnd, nt_ipnd_out=nt_ipnd, & + nt_iage_out=nt_iage, nt_FY_out=nt_FY, nt_aero_out=nt_aero, nt_fsd_out=nt_fsd, & + nt_isosno_out=nt_isosno, nt_isoice_out=nt_isoice) + call icepack_warnings_flush(nu_diag) + if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & + file=__FILE__, line=__LINE__) + + if (trim(runtype) == 'continue') then + ! start from core restart file + call restartfile() ! given by pointer in ice_in + call calendar() ! update time parameters + if (kdyn == 2) call read_restart_eap ! EAP + else if (restart) then ! ice_ic = core restart file + call restartfile (ice_ic) ! or 'default' or 'none' + !!! uncomment to create netcdf + ! call restartfile_v4 (ice_ic) ! CICE v4.1 binary restart file + !!! uncomment if EAP restart data exists + ! if (kdyn == 2) call read_restart_eap + endif + + ! tracers + ! 
ice age tracer + if (tr_iage) then + if (trim(runtype) == 'continue') & + restart_age = .true. + if (restart_age) then + call read_restart_age + else + do iblk = 1, nblocks + call init_age(trcrn(:,:,nt_iage,:,iblk)) + enddo ! iblk + endif + endif + ! first-year area tracer + if (tr_FY) then + if (trim(runtype) == 'continue') restart_FY = .true. + if (restart_FY) then + call read_restart_FY + else + do iblk = 1, nblocks + call init_FY(trcrn(:,:,nt_FY,:,iblk)) + enddo ! iblk + endif + endif + ! level ice tracer + if (tr_lvl) then + if (trim(runtype) == 'continue') restart_lvl = .true. + if (restart_lvl) then + call read_restart_lvl + else + do iblk = 1, nblocks + call init_lvl(iblk,trcrn(:,:,nt_alvl,:,iblk), & + trcrn(:,:,nt_vlvl,:,iblk)) + enddo ! iblk + endif + endif + ! CESM melt ponds + if (tr_pond_cesm) then + if (trim(runtype) == 'continue') & + restart_pond_cesm = .true. + if (restart_pond_cesm) then + call read_restart_pond_cesm + else + do iblk = 1, nblocks + call init_meltponds_cesm(trcrn(:,:,nt_apnd,:,iblk), & + trcrn(:,:,nt_hpnd,:,iblk)) + enddo ! iblk + endif + endif + ! level-ice melt ponds + if (tr_pond_lvl) then + if (trim(runtype) == 'continue') & + restart_pond_lvl = .true. + if (restart_pond_lvl) then + call read_restart_pond_lvl + else + do iblk = 1, nblocks + call init_meltponds_lvl(trcrn(:,:,nt_apnd,:,iblk), & + trcrn(:,:,nt_hpnd,:,iblk), & + trcrn(:,:,nt_ipnd,:,iblk), & + dhsn(:,:,:,iblk)) + enddo ! iblk + endif + endif + ! topographic melt ponds + if (tr_pond_topo) then + if (trim(runtype) == 'continue') & + restart_pond_topo = .true. + if (restart_pond_topo) then + call read_restart_pond_topo + else + do iblk = 1, nblocks + call init_meltponds_topo(trcrn(:,:,nt_apnd,:,iblk), & + trcrn(:,:,nt_hpnd,:,iblk), & + trcrn(:,:,nt_ipnd,:,iblk)) + enddo ! iblk + endif ! .not. restart_pond + endif + ! floe size distribution + if (tr_fsd) then + if (trim(runtype) == 'continue') restart_fsd = .true. + if (restart_fsd) then + call read_restart_fsd + else + call init_fsd(trcrn(:,:,nt_fsd:nt_fsd+nfsd-1,:,:)) + endif + endif + + ! isotopes + if (tr_iso) then + if (trim(runtype) == 'continue') restart_iso = .true. + if (restart_iso) then + call read_restart_iso + else + do iblk = 1, nblocks + call init_isotope(trcrn(:,:,nt_isosno:nt_isosno+n_iso-1,:,iblk), & + trcrn(:,:,nt_isoice:nt_isoice+n_iso-1,:,iblk)) + enddo ! iblk + endif + endif + + if (tr_aero) then ! ice aerosol + if (trim(runtype) == 'continue') restart_aero = .true. + if (restart_aero) then + call read_restart_aero + else + do iblk = 1, nblocks + call init_aerosol(trcrn(:,:,nt_aero:nt_aero+4*n_aero-1,:,iblk)) + enddo ! iblk + endif ! .not. restart_aero + endif + + if (trim(runtype) == 'continue') then + if (tr_brine) & + restart_hbrine = .true. + if (solve_zsal) & + restart_zsal = .true. + if (skl_bgc .or. z_tracers) & + restart_bgc = .true. + endif + + if (tr_brine .or. skl_bgc) then ! brine height tracer + call init_hbrine + if (tr_brine .and. restart_hbrine) call read_restart_hbrine + endif + + if (solve_zsal .or. skl_bgc .or. z_tracers) then ! biogeochemistry + if (tr_fsd) then + write (nu_diag,*) 'FSD implementation incomplete for use with BGC' + call icepack_warnings_flush(nu_diag) + if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & + file=__FILE__, line=__LINE__) + endif + call init_bgc + endif + + !----------------------------------------------------------------- + ! 
aggregate tracers + !----------------------------------------------------------------- + + !$OMP PARALLEL DO PRIVATE(iblk) + do iblk = 1, nblocks + do j = 1, ny_block + do i = 1, nx_block + if (tmask(i,j,iblk)) then + call icepack_aggregate(ncat = ncat, & + aicen = aicen(i,j,:,iblk), & + trcrn = trcrn(i,j,:,:,iblk), & + vicen = vicen(i,j,:,iblk), & + vsnon = vsnon(i,j,:,iblk), & + aice = aice (i,j, iblk), & + trcr = trcr (i,j,:,iblk), & + vice = vice (i,j, iblk), & + vsno = vsno (i,j, iblk), & + aice0 = aice0(i,j, iblk), & + ntrcr = ntrcr, & + trcr_depend = trcr_depend, & + trcr_base = trcr_base, & + n_trcr_strata = n_trcr_strata, & + nt_strata = nt_strata) + else + ! tcraig, reset all tracer values on land to zero + trcrn(i,j,:,:,iblk) = c0 + endif + enddo + enddo + enddo + !$OMP END PARALLEL DO + + call icepack_warnings_flush(nu_diag) + if (icepack_warnings_aborted()) call abort_ice(error_message=subname, & + file=__FILE__, line=__LINE__) + + end subroutine init_restart + +!======================================================================= + + end module CICE_InitMod + +!======================================================================= diff --git a/cicecore/drivers/unittest/sumchk/sumchk.F90 b/cicecore/drivers/unittest/sumchk/sumchk.F90 new file mode 100644 index 000000000..27379cf8a --- /dev/null +++ b/cicecore/drivers/unittest/sumchk/sumchk.F90 @@ -0,0 +1,641 @@ + + program sumchk + + use CICE_InitMod + use CICE_FinalMod + use ice_kinds_mod, only: int_kind, dbl_kind, real_kind + use ice_communicate, only: my_task, master_task, get_num_procs + use ice_domain_size, only: block_size_x, block_size_y, max_blocks + use ice_domain, only: distrb_info + use ice_blocks, only: block, get_block, nx_block, ny_block, nblocks_tot + use ice_distribution, only: ice_distributionGetBlockID, ice_distributionGet + use ice_constants, only: field_loc_center + use ice_fileunits, only: bfbflag + use ice_global_reductions + use ice_exit, only: abort_ice + + implicit none + + integer(int_kind) :: i, j, k, l, n, nscale, iblock, ib, ie, jb, je + integer(int_kind) :: blockID, numBlocks + type (block) :: this_block + + real(dbl_kind) ,allocatable :: arrayA(:,:,:),arrayB(:,:,:),arrayC(:,:,:) + integer(int_kind),allocatable :: arrayiA(:,:,:),arrayiB(:,:,:) + real(dbl_kind) ,allocatable :: array8(:,:,:),array82(:,:,:) + real(real_kind) ,allocatable :: array4(:,:,:),array42(:,:,:) + integer(int_kind),allocatable :: arrayi1(:,:,:),arrayi2(:,:,:) + real(dbl_kind) ,allocatable :: mmask8(:,:,:) + real(real_kind) ,allocatable :: mmask4(:,:,:) + integer(int_kind),allocatable :: mmaski(:,:,:) + logical ,allocatable :: lmask (:,:,:) + real(dbl_kind) ,allocatable :: vec8(:),sum8(:) + real(dbl_kind) :: locval, corval, minval, maxval ! 
local, correct, min, max values + real(dbl_kind) :: locval8, sumval8, minval8, maxval8 + real(real_kind) :: locval4, sumval4, minval4, maxval4 + integer(int_kind) :: locvali, sumvali, corvali, minvali, maxvali + real(dbl_kind) :: lscale + real(dbl_kind) :: reldig,reldigchk_now + real(dbl_kind) ,allocatable :: reldigchk(:,:) + + character(len=8) :: errorflag0 + character(len=32) :: string + integer(int_kind),parameter :: ntests1 = 19 + character(len=8) :: errorflag1(ntests1) + character(len=32) :: stringflag1(ntests1) + integer(int_kind),parameter :: ntests2 = 6 + character(len=8) :: errorflag2(ntests2) + character(len=32) :: stringflag2(ntests2) + integer(int_kind),parameter :: ntests3 = 3 + character(len=8) :: errorflag3(ntests3) + character(len=32) :: stringflag3(ntests3) + integer(int_kind),parameter :: ntests4 = 1 + character(len=8) :: errorflag4(ntests4) + character(len=32) :: stringflag4(ntests4) + + integer(int_kind) :: npes, ierr, ntask + + integer(int_kind), parameter :: nbflags = 6 + character(len=8), parameter :: bflags(1:nbflags) = & + (/ 'off ','lsum8 ','lsum16 ','lsum4 ','ddpdd ','reprosum' /) + character(len=*), parameter :: & + passflag = 'PASS', & + failflag = 'FAIL' + character(len=*), parameter :: subname='(sumchk)' + + !----------------------------------------------------------------- + ! Initialize CICE + !----------------------------------------------------------------- + + call CICE_Initialize + + !----------------------------------------------------------------- + ! Testing + !----------------------------------------------------------------- + + errorflag0 = passflag + errorflag1 = passflag + errorflag2 = passflag + errorflag3 = passflag + errorflag4 = passflag + npes = get_num_procs() + + if (my_task == master_task) then + write(6,*) ' ' + write(6,*) '==========================================================' + write(6,*) ' ' + write(6,*) 'Running SUMCHK' + write(6,*) ' ' + write(6,*) ' npes = ',npes + write(6,*) ' my_task = ',my_task + write(6,*) ' nblocks_tot = ',nblocks_tot + write(6,*) ' block_size_x = ',block_size_x + write(6,*) ' block_size_y = ',block_size_y + write(6,*) ' ' + endif + + ! --------------------------- + ! TEST GLOBAL SUMS + ! --------------------------- + ! test difficult sum + ! fill array with constant value that sums to corval when 2 gridcells per block are excluded + ! fill those two gridcells per block with very large and opposite signed values + ! arrayA should sum to corval, arrayB should sum to corval when mask is applied on 2 gridcells + ! fill 2 extra gridcells with special values + ! lscale defines relative size of large values + ! arrayA has large and opposite values in upper right hand corner of block + ! arrayB has large and same size values in upper right hand corner to check masks + ! arrayC has large and opposite values in first two values of block + ! arrayA should add large values at end of a local sum (bad) + ! 
arrayC should add large values first then rest of values (not so bad) + + if (my_task == master_task) write(6,*) ' ' + + allocate(arrayA (nx_block,ny_block,max_blocks)) + allocate(arrayB (nx_block,ny_block,max_blocks)) + allocate(arrayC (nx_block,ny_block,max_blocks)) + allocate(arrayiA(nx_block,ny_block,max_blocks)) + allocate(arrayiB(nx_block,ny_block,max_blocks)) + allocate(array4 (nx_block,ny_block,max_blocks)) + allocate(array8 (nx_block,ny_block,max_blocks)) + allocate(array42(nx_block,ny_block,max_blocks)) + allocate(array82(nx_block,ny_block,max_blocks)) + allocate(arrayi1(nx_block,ny_block,max_blocks)) + allocate(arrayi2(nx_block,ny_block,max_blocks)) + allocate(mmask4 (nx_block,ny_block,max_blocks)) + allocate(mmask8 (nx_block,ny_block,max_blocks)) + allocate(mmaski (nx_block,ny_block,max_blocks)) + allocate(lmask (nx_block,ny_block,max_blocks)) + + ! set corval to something a little interesting (not 1.0 for instance which gives atypical results) + + corval = 4.0_dbl_kind/3.0_dbl_kind + locval = corval / real(nblocks_tot*(block_size_x*block_size_y-2),dbl_kind) + corvali = 92544 + + if (my_task == master_task) then +! write(6,*) ' local array value = ',locval + write(6,*) ' correct value = ',corval + write(6,*) ' correct value int = ',corvali + write(6,*) ' ' + endif + + call ice_distributionGet(distrb_info, numLocalBlocks = numBlocks) + + nscale = 4 + + ! correct results for relative digits check in sum + allocate(reldigchk(nbflags,nscale)) + reldigchk(:,:) = 15.7 + reldigchk(1:2,1) = 14. + reldigchk(4,1) = 3.9 + reldigchk(1:2,2) = 9. + reldigchk(4,2) = 1. + reldigchk(1:2,3) = 1. + reldigchk(4,3) = 0. + reldigchk(1:2,4) = 0. + reldigchk(3,4) = 3. + reldigchk(4,4) = 0. + reldigchk(5,4) = 15. + + ! test list + n = 1 ; stringflag1(n) = 'dble sum easy' + n = n + 1; stringflag1(n) = 'dble sum' + n = n + 1; stringflag1(n) = 'real sum' + n = n + 1; stringflag1(n) = 'intg sum' + n = n + 1; stringflag1(n) = 'dble sum + dble mask' + n = n + 1; stringflag1(n) = 'real sum + real mask' + n = n + 1; stringflag1(n) = 'intg sum + intg mask' + n = n + 1; stringflag1(n) = 'dble sum + logical mask' + n = n + 1; stringflag1(n) = 'real sum + logical mask' + n = n + 1; stringflag1(n) = 'intg sum + logical mask' + n = n + 1; stringflag1(n) = 'dble prod sum' + n = n + 1; stringflag1(n) = 'real prod sum' + n = n + 1; stringflag1(n) = 'intg prod sum' + n = n + 1; stringflag1(n) = 'dble prod sum + dble mask' + n = n + 1; stringflag1(n) = 'real prod sum + real mask' + n = n + 1; stringflag1(n) = 'intg prod sum + intg mask' + n = n + 1; stringflag1(n) = 'dble prod sum + logical mask' + n = n + 1; stringflag1(n) = 'real prod sum + logical mask' + n = n + 1; stringflag1(n) = 'intg prod sum + logical mask' + + do l = 1, nscale + arrayA(:,:,:) = locval + arrayB(:,:,:) = locval + arrayC(:,:,:) = locval + lmask(:,:,:) = .true. + lscale = 1.0_dbl_kind + if (l == 2) lscale = 1.0e8_dbl_kind + if (l == 3) lscale = 1.0e16_dbl_kind + if (l == 4) lscale = 1.0e32_dbl_kind + if (my_task == master_task) then + write(6,*) ' ' + write(6,*) 'test ',l + write(6,'(a,e10.4)') 'lscale = ',lscale + write(6,'(6x,a,28x,a,8x,a,10x,a)') 'test','bfbflag','sum','digits of precision (max is 16)' + endif + do iblock = 1,numBlocks + call ice_distributionGetBlockID(distrb_info, iblock, blockID) + this_block = get_block(blockID, blockID) + ib = this_block%ilo + ie = this_block%ihi + jb = this_block%jlo + je = this_block%jhi + + lmask(ie,je,iblock) = .false. + lmask(ie,je-1,iblock) = .false. 
+ arrayA(ie,je,iblock) = locval * lscale + arrayA(ie,je-1,iblock) = -arrayA(ie,je,iblock) + arrayB(ie,je,iblock) = locval * lscale + arrayB(ie,je-1,iblock) = arrayB(ie,je,iblock) + arrayC(ib,jb,iblock) = locval * lscale + arrayC(ib+1,jb,iblock) = -arrayA(ie,je,iblock) + arrayiA(:,:,iblock) = 8 + arrayiB(:,:,iblock) = 8 + arrayiA(ie,je,iblock) = 137 * 4 + arrayiA(ie,je-1,iblock) = -arrayiA(ie,je,iblock) + enddo + + do k = 1,ntests1 + do n = 1,nbflags + bfbflag = bflags(n) + string = stringflag1(k) + sumval8 = 888.0e12 + sumvali = 8888888 + + if (k == 1) then + array8(:,:,:) = arrayC(:,:,:) + sumval8 = global_sum(array8, distrb_info, field_loc_center) + elseif (k == 2) then + array8(:,:,:) = arrayA(:,:,:) + sumval8 = global_sum(array8, distrb_info, field_loc_center) + elseif (k == 3) then + array4(:,:,:) = arrayA(:,:,:) + sumval4 = global_sum(array4, distrb_info, field_loc_center) + sumval8 = sumval4 + elseif (k == 4) then + arrayi1 = arrayiA + sumvali = global_sum(arrayi1, distrb_info, field_loc_center) + elseif (k == 5) then + mmask8(:,:,:) = 6.0_dbl_kind + array8(:,:,:) = arrayA(:,:,:)/mmask8(:,:,:) + sumval8 = global_sum(array8, distrb_info, field_loc_center, mmask=mmask8) + elseif (k == 6) then + mmask4(:,:,:) = 6.0_real_kind + array4(:,:,:) = arrayA(:,:,:)/mmask4(:,:,:) + sumval4 = global_sum(array4, distrb_info, field_loc_center, mmask=mmask4) + sumval8 = sumval4 + elseif (k == 7) then + mmaski(:,:,:) = 2 + arrayi1(:,:,:) = arrayiA(:,:,:)/mmaski(:,:,:) + sumvali = global_sum(arrayi1, distrb_info, field_loc_center, mmask=mmaski) + elseif (k == 8) then + array8(:,:,:) = arrayB(:,:,:) + sumval8 = global_sum(array8, distrb_info, field_loc_center, lmask=lmask) + elseif (k == 9) then + array4(:,:,:) = arrayB(:,:,:) + sumval4 = global_sum(array4, distrb_info, field_loc_center, lmask=lmask) + sumval8 = sumval4 + elseif (k == 10) then + arrayi1(:,:,:) = arrayiB(:,:,:) + sumvali = global_sum(arrayi1, distrb_info, field_loc_center, lmask=lmask) + elseif (k == 11) then + array82(:,:,:) = 7.0_dbl_kind + array8(:,:,:) = arrayA(:,:,:)/array82(:,:,:) + sumval8 = global_sum_prod(array8, array82, distrb_info, field_loc_center) + elseif (k == 12) then + array42(:,:,:) = 7.0_real_kind + array4(:,:,:) = arrayA(:,:,:)/array42(:,:,:) + sumval4 = global_sum_prod(array4, array42, distrb_info, field_loc_center) + sumval8 = sumval4 + elseif (k == 13) then + arrayi2(:,:,:) = 4 + arrayi1(:,:,:) = arrayiA(:,:,:)/arrayi2(:,:,:) + sumvali = global_sum_prod(arrayi1, arrayi2, distrb_info, field_loc_center) + elseif (k == 14) then + array82(:,:,:) = 7.0_dbl_kind + mmask8(:,:,:) = 6.0_dbl_kind + array8(:,:,:) = arrayA(:,:,:)/(mmask8(:,:,:)*array82(:,:,:)) + sumval8 = global_sum_prod(array8, array82, distrb_info, field_loc_center, mmask=mmask8) + elseif (k == 15) then + array42(:,:,:) = 7.0_real_kind + mmask4(:,:,:) = 6.0_real_kind + array4(:,:,:) = arrayA(:,:,:)/(mmask4(:,:,:)*array42(:,:,:)) + sumval4 = global_sum_prod(array4, array42, distrb_info, field_loc_center, mmask=mmask4) + sumval8 = sumval4 + elseif (k == 16) then + arrayi2(:,:,:) = 2 + mmaski(:,:,:) = 2 + arrayi1(:,:,:) = arrayiA(:,:,:)/(arrayi2(:,:,:)*mmaski(:,:,:)) + sumvali = global_sum_prod(arrayi1, arrayi2, distrb_info, field_loc_center, mmask=mmaski) + elseif (k == 17) then + array82(:,:,:) = 7.0_dbl_kind + array8(:,:,:) = arrayB(:,:,:)/array82(:,:,:) + sumval8 = global_sum_prod(array8, array82, distrb_info, field_loc_center, lmask=lmask) + elseif (k == 18) then + array42(:,:,:) = 7.0_real_kind + array4(:,:,:) = arrayB(:,:,:)/array42(:,:,:) + sumval4 = 
global_sum_prod(array4, array42, distrb_info, field_loc_center, lmask=lmask) + sumval8 = sumval4 + elseif (k == 19) then + arrayi2(:,:,:) = 4 + arrayi1(:,:,:) = arrayiB(:,:,:)/(arrayi2(:,:,:)) + sumvali = global_sum_prod(arrayi1, arrayi2, distrb_info, field_loc_center, lmask=lmask) + else + call abort_ice(subname//' illegal k sum',file=__FILE__,line=__LINE__) + endif + + if (string(1:4) == 'intg') then + ! integer + if (my_task == master_task) then + write(6,'(1x,a,a10,i12)') string,trim(bfbflag), sumvali + endif + if (sumvali /= corvali) then + errorflag1(k) = failflag + errorflag0 = failflag + if (my_task == master_task) then + write(6,*) '**** ERROR ',sumvali,corvali + endif + endif + else + ! real/dbl + if (sumval8 == corval) then + reldig = 16.0_dbl_kind + elseif (sumval8 == 0._dbl_kind) then + reldig = 0 + else + reldig = -log10(abs(corval-sumval8)/corval) + endif + if (my_task == master_task) then + write(6,'(1x,a,a10,g25.17,f8.2)') string,trim(bfbflag), sumval8, reldig + endif + + ! (real*4) can't have more than 8 digits of precision + reldigchk_now = reldigchk(n,l) + if (string(1:4) == 'real') reldigchk_now = min(reldigchk(n,l),7.0) + if (reldig < reldigchk_now) then + errorflag1(k) = failflag + errorflag0 = failflag + if (my_task == master_task) then + write(6,*) '**** ERROR ',reldig,reldigchk_now + endif + endif + endif + enddo ! l + enddo ! n + enddo ! k + + ! --------------------------- + ! Test Global Min/Max + ! --------------------------- + + if (my_task == master_task) write(6,*) ' ' + + n = 1 ; stringflag2(n) = 'dble min/max' + n = n + 1; stringflag2(n) = 'real min/max' + n = n + 1; stringflag2(n) = 'intg min/max' + n = n + 1; stringflag2(n) = 'dble min/max + logical mask' + n = n + 1; stringflag2(n) = 'real min/max + logical mask' + n = n + 1; stringflag2(n) = 'intg min/max + logical mask' + + minval = -17. + maxval = 37. + + ! fill arrays with large values as default + array8 = 999.0e10_dbl_kind + array4 = 999.0e10_real_kind + arrayi1 = 9999999 + + n = 1 + ! fill active part of arrays with values between 0 and 10 + do iblock = 1,numBlocks + call ice_distributionGetBlockID(distrb_info, iblock, blockID) + this_block = get_block(blockID, blockID) + ib = this_block%ilo + ie = this_block%ihi + jb = this_block%jlo + je = this_block%jhi + do j = jb,je + do i = ib,ie + n = n + 1 + array8(i,j,iblock) = real(mod(n,10),dbl_kind) + array4(i,j,iblock) = real(mod(n,8),real_kind) + arrayi1(i,j,iblock) = mod(n,9) + enddo + enddo + enddo + + ! 
fill one gridcell with a min and max value + ntask = max(npes-1,1)-1 + iblock = max(numBlocks-1,1) + call ice_distributionGetBlockID(distrb_info, iblock, blockID) + this_block = get_block(blockID, blockID) + ib = this_block%ilo + ie = this_block%ihi + jb = this_block%jlo + je = this_block%jhi + i = max(ie-3,ib) + j = max(je-4,jb) + if (my_task == ntask) then + array8(i,j,iblock) = minval + array4(i,j,iblock) = minval + arrayi1(i,j,iblock) = minval + endif + + ntask = min(npes,2)-1 + iblock = min(numBlocks,2) + call ice_distributionGetBlockID(distrb_info, iblock, blockID) + this_block = get_block(blockID, blockID) + ib = this_block%ilo + ie = this_block%ihi + jb = this_block%jlo + je = this_block%jhi + i = min(ib+1,ie) + j = min(jb+2,je) + if (my_task == ntask) then + array8(i,j,iblock) = maxval + array4(i,j,iblock) = maxval + arrayi1(i,j,iblock) = maxval + endif + + do k = 1,ntests2 + string = stringflag2(k) + minval8 = 888e12 + maxval8 = -888e12 + if (k == 1) then + minval8 = global_minval(array8, distrb_info) + maxval8 = global_maxval(array8, distrb_info) + elseif (k == 2) then + minval4 = global_minval(array4, distrb_info) + maxval4 = global_maxval(array4, distrb_info) + minval8 = minval4 + maxval8 = maxval4 + elseif (k == 3) then + minvali = global_minval(arrayi1, distrb_info) + maxvali = global_maxval(arrayi1, distrb_info) + minval8 = minvali + maxval8 = maxvali + elseif (k == 4) then + minval8 = global_minval(array8, distrb_info, lmask=lmask) + maxval8 = global_maxval(array8, distrb_info, lmask=lmask) + elseif (k == 5) then + minval4 = global_minval(array4, distrb_info, lmask=lmask) + maxval4 = global_maxval(array4, distrb_info, lmask=lmask) + minval8 = minval4 + maxval8 = maxval4 + elseif (k == 6) then + minvali = global_minval(arrayi1, distrb_info, lmask=lmask) + maxvali = global_maxval(arrayi1, distrb_info, lmask=lmask) + minval8 = minvali + maxval8 = maxvali + else + call abort_ice(subname//' illegal k minmax',file=__FILE__,line=__LINE__) + endif + + if (my_task == master_task) then + write(6,'(1x,a,2g16.8)') string, minval8, maxval8 + endif + + if (minval8 /= minval .or. maxval8 /= maxval) then + errorflag2(k) = failflag + errorflag0 = failflag + if (my_task == master_task) then + write(6,*) '**** ERROR ', minval8, minval, maxval8, maxval + endif + endif + enddo + + ! --------------------------- + ! Test Scalar Reductions + ! --------------------------- + + if (my_task == master_task) write(6,*) ' ' + + n = 1 ; stringflag3(n) = 'dble scalar min/max/sum' + n = n + 1; stringflag3(n) = 'real scalar min/max/sum' + n = n + 1; stringflag3(n) = 'intg scalar min/max/sum' + + minval = -5. + maxval = 8. + + locval8 = 1. + locval4 = 1. + locvali = 1. + + ! fill one gridcell with a min and max value + ntask = max(npes-1,1)-1 + if (my_task == ntask) then + locval8 = minval + locval4 = minval + locvali = minval + endif + ntask = min(npes,2)-1 + if (my_task == ntask) then + locval8 = maxval + locval4 = maxval + locvali = maxval + endif + + ! 
compute correct results + if (npes == 1) then + minval = maxval + corval = maxval + else + corval = (npes - 2) * 1.0 + minval + maxval + endif + + do k = 1,ntests3 + string = stringflag3(k) + minval8 = 888e12 + maxval8 = -888e12 + sumval8 = -888e12 + if (k == 1) then + minval8 = global_minval(locval8, distrb_info) + maxval8 = global_maxval(locval8, distrb_info) + sumval8 = global_sum (locval8, distrb_info) + elseif (k == 2) then + minval4 = global_minval(locval4, distrb_info) + maxval4 = global_maxval(locval4, distrb_info) + sumval4 = global_sum (locval4, distrb_info) + minval8 = minval4 + maxval8 = maxval4 + sumval8 = sumval4 + elseif (k == 3) then + minvali = global_minval(locvali, distrb_info) + maxvali = global_maxval(locvali, distrb_info) + sumvali = global_sum (locvali, distrb_info) + minval8 = minvali + maxval8 = maxvali + sumval8 = sumvali + else + call abort_ice(subname//' illegal k scalar',file=__FILE__,line=__LINE__) + endif + + if (my_task == master_task) then + write(6,'(1x,a,3g16.8)') string, minval8, maxval8, sumval8 + endif + + if (minval8 /= minval .or. maxval8 /= maxval .or. sumval8 /= corval) then + errorflag3(k) = failflag + errorflag0 = failflag + if (my_task == master_task) then + write(6,*) '**** ERROR ', minval8, minval, maxval8, maxval, sumval8, corval + endif + endif + enddo + + ! --------------------------- + ! Test Vector Reductions + ! --------------------------- + + if (my_task == master_task) write(6,*) ' ' + + n = 1 ; stringflag4(n) = 'dble sum vector' + allocate(vec8(3)) + allocate(sum8(3)) + + minval = -5. + maxval = 8. + + vec8(1) = 1. + + ! fill one gridcell with a min and max value + ntask = max(npes-1,1)-1 + if (my_task == ntask) then + vec8(1) = minval + endif + ntask = min(npes,2)-1 + if (my_task == ntask) then + vec8(1) = maxval + endif + vec8(2) = 2. * vec8(1) + vec8(3) = 3. * vec8(1) + + ! compute correct results + if (npes == 1) then + minval = maxval + corval = maxval + else + corval = (npes - 2) * 1.0 + minval + maxval + endif + + do k = 1,ntests4 + string = stringflag4(k) + sum8 = -888e12 + if (k == 1) then + sum8 = global_allreduce_sum(vec8, distrb_info) + else + call abort_ice(subname//' illegal k vector',file=__FILE__,line=__LINE__) + endif + + if (my_task == master_task) then + write(6,'(1x,a,3g16.8)') string, sum8(1),sum8(2),sum8(3) + endif + + if (sum8(1) /= corval .or. sum8(2) /= 2.*corval .or. sum8(3) /= 3.*corval) then + errorflag4(k) = failflag + errorflag0 = failflag + if (my_task == master_task) then + write(6,*) '**** ERROR ', sum8(1),sum8(2),sum8(3),corval + endif + endif + enddo + + ! --------------------------- + + if (my_task == master_task) then + write(6,*) ' ' + do k = 1,ntests1 + write(6,*) errorflag1(k),stringflag1(k) + enddo + do k = 1,ntests2 + write(6,*) errorflag2(k),stringflag2(k) + enddo + do k = 1,ntests3 + write(6,*) errorflag3(k),stringflag3(k) + enddo + do k = 1,ntests4 + write(6,*) errorflag4(k),stringflag4(k) + enddo + write(6,*) ' ' + if (errorflag0 == passflag) then + write(6,*) 'SUMCHK COMPLETED SUCCESSFULLY' + else + write(6,*) 'SUMCHK FAILED' + call abort_ice(subname//' ERROR: SUMCHK FAILED',file=__FILE__,line=__LINE__) + endif + write(6,*) ' ' + write(6,*) '==========================================================' + write(6,*) ' ' + endif + + + !----------------------------------------------------------------- + ! 
Finalize CICE + !----------------------------------------------------------------- + + call CICE_Finalize + + end program sumchk + +!======================================================================= diff --git a/configuration/scripts/Makefile b/configuration/scripts/Makefile index e0b7799d6..de3658a3e 100644 --- a/configuration/scripts/Makefile +++ b/configuration/scripts/Makefile @@ -75,7 +75,7 @@ AR := ar .SUFFIXES: .SUFFIXES: .F90 .F .c .o -.PHONY: all cice libcice targets target db_files db_flags clean realclean helloworld calchk +.PHONY: all cice libcice targets target db_files db_flags clean realclean helloworld calchk sumchk all: $(EXEC) cice: $(EXEC) @@ -94,7 +94,7 @@ targets: @echo " " @echo "Supported Makefile Targets are: cice, libcice, makdep, depends, clean, realclean" @echo " Diagnostics: targets, db_files, db_flags" - @echo " Unit Tests : helloworld, calchk" + @echo " Unit Tests : helloworld, calchk, sumchk" target: targets db_files: @@ -143,9 +143,13 @@ $(DEPGEN): $(OBJS_DEPGEN) # this builds all dependent source code automatically even though only a subset might actually be used # this is no different than the cice target and in fact the binary is called cice # it exists just to create separation as needed for unit tests + calchk: $(EXEC) +sumchk: $(EXEC) + # this builds just a subset of source code specified explicitly and requires a separate target + HWOBJS := helloworld.o helloworld: $(HWOBJS) $(LD) -o $(EXEC) $(LDFLAGS) $(HWOBJS) $(ULIBS) $(SLIBS) diff --git a/configuration/scripts/options/set_env.sumchk b/configuration/scripts/options/set_env.sumchk new file mode 100644 index 000000000..8a8495df2 --- /dev/null +++ b/configuration/scripts/options/set_env.sumchk @@ -0,0 +1,2 @@ +setenv ICE_DRVOPT unittest/sumchk +setenv ICE_TARGET sumchk diff --git a/configuration/scripts/tests/unittest_suite.ts b/configuration/scripts/tests/unittest_suite.ts index 55406fce8..d422b2674 100644 --- a/configuration/scripts/tests/unittest_suite.ts +++ b/configuration/scripts/tests/unittest_suite.ts @@ -1,4 +1,5 @@ -# Test Grid PEs Sets BFB-compare -unittest gx3 1x1 helloworld -unittest gx3 1x1 calchk,short - +# Test Grid PEs Sets BFB-compare +unittest gx3 1x1 helloworld +unittest gx3 1x1 calchk,short +unittest gx3 4x1x25x29x4 sumchk +unittest gx3 1x1x25x29x16 sumchk From 660314d922964027ee1d75ec1b775d8a411c6b11 Mon Sep 17 00:00:00 2001 From: apcraig Date: Sat, 5 Jun 2021 20:51:03 -0600 Subject: [PATCH 3/6] - add bcstchk unit test - update ice_broadcast to sync up serial and mpi versions - add get_rank to ice_communicate.F90 - add global_[min/max]val_scalar_int_nodist method to ice_global_reductions.F90 - add tripole output in ice_blocks.F90 with debug_blocks - update set_nml.tx1 to set ns_boundary_type to 'tripole', was 'open' --- .../infrastructure/comm/mpi/ice_broadcast.F90 | 74 +++ .../comm/mpi/ice_communicate.F90 | 27 + .../comm/mpi/ice_global_reductions.F90 | 105 +++- .../comm/serial/ice_broadcast.F90 | 467 +++++++++++++++--- .../comm/serial/ice_communicate.F90 | 24 + .../comm/serial/ice_global_reductions.F90 | 105 +++- .../cicedynB/infrastructure/ice_blocks.F90 | 5 +- cicecore/drivers/unittest/bcstchk/bcstchk.F90 | 281 +++++++++++ cicecore/drivers/unittest/sumchk/sumchk.F90 | 87 ++-- configuration/scripts/Makefile | 6 +- configuration/scripts/options/set_env.bcstchk | 2 + configuration/scripts/options/set_nml.tx1 | 1 + configuration/scripts/tests/unittest_suite.ts | 3 + 13 files changed, 1079 insertions(+), 108 deletions(-) create mode 100644 
cicecore/drivers/unittest/bcstchk/bcstchk.F90 create mode 100644 configuration/scripts/options/set_env.bcstchk diff --git a/cicecore/cicedynB/infrastructure/comm/mpi/ice_broadcast.F90 b/cicecore/cicedynB/infrastructure/comm/mpi/ice_broadcast.F90 index 87c78f9df..7d221c65e 100644 --- a/cicecore/cicedynB/infrastructure/comm/mpi/ice_broadcast.F90 +++ b/cicecore/cicedynB/infrastructure/comm/mpi/ice_broadcast.F90 @@ -8,9 +8,15 @@ module ice_broadcast ! author: Phil Jones, LANL ! Oct. 2004: Adapted from POP version by William H. Lipscomb, LANL +#ifndef SERIAL_REMOVE_MPI use mpi ! MPI Fortran module +#endif use ice_kinds_mod +#ifdef SERIAL_REMOVE_MPI + use ice_communicate, only: MPI_COMM_ICE +#else use ice_communicate, only: mpiR8, mpir4, MPI_COMM_ICE +#endif use ice_exit, only: abort_ice use icepack_intfc, only: icepack_warnings_flush, icepack_warnings_aborted @@ -78,8 +84,12 @@ subroutine broadcast_scalar_dbl(scalar, root_pe) !----------------------------------------------------------------------- +#ifdef SERIAL_REMOVE_MPI + ! nothing to do +#else call MPI_BCAST(scalar, 1, mpiR8, root_pe, MPI_COMM_ICE, ierr) call MPI_BARRIER(MPI_COMM_ICE, ierr) +#endif !----------------------------------------------------------------------- @@ -110,8 +120,12 @@ subroutine broadcast_scalar_real(scalar, root_pe) !----------------------------------------------------------------------- +#ifdef SERIAL_REMOVE_MPI + ! nothing to do +#else call MPI_BCAST(scalar, 1, mpiR4, root_pe, MPI_COMM_ICE, ierr) call MPI_BARRIER(MPI_COMM_ICE, ierr) +#endif !----------------------------------------------------------------------- @@ -142,8 +156,12 @@ subroutine broadcast_scalar_int(scalar, root_pe) !----------------------------------------------------------------------- +#ifdef SERIAL_REMOVE_MPI + ! nothing to do +#else call MPI_BCAST(scalar, 1, MPI_INTEGER, root_pe, MPI_COMM_ICE,ierr) call MPI_BARRIER(MPI_COMM_ICE, ierr) +#endif !----------------------------------------------------------------------- @@ -176,6 +194,9 @@ subroutine broadcast_scalar_log(scalar, root_pe) !----------------------------------------------------------------------- +#ifdef SERIAL_REMOVE_MPI + ! nothing to do +#else if (scalar) then itmp = 1 else @@ -190,6 +211,7 @@ subroutine broadcast_scalar_log(scalar, root_pe) else scalar = .false. endif +#endif !----------------------------------------------------------------------- @@ -222,10 +244,14 @@ subroutine broadcast_scalar_char(scalar, root_pe) !----------------------------------------------------------------------- +#ifdef SERIAL_REMOVE_MPI + ! nothing to do +#else clength = len(scalar) call MPI_BCAST(scalar, clength, MPI_CHARACTER, root_pe, MPI_COMM_ICE, ierr) call MPI_BARRIER(MPI_COMM_ICE, ierr) +#endif !-------------------------------------------------------------------- @@ -258,10 +284,14 @@ subroutine broadcast_array_dbl_1d(array, root_pe) !----------------------------------------------------------------------- +#ifdef SERIAL_REMOVE_MPI + ! nothing to do +#else nelements = size(array) call MPI_BCAST(array, nelements, mpiR8, root_pe, MPI_COMM_ICE, ierr) call MPI_BARRIER(MPI_COMM_ICE, ierr) +#endif !----------------------------------------------------------------------- @@ -294,10 +324,14 @@ subroutine broadcast_array_real_1d(array, root_pe) !----------------------------------------------------------------------- +#ifdef SERIAL_REMOVE_MPI + ! 
nothing to do +#else nelements = size(array) call MPI_BCAST(array, nelements, mpiR4, root_pe, MPI_COMM_ICE, ierr) call MPI_BARRIER(MPI_COMM_ICE, ierr) +#endif !----------------------------------------------------------------------- @@ -330,10 +364,14 @@ subroutine broadcast_array_int_1d(array, root_pe) !----------------------------------------------------------------------- +#ifdef SERIAL_REMOVE_MPI + ! nothing to do +#else nelements = size(array) call MPI_BCAST(array, nelements, MPI_INTEGER, root_pe, MPI_COMM_ICE, ierr) call MPI_BARRIER(MPI_COMM_ICE, ierr) +#endif !----------------------------------------------------------------------- @@ -370,6 +408,9 @@ subroutine broadcast_array_log_1d(array, root_pe) !----------------------------------------------------------------------- +#ifdef SERIAL_REMOVE_MPI + ! nothing to do +#else nelements = size(array) allocate(array_int(nelements)) @@ -390,6 +431,7 @@ subroutine broadcast_array_log_1d(array, root_pe) end where deallocate(array_int) +#endif !----------------------------------------------------------------------- @@ -422,10 +464,14 @@ subroutine broadcast_array_dbl_2d(array, root_pe) !----------------------------------------------------------------------- +#ifdef SERIAL_REMOVE_MPI + ! nothing to do +#else nelements = size(array) call MPI_BCAST(array, nelements, mpiR8, root_pe, MPI_COMM_ICE, ierr) call MPI_BARRIER(MPI_COMM_ICE, ierr) +#endif !----------------------------------------------------------------------- @@ -458,10 +504,14 @@ subroutine broadcast_array_real_2d(array, root_pe) !----------------------------------------------------------------------- +#ifdef SERIAL_REMOVE_MPI + ! nothing to do +#else nelements = size(array) call MPI_BCAST(array, nelements, mpiR4, root_pe, MPI_COMM_ICE, ierr) call MPI_BARRIER(MPI_COMM_ICE, ierr) +#endif !----------------------------------------------------------------------- @@ -494,10 +544,14 @@ subroutine broadcast_array_int_2d(array, root_pe) !----------------------------------------------------------------------- +#ifdef SERIAL_REMOVE_MPI + ! nothing to do +#else nelements = size(array) call MPI_BCAST(array, nelements, MPI_INTEGER, root_pe, MPI_COMM_ICE, ierr) call MPI_BARRIER(MPI_COMM_ICE, ierr) +#endif !----------------------------------------------------------------------- @@ -534,6 +588,9 @@ subroutine broadcast_array_log_2d(array, root_pe) !----------------------------------------------------------------------- +#ifdef SERIAL_REMOVE_MPI + ! nothing to do +#else nelements = size(array) allocate(array_int(size(array,dim=1),size(array,dim=2))) @@ -554,6 +611,7 @@ subroutine broadcast_array_log_2d(array, root_pe) end where deallocate(array_int) +#endif !----------------------------------------------------------------------- @@ -586,10 +644,14 @@ subroutine broadcast_array_dbl_3d(array, root_pe) !----------------------------------------------------------------------- +#ifdef SERIAL_REMOVE_MPI + ! nothing to do +#else nelements = size(array) call MPI_BCAST(array, nelements, mpiR8, root_pe, MPI_COMM_ICE, ierr) call MPI_BARRIER(MPI_COMM_ICE, ierr) +#endif !----------------------------------------------------------------------- @@ -622,10 +684,14 @@ subroutine broadcast_array_real_3d(array, root_pe) !----------------------------------------------------------------------- +#ifdef SERIAL_REMOVE_MPI + ! 
nothing to do +#else nelements = size(array) call MPI_BCAST(array, nelements, mpiR4, root_pe, MPI_COMM_ICE, ierr) call MPI_BARRIER(MPI_COMM_ICE, ierr) +#endif !----------------------------------------------------------------------- @@ -658,10 +724,14 @@ subroutine broadcast_array_int_3d(array, root_pe) !----------------------------------------------------------------------- +#ifdef SERIAL_REMOVE_MPI + ! nothing to do +#else nelements = size(array) call MPI_BCAST(array, nelements, MPI_INTEGER, root_pe, MPI_COMM_ICE, ierr) call MPI_BARRIER(MPI_COMM_ICE, ierr) +#endif !----------------------------------------------------------------------- @@ -698,6 +768,9 @@ subroutine broadcast_array_log_3d(array, root_pe) !----------------------------------------------------------------------- +#ifdef SERIAL_REMOVE_MPI + ! nothing to do +#else nelements = size(array) allocate(array_int(size(array,dim=1), & size(array,dim=2), & @@ -720,6 +793,7 @@ subroutine broadcast_array_log_3d(array, root_pe) end where deallocate(array_int) +#endif !----------------------------------------------------------------------- diff --git a/cicecore/cicedynB/infrastructure/comm/mpi/ice_communicate.F90 b/cicecore/cicedynB/infrastructure/comm/mpi/ice_communicate.F90 index 1c369ef93..00f427144 100644 --- a/cicecore/cicedynB/infrastructure/comm/mpi/ice_communicate.F90 +++ b/cicecore/cicedynB/infrastructure/comm/mpi/ice_communicate.F90 @@ -18,6 +18,7 @@ module ice_communicate public :: init_communicate, & get_num_procs, & + get_rank, & ice_barrier, & create_communicator @@ -121,6 +122,32 @@ function get_num_procs() end function get_num_procs +!*********************************************************************** + + function get_rank() + +! This function returns the number of processor assigned to +! MPI_COMM_ICE + + integer (int_kind) :: get_rank + +!----------------------------------------------------------------------- +! +! local variables +! 
+!----------------------------------------------------------------------- + + integer (int_kind) :: ierr + character(len=*), parameter :: subname = '(get_rank)' + +!----------------------------------------------------------------------- + + call MPI_COMM_RANK(MPI_COMM_ICE, get_rank, ierr) + +!----------------------------------------------------------------------- + + end function get_rank + !*********************************************************************** subroutine ice_barrier() diff --git a/cicecore/cicedynB/infrastructure/comm/mpi/ice_global_reductions.F90 b/cicecore/cicedynB/infrastructure/comm/mpi/ice_global_reductions.F90 index 1d724fb39..0a512ef3e 100644 --- a/cicecore/cicedynB/infrastructure/comm/mpi/ice_global_reductions.F90 +++ b/cicecore/cicedynB/infrastructure/comm/mpi/ice_global_reductions.F90 @@ -74,7 +74,8 @@ module ice_global_reductions global_maxval_int, & global_maxval_scalar_dbl, & global_maxval_scalar_real, & - global_maxval_scalar_int + global_maxval_scalar_int, & + global_maxval_scalar_int_nodist end interface interface global_minval @@ -83,7 +84,8 @@ module ice_global_reductions global_minval_int, & global_minval_scalar_dbl, & global_minval_scalar_real, & - global_minval_scalar_int + global_minval_scalar_int, & + global_minval_scalar_int_nodist end interface !*********************************************************************** @@ -1683,6 +1685,56 @@ function global_maxval_scalar_int (scalar, dist) & end function global_maxval_scalar_int +!*********************************************************************** + + function global_maxval_scalar_int_nodist (scalar, communicator) & + result(globalMaxval) + +! Computes the global maximum value of a scalar value across +! a distributed machine. +! +! This is actually the specific interface for the generic global_maxval +! function corresponding to single precision scalars. + + integer (int_kind), intent(in) :: & + scalar ! scalar for which max value needed + + integer (int_kind), intent(in) :: & + communicator ! mpi communicator + + integer (int_kind) :: & + globalMaxval ! resulting maximum value + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + ierr ! mpi error flag + + character(len=*), parameter :: subname = '(global_maxval_scalar_int_nodist)' + +!----------------------------------------------------------------------- + +!----------------------------------------------------------------------- +! +! now use MPI global reduction to reduce local maxval to global maxval +! +!----------------------------------------------------------------------- + +#ifdef SERIAL_REMOVE_MPI + globalMaxval = scalar +#else + call MPI_ALLREDUCE(scalar, globalMaxval, 1, & + MPI_INTEGER, MPI_MAX, communicator, ierr) +#endif + +!----------------------------------------------------------------------- + + end function global_maxval_scalar_int_nodist + !*********************************************************************** function global_minval_dbl (array, dist, lMask) & @@ -2179,6 +2231,55 @@ function global_minval_scalar_int (scalar, dist) & end function global_minval_scalar_int !*********************************************************************** + + function global_minval_scalar_int_nodist (scalar, communicator) & + result(globalMinval) + +! Computes the global minimum value of a scalar value across +! a distributed machine. +! +! 
This is actually the specific interface for the generic global_minval +! function corresponding to single precision scalars. + + integer (int_kind), intent(in) :: & + scalar ! scalar for which min value needed + + integer(int_kind), intent(in) :: & + communicator ! mpi communicator + + integer (int_kind) :: & + globalMinval ! resulting minimum value + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + ierr ! mpi error flag + + character(len=*), parameter :: subname = '(global_minval_scalar_int_nodist)' + +!----------------------------------------------------------------------- + +!----------------------------------------------------------------------- +! +! now use MPI global reduction to reduce local minval to global minval +! +!----------------------------------------------------------------------- + +#ifdef SERIAL_REMOVE_MPI + globalMinval = scalar +#else + call MPI_ALLREDUCE(scalar, globalMinval, 1, & + MPI_INTEGER, MPI_MIN, communicator, ierr) +#endif + +!----------------------------------------------------------------------- + + end function global_minval_scalar_int_nodist + !*********************************************************************** subroutine compute_sums_dbl(array2,sums8,mpicomm,numprocs) diff --git a/cicecore/cicedynB/infrastructure/comm/serial/ice_broadcast.F90 b/cicecore/cicedynB/infrastructure/comm/serial/ice_broadcast.F90 index 8532f23b7..75d0be4ca 100644 --- a/cicecore/cicedynB/infrastructure/comm/serial/ice_broadcast.F90 +++ b/cicecore/cicedynB/infrastructure/comm/serial/ice_broadcast.F90 @@ -1,16 +1,23 @@ !||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||||| +#define SERIAL_REMOVE_MPI module ice_broadcast ! This module contains all the broadcast routines. This -! particular version contains serial versions of these routines -! which typically perform no operations since there is no need -! to broadcast what is already known. +! particular version contains MPI versions of these routines. ! ! author: Phil Jones, LANL ! Oct. 2004: Adapted from POP version by William H. Lipscomb, LANL +#ifndef SERIAL_REMOVE_MPI + use mpi ! MPI Fortran module +#endif use ice_kinds_mod +#ifdef SERIAL_REMOVE_MPI + use ice_communicate, only: MPI_COMM_ICE +#else + use ice_communicate, only: mpiR8, mpir4, MPI_COMM_ICE +#endif use ice_exit, only: abort_ice use icepack_intfc, only: icepack_warnings_flush, icepack_warnings_aborted @@ -67,19 +74,31 @@ subroutine broadcast_scalar_dbl(scalar, root_pe) real (dbl_kind), intent(inout) :: & scalar ! scalar to be broadcast - character(len=*), parameter :: subname = '(broadcast_scalar_dbl)' - !----------------------------------------------------------------------- ! -! for serial codes, nothing is required +! local variables ! !----------------------------------------------------------------------- - - end subroutine broadcast_scalar_dbl + + integer (int_kind) :: ierr ! local MPI error flag + character(len=*), parameter :: subname = '(broadcast_scalar_dbl)' + +!----------------------------------------------------------------------- + +#ifdef SERIAL_REMOVE_MPI + ! 
nothing to do +#else + call MPI_BCAST(scalar, 1, mpiR8, root_pe, MPI_COMM_ICE, ierr) + call MPI_BARRIER(MPI_COMM_ICE, ierr) +#endif + +!----------------------------------------------------------------------- + +end subroutine broadcast_scalar_dbl !*********************************************************************** - subroutine broadcast_scalar_real(scalar, root_pe) +subroutine broadcast_scalar_real(scalar, root_pe) ! Broadcasts a scalar real variable from one processor (root_pe) ! to all other processors. This is a specific instance of the generic @@ -91,19 +110,31 @@ subroutine broadcast_scalar_real(scalar, root_pe) real (real_kind), intent(inout) :: & scalar ! scalar to be broadcast - character(len=*), parameter :: subname = '(broadcast_scalar_real)' - !----------------------------------------------------------------------- ! -! for serial codes, nothing is required +! local variables ! +!----------------------------------------------------------------------- + + integer (int_kind) :: ierr ! local MPI error flag + character(len=*), parameter :: subname = '(broadcast_scalar_real)' + +!----------------------------------------------------------------------- + +#ifdef SERIAL_REMOVE_MPI + ! nothing to do +#else + call MPI_BCAST(scalar, 1, mpiR4, root_pe, MPI_COMM_ICE, ierr) + call MPI_BARRIER(MPI_COMM_ICE, ierr) +#endif + !----------------------------------------------------------------------- end subroutine broadcast_scalar_real !*********************************************************************** - subroutine broadcast_scalar_int(scalar, root_pe) +subroutine broadcast_scalar_int(scalar, root_pe) ! Broadcasts a scalar integer variable from one processor (root_pe) ! to all other processors. This is a specific instance of the generic @@ -115,19 +146,31 @@ subroutine broadcast_scalar_int(scalar, root_pe) integer (int_kind), intent(inout) :: & scalar ! scalar to be broadcast - character(len=*), parameter :: subname = '(broadcast_scalar_int)' - !----------------------------------------------------------------------- ! -! for serial codes, nothing is required +! local variables ! +!----------------------------------------------------------------------- + + integer (int_kind) :: ierr ! local MPI error flag + character(len=*), parameter :: subname = '(broadcast_scalar_int)' + +!----------------------------------------------------------------------- + +#ifdef SERIAL_REMOVE_MPI + ! nothing to do +#else + call MPI_BCAST(scalar, 1, MPI_INTEGER, root_pe, MPI_COMM_ICE,ierr) + call MPI_BARRIER(MPI_COMM_ICE, ierr) +#endif + !----------------------------------------------------------------------- end subroutine broadcast_scalar_int !*********************************************************************** - subroutine broadcast_scalar_log(scalar, root_pe) +subroutine broadcast_scalar_log(scalar, root_pe) ! Broadcasts a scalar logical variable from one processor (root_pe) ! to all other processors. This is a specific instance of the generic @@ -139,19 +182,45 @@ subroutine broadcast_scalar_log(scalar, root_pe) logical (log_kind), intent(inout) :: & scalar ! scalar to be broadcast - character(len=*), parameter :: subname = '(broadcast_scalar_log)' - !----------------------------------------------------------------------- ! -! for serial codes, nothing is required +! local variables ! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + itmp, &! local temporary + ierr ! 
MPI error flag + character(len=*), parameter :: subname = '(broadcast_scalar_log)' + +!----------------------------------------------------------------------- + +#ifdef SERIAL_REMOVE_MPI + ! nothing to do +#else + if (scalar) then + itmp = 1 + else + itmp = 0 + endif + + call MPI_BCAST(itmp, 1, MPI_INTEGER, root_pe, MPI_COMM_ICE, ierr) + call MPI_BARRIER(MPI_COMM_ICE, ierr) + + if (itmp == 1) then + scalar = .true. + else + scalar = .false. + endif +#endif + !----------------------------------------------------------------------- end subroutine broadcast_scalar_log !*********************************************************************** - subroutine broadcast_scalar_char(scalar, root_pe) +subroutine broadcast_scalar_char(scalar, root_pe) ! Broadcasts a scalar character variable from one processor (root_pe) ! to all other processors. This is a specific instance of the generic @@ -163,19 +232,35 @@ subroutine broadcast_scalar_char(scalar, root_pe) character (*), intent(inout) :: & scalar ! scalar to be broadcast - character(len=*), parameter :: subname = '(broadcast_scalar_char)' - !----------------------------------------------------------------------- ! -! for serial codes, nothing is required +! local variables ! !----------------------------------------------------------------------- + integer (int_kind) :: & + clength, &! length of character + ierr ! MPI error flag + character(len=*), parameter :: subname = '(broadcast_scalar_char)' + +!----------------------------------------------------------------------- + +#ifdef SERIAL_REMOVE_MPI + ! nothing to do +#else + clength = len(scalar) + + call MPI_BCAST(scalar, clength, MPI_CHARACTER, root_pe, MPI_COMM_ICE, ierr) + call MPI_BARRIER(MPI_COMM_ICE, ierr) +#endif + +!-------------------------------------------------------------------- + end subroutine broadcast_scalar_char !*********************************************************************** - subroutine broadcast_array_dbl_1d(array, root_pe) +subroutine broadcast_array_dbl_1d(array, root_pe) ! Broadcasts a vector dbl variable from one processor (root_pe) ! to all other processors. This is a specific instance of the generic @@ -187,19 +272,35 @@ subroutine broadcast_array_dbl_1d(array, root_pe) real (dbl_kind), dimension(:), intent(inout) :: & array ! array to be broadcast - character(len=*), parameter :: subname = '(broadcast_array_dbl_1d)' - !----------------------------------------------------------------------- ! -! for serial codes, nothing is required +! local variables ! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + nelements, &! size of array + ierr ! local MPI error flag + character(len=*), parameter :: subname = '(broadcast_array_dbl_1d)' + +!----------------------------------------------------------------------- + +#ifdef SERIAL_REMOVE_MPI + ! nothing to do +#else + nelements = size(array) + + call MPI_BCAST(array, nelements, mpiR8, root_pe, MPI_COMM_ICE, ierr) + call MPI_BARRIER(MPI_COMM_ICE, ierr) +#endif + !----------------------------------------------------------------------- end subroutine broadcast_array_dbl_1d !*********************************************************************** - subroutine broadcast_array_real_1d(array, root_pe) +subroutine broadcast_array_real_1d(array, root_pe) ! Broadcasts a real vector from one processor (root_pe) ! to all other processors. 
This is a specific instance of the generic @@ -211,19 +312,35 @@ subroutine broadcast_array_real_1d(array, root_pe) real (real_kind), dimension(:), intent(inout) :: & array ! array to be broadcast - character(len=*), parameter :: subname = '(broadcast_array_real_1d)' - !----------------------------------------------------------------------- ! -! for serial codes, nothing is required +! local variables ! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + nelements, &! size of array to be broadcast + ierr ! local MPI error flag + character(len=*), parameter :: subname = '(broadcast_array_real_1d)' + +!----------------------------------------------------------------------- + +#ifdef SERIAL_REMOVE_MPI + ! nothing to do +#else + nelements = size(array) + + call MPI_BCAST(array, nelements, mpiR4, root_pe, MPI_COMM_ICE, ierr) + call MPI_BARRIER(MPI_COMM_ICE, ierr) +#endif + !----------------------------------------------------------------------- end subroutine broadcast_array_real_1d !*********************************************************************** - subroutine broadcast_array_int_1d(array, root_pe) +subroutine broadcast_array_int_1d(array, root_pe) ! Broadcasts an integer vector from one processor (root_pe) ! to all other processors. This is a specific instance of the generic @@ -235,19 +352,35 @@ subroutine broadcast_array_int_1d(array, root_pe) integer (int_kind), dimension(:), intent(inout) :: & array ! array to be broadcast - character(len=*), parameter :: subname = '(broadcast_array_int_1d)' - !----------------------------------------------------------------------- ! -! for serial codes, nothing is required +! local variables ! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + nelements, &! size of array to be broadcast + ierr ! local MPI error flag + character(len=*), parameter :: subname = '(broadcast_array_int_1d)' + +!----------------------------------------------------------------------- + +#ifdef SERIAL_REMOVE_MPI + ! nothing to do +#else + nelements = size(array) + + call MPI_BCAST(array, nelements, MPI_INTEGER, root_pe, MPI_COMM_ICE, ierr) + call MPI_BARRIER(MPI_COMM_ICE, ierr) +#endif + !----------------------------------------------------------------------- end subroutine broadcast_array_int_1d !*********************************************************************** - subroutine broadcast_array_log_1d(array, root_pe) +subroutine broadcast_array_log_1d(array, root_pe) ! Broadcasts a logical vector from one processor (root_pe) ! to all other processors. This is a specific instance of the generic @@ -259,12 +392,48 @@ subroutine broadcast_array_log_1d(array, root_pe) logical (log_kind), dimension(:), intent(inout) :: & array ! array to be broadcast - character(len=*), parameter :: subname = '(broadcast_array_log_1d)' - !----------------------------------------------------------------------- ! -! for serial codes, nothing is required +! local variables ! +!----------------------------------------------------------------------- + + integer (int_kind), dimension(:), allocatable :: & + array_int ! temporary array for MPI bcast + + integer (int_kind) :: & + nelements, &! size of array to be broadcast + ierr ! local MPI error flag + + character(len=*), parameter :: subname = '(broadcast_array_log_1d)' + +!----------------------------------------------------------------------- + +#ifdef SERIAL_REMOVE_MPI + ! 
nothing to do +#else + nelements = size(array) + allocate(array_int(nelements)) + + where (array) + array_int = 1 + elsewhere + array_int = 0 + end where + + call MPI_BCAST(array_int, nelements, MPI_INTEGER, root_pe, & + MPI_COMM_ICE, ierr) + call MPI_BARRIER(MPI_COMM_ICE, ierr) + + where (array_int == 1) + array = .true. + elsewhere + array = .false. + end where + + deallocate(array_int) +#endif + !----------------------------------------------------------------------- end subroutine broadcast_array_log_1d @@ -283,12 +452,28 @@ subroutine broadcast_array_dbl_2d(array, root_pe) real (dbl_kind), dimension(:,:), intent(inout) :: & array ! array to be broadcast - character(len=*), parameter :: subname = '(broadcast_array_dbl_2d)' - !----------------------------------------------------------------------- ! -! for serial codes, nothing is required +! local variables ! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + nelements, &! size of array + ierr ! local MPI error flag + character(len=*), parameter :: subname = '(broadcast_array_dbl_2d)' + +!----------------------------------------------------------------------- + +#ifdef SERIAL_REMOVE_MPI + ! nothing to do +#else + nelements = size(array) + + call MPI_BCAST(array, nelements, mpiR8, root_pe, MPI_COMM_ICE, ierr) + call MPI_BARRIER(MPI_COMM_ICE, ierr) +#endif + !----------------------------------------------------------------------- end subroutine broadcast_array_dbl_2d @@ -307,12 +492,28 @@ subroutine broadcast_array_real_2d(array, root_pe) real (real_kind), dimension(:,:), intent(inout) :: & array ! array to be broadcast - character(len=*), parameter :: subname = '(broadcast_array_real_2d)' - !----------------------------------------------------------------------- ! -! for serial codes, nothing is required +! local variables ! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + nelements, &! size of array to be broadcast + ierr ! local MPI error flag + character(len=*), parameter :: subname = '(broadcast_array_real_2d)' + +!----------------------------------------------------------------------- + +#ifdef SERIAL_REMOVE_MPI + ! nothing to do +#else + nelements = size(array) + + call MPI_BCAST(array, nelements, mpiR4, root_pe, MPI_COMM_ICE, ierr) + call MPI_BARRIER(MPI_COMM_ICE, ierr) +#endif + !----------------------------------------------------------------------- end subroutine broadcast_array_real_2d @@ -331,12 +532,28 @@ subroutine broadcast_array_int_2d(array, root_pe) integer (int_kind), dimension(:,:), intent(inout) :: & array ! array to be broadcast - character(len=*), parameter :: subname = '(broadcast_array_int_2d)' - !----------------------------------------------------------------------- ! -! for serial codes, nothing is required +! local variables ! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + nelements, &! size of array to be broadcast + ierr ! local MPI error flag + character(len=*), parameter :: subname = '(broadcast_array_int_2d)' + +!----------------------------------------------------------------------- + +#ifdef SERIAL_REMOVE_MPI + ! 
nothing to do +#else + nelements = size(array) + + call MPI_BCAST(array, nelements, MPI_INTEGER, root_pe, MPI_COMM_ICE, ierr) + call MPI_BARRIER(MPI_COMM_ICE, ierr) +#endif + !----------------------------------------------------------------------- end subroutine broadcast_array_int_2d @@ -355,12 +572,48 @@ subroutine broadcast_array_log_2d(array, root_pe) logical (log_kind), dimension(:,:), intent(inout) :: & array ! array to be broadcast - character(len=*), parameter :: subname = '(broadcast_array_log_2d)' - !----------------------------------------------------------------------- ! -! for serial codes, nothing is required +! local variables ! +!----------------------------------------------------------------------- + + integer (int_kind), dimension(:,:), allocatable :: & + array_int ! temporary array for MPI bcast + + integer (int_kind) :: & + nelements, &! size of array to be broadcast + ierr ! local MPI error flag + + character(len=*), parameter :: subname = '(broadcast_array_log_2d)' + +!----------------------------------------------------------------------- + +#ifdef SERIAL_REMOVE_MPI + ! nothing to do +#else + nelements = size(array) + allocate(array_int(size(array,dim=1),size(array,dim=2))) + + where (array) + array_int = 1 + elsewhere + array_int = 0 + end where + + call MPI_BCAST(array_int, nelements, MPI_INTEGER, root_pe, & + MPI_COMM_ICE, ierr) + call MPI_BARRIER(MPI_COMM_ICE, ierr) + + where (array_int == 1) + array = .true. + elsewhere + array = .false. + end where + + deallocate(array_int) +#endif + !----------------------------------------------------------------------- end subroutine broadcast_array_log_2d @@ -379,12 +632,28 @@ subroutine broadcast_array_dbl_3d(array, root_pe) real (dbl_kind), dimension(:,:,:), intent(inout) :: & array ! array to be broadcast - character(len=*), parameter :: subname = '(broadcast_array_dbl_3d)' - !----------------------------------------------------------------------- ! -! for serial codes, nothing is required +! local variables ! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + nelements, &! size of array + ierr ! local MPI error flag + character(len=*), parameter :: subname = '(broadcast_array_dbl_3d)' + +!----------------------------------------------------------------------- + +#ifdef SERIAL_REMOVE_MPI + ! nothing to do +#else + nelements = size(array) + + call MPI_BCAST(array, nelements, mpiR8, root_pe, MPI_COMM_ICE, ierr) + call MPI_BARRIER(MPI_COMM_ICE, ierr) +#endif + !----------------------------------------------------------------------- end subroutine broadcast_array_dbl_3d @@ -403,12 +672,28 @@ subroutine broadcast_array_real_3d(array, root_pe) real (real_kind), dimension(:,:,:), intent(inout) :: & array ! array to be broadcast - character(len=*), parameter :: subname = '(broadcast_array_real_3d)' - !----------------------------------------------------------------------- ! -! for serial codes, nothing is required +! local variables ! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + nelements, &! size of array to be broadcast + ierr ! local MPI error flag + character(len=*), parameter :: subname = '(broadcast_array_real_3d)' + +!----------------------------------------------------------------------- + +#ifdef SERIAL_REMOVE_MPI + ! 
nothing to do +#else + nelements = size(array) + + call MPI_BCAST(array, nelements, mpiR4, root_pe, MPI_COMM_ICE, ierr) + call MPI_BARRIER(MPI_COMM_ICE, ierr) +#endif + !----------------------------------------------------------------------- end subroutine broadcast_array_real_3d @@ -427,12 +712,28 @@ subroutine broadcast_array_int_3d(array, root_pe) integer (int_kind), dimension(:,:,:), intent(inout) :: & array ! array to be broadcast - character(len=*), parameter :: subname = '(broadcast_array_int_3d)' - !----------------------------------------------------------------------- ! -! for serial codes, nothing is required +! local variables ! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + nelements, &! size of array to be broadcast + ierr ! local MPI error flag + character(len=*), parameter :: subname = '(broadcast_array_int_3d)' + +!----------------------------------------------------------------------- + +#ifdef SERIAL_REMOVE_MPI + ! nothing to do +#else + nelements = size(array) + + call MPI_BCAST(array, nelements, MPI_INTEGER, root_pe, MPI_COMM_ICE, ierr) + call MPI_BARRIER(MPI_COMM_ICE, ierr) +#endif + !----------------------------------------------------------------------- end subroutine broadcast_array_int_3d @@ -451,12 +752,50 @@ subroutine broadcast_array_log_3d(array, root_pe) logical (log_kind), dimension(:,:,:), intent(inout) :: & array ! array to be broadcast - character(len=*), parameter :: subname = '(broadcast_array_log_3d)' - !----------------------------------------------------------------------- ! -! for serial codes, nothing is required +! local variables ! +!----------------------------------------------------------------------- + + integer (int_kind), dimension(:,:,:), allocatable :: & + array_int ! temporary array for MPI bcast + + integer (int_kind) :: & + nelements, &! size of array to be broadcast + ierr ! local MPI error flag + + character(len=*), parameter :: subname = '(broadcast_array_log_3d)' + +!----------------------------------------------------------------------- + +#ifdef SERIAL_REMOVE_MPI + ! nothing to do +#else + nelements = size(array) + allocate(array_int(size(array,dim=1), & + size(array,dim=2), & + size(array,dim=3))) + + where (array) + array_int = 1 + elsewhere + array_int = 0 + end where + + call MPI_BCAST(array_int, nelements, MPI_INTEGER, root_pe, & + MPI_COMM_ICE, ierr) + call MPI_BARRIER(MPI_COMM_ICE, ierr) + + where (array_int == 1) + array = .true. + elsewhere + array = .false. + end where + + deallocate(array_int) +#endif + !----------------------------------------------------------------------- end subroutine broadcast_array_log_3d diff --git a/cicecore/cicedynB/infrastructure/comm/serial/ice_communicate.F90 b/cicecore/cicedynB/infrastructure/comm/serial/ice_communicate.F90 index c9df264dd..ed11aafec 100644 --- a/cicecore/cicedynB/infrastructure/comm/serial/ice_communicate.F90 +++ b/cicecore/cicedynB/infrastructure/comm/serial/ice_communicate.F90 @@ -18,6 +18,7 @@ module ice_communicate public :: init_communicate, & get_num_procs, & + get_rank, & ice_barrier, & create_communicator @@ -85,6 +86,29 @@ function get_num_procs() end function get_num_procs +!*********************************************************************** + + function get_rank() + +! This function returns the number of processors assigned to +! the ice model. 
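! (usage sketch, assumed from the bcstchk driver added in this patch: the
!  caller initializes communications first, then queries size and rank;
!  in this serial build get_rank always returns 0, while the MPI build
!  returns the task's rank in MPI_COMM_ICE)
!
!    call init_communicate()
!    npes    = get_num_procs()
!    my_task = get_rank()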
+ + integer (int_kind) :: get_rank + + character(len=*), parameter :: subname = '(get_rank)' + +!----------------------------------------------------------------------- +! +! serial execution, must be only 1 +! +!----------------------------------------------------------------------- + + get_rank = 0 + +!----------------------------------------------------------------------- + + end function get_rank + !*********************************************************************** subroutine ice_barrier() diff --git a/cicecore/cicedynB/infrastructure/comm/serial/ice_global_reductions.F90 b/cicecore/cicedynB/infrastructure/comm/serial/ice_global_reductions.F90 index 4d53e873e..049eae6ec 100644 --- a/cicecore/cicedynB/infrastructure/comm/serial/ice_global_reductions.F90 +++ b/cicecore/cicedynB/infrastructure/comm/serial/ice_global_reductions.F90 @@ -75,7 +75,8 @@ module ice_global_reductions global_maxval_int, & global_maxval_scalar_dbl, & global_maxval_scalar_real, & - global_maxval_scalar_int + global_maxval_scalar_int, & + global_maxval_scalar_int_nodist end interface interface global_minval @@ -84,7 +85,8 @@ module ice_global_reductions global_minval_int, & global_minval_scalar_dbl, & global_minval_scalar_real, & - global_minval_scalar_int + global_minval_scalar_int, & + global_minval_scalar_int_nodist end interface !*********************************************************************** @@ -1684,6 +1686,56 @@ function global_maxval_scalar_int (scalar, dist) & end function global_maxval_scalar_int +!*********************************************************************** + + function global_maxval_scalar_int_nodist (scalar, communicator) & + result(globalMaxval) + +! Computes the global maximum value of a scalar value across +! a distributed machine. +! +! This is actually the specific interface for the generic global_maxval +! function corresponding to single precision scalars. + + integer (int_kind), intent(in) :: & + scalar ! scalar for which max value needed + + integer (int_kind), intent(in) :: & + communicator ! mpi communicator + + integer (int_kind) :: & + globalMaxval ! resulting maximum value + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + ierr ! mpi error flag + + character(len=*), parameter :: subname = '(global_maxval_scalar_int_nodist)' + +!----------------------------------------------------------------------- + +!----------------------------------------------------------------------- +! +! now use MPI global reduction to reduce local maxval to global maxval +! +!----------------------------------------------------------------------- + +#ifdef SERIAL_REMOVE_MPI + globalMaxval = scalar +#else + call MPI_ALLREDUCE(scalar, globalMaxval, 1, & + MPI_INTEGER, MPI_MAX, communicator, ierr) +#endif + +!----------------------------------------------------------------------- + + end function global_maxval_scalar_int_nodist + !*********************************************************************** function global_minval_dbl (array, dist, lMask) & @@ -2180,6 +2232,55 @@ function global_minval_scalar_int (scalar, dist) & end function global_minval_scalar_int !*********************************************************************** + + function global_minval_scalar_int_nodist (scalar, communicator) & + result(globalMinval) + +! Computes the global minimum value of a scalar value across +! a distributed machine. +! +! 
This is actually the specific interface for the generic global_minval +! function corresponding to single precision scalars. + + integer (int_kind), intent(in) :: & + scalar ! scalar for which min value needed + + integer(int_kind), intent(in) :: & + communicator ! mpi communicator + + integer (int_kind) :: & + globalMinval ! resulting minimum value + +!----------------------------------------------------------------------- +! +! local variables +! +!----------------------------------------------------------------------- + + integer (int_kind) :: & + ierr ! mpi error flag + + character(len=*), parameter :: subname = '(global_minval_scalar_int_nodist)' + +!----------------------------------------------------------------------- + +!----------------------------------------------------------------------- +! +! now use MPI global reduction to reduce local minval to global minval +! +!----------------------------------------------------------------------- + +#ifdef SERIAL_REMOVE_MPI + globalMinval = scalar +#else + call MPI_ALLREDUCE(scalar, globalMinval, 1, & + MPI_INTEGER, MPI_MIN, communicator, ierr) +#endif + +!----------------------------------------------------------------------- + + end function global_minval_scalar_int_nodist + !*********************************************************************** subroutine compute_sums_dbl(array2,sums8,mpicomm,numprocs) diff --git a/cicecore/cicedynB/infrastructure/ice_blocks.F90 b/cicecore/cicedynB/infrastructure/ice_blocks.F90 index 2768a40c3..74aba9cb5 100644 --- a/cicecore/cicedynB/infrastructure/ice_blocks.F90 +++ b/cicecore/cicedynB/infrastructure/ice_blocks.F90 @@ -316,10 +316,11 @@ subroutine create_blocks(nx_global, ny_global, ew_boundary_type, & if (my_task == master_task) then write(nu_diag,*) 'block i,j locations' do n = 1, nblocks_tot - write(nu_diag,*) 'block id, iblock, jblock:', & + write(nu_diag,*) 'block id, iblock, jblock, tripole:', & all_blocks(n)%block_id, & all_blocks(n)%iblock, & - all_blocks(n)%jblock + all_blocks(n)%jblock, & + all_blocks(n)%tripole enddo endif endif diff --git a/cicecore/drivers/unittest/bcstchk/bcstchk.F90 b/cicecore/drivers/unittest/bcstchk/bcstchk.F90 new file mode 100644 index 000000000..56e8a4421 --- /dev/null +++ b/cicecore/drivers/unittest/bcstchk/bcstchk.F90 @@ -0,0 +1,281 @@ + + program bcstchk + + use ice_kinds_mod, only: int_kind, dbl_kind, real_kind, log_kind + use ice_communicate, only: my_task, master_task, get_num_procs, get_rank, MPI_COMM_ICE + use ice_communicate, only: init_communicate, get_num_procs, ice_barrier + use ice_global_reductions, only: global_maxval + use ice_fileunits, only: flush_fileunit + use ice_exit, only: abort_ice, end_run + use ice_broadcast + + implicit none + + integer(int_kind) :: n, k, k1, k2, k3 + + integer(int_kind), parameter :: dsize = 10 + integer(int_kind) :: ival, i0, i1(dsize), i2(dsize,dsize), i3(dsize,dsize,dsize) + logical(log_kind) :: lval, l0, l1(dsize), l2(dsize,dsize), l3(dsize,dsize,dsize) + real(real_kind) :: rval, r0, r1(dsize), r2(dsize,dsize), r3(dsize,dsize,dsize) + real(dbl_kind) :: dval, d0, d1(dsize), d2(dsize,dsize), d3(dsize,dsize,dsize) + character(len=32) :: cval, c0 + + real(dbl_kind) :: xval + + integer(int_kind), parameter :: ntests1 = 17 + character(len=8) :: errorflag1(ntests1) + character(len=32) :: stringflag1(ntests1) + + integer(int_kind) :: ierr, npes, bcst_pe + integer(int_kind) :: iflag, gflag + character(len=8) :: errorflag0 + character(len=16) :: teststr + character(len=*), parameter :: & + passflag = 'PASS', & + failflag = 
'FAIL' + + character(len=*), parameter :: subname = '(bcstchk)' + + ! --------------------------- + + call init_communicate() + npes = get_num_procs() + my_task = get_rank() + master_task = 0 + + if (my_task == master_task) then + write(6,*) ' ' + write(6,*) '==========================================================' + write(6,*) ' ' + write(6,*) 'Running BCSTCHK' + write(6,*) ' ' + write(6,*) ' npes = ',npes + write(6,*) ' my_task = ',my_task + write(6,*) ' ' + endif + + errorflag0 = passflag + errorflag1 = passflag + stringflag1 = ' ' + + ! --------------------------- + ! Test ice_broadcast methods + ! Test broadcast from root and from npes + ! --------------------------- + + do k = 1,2 + if (k == 1) then + bcst_pe = 0 + else + bcst_pe = max(npes,1) - 1 + endif + if (my_task == master_task) then + write(6,*) ' ' + write(6,*) ' bcst_pe = ',bcst_pe + endif + + xval = -999._dbl_kind + rval = 21.5_real_kind + real(bcst_pe,kind=real_kind) + dval = 17.3_dbl_kind + real(bcst_pe,kind=dbl_kind) + ival = 223 + bcst_pe + write(cval,'(a,i4.4)') 'string is passed from ',bcst_pe + lval = (k == 1) + + do n = 1,ntests1 + i0 = xval + i1 = xval + i2 = xval + i3 = xval + r0 = xval + r1 = xval + r2 = xval + r3 = xval + d0 = xval + d1 = xval + d2 = xval + d3 = xval + l0 = .not.lval + l1 = .not.lval + l2 = .not.lval + l3 = .not.lval + c0 = 'nothing to see here' + + if (my_task == bcst_pe) then + i0 = ival + i1 = ival + i2 = ival + i3 = ival + r0 = rval + r1 = rval + r2 = rval + r3 = rval + d0 = dval + d1 = dval + d2 = dval + d3 = dval + l0 = lval + l1 = lval + l2 = lval + l3 = lval + c0 = cval + endif + + iflag = 0 + gflag = -1 + write(teststr,'(a,1x,i2.2)') ' test',n + if (n == 1) then + stringflag1(n) = ' bcst_scalar_dbl' + call broadcast_scalar(d0, bcst_pe) + if (my_task == master_task) write(6,*) trim(teststr),trim(stringflag1(n)),d0,dval + if (d0 /= dval) iflag=1 + elseif (n == 2) then + stringflag1(n) = ' bcst_array_dbl_1d' + call broadcast_array(d1, bcst_pe) + if (my_task == master_task) write(6,*) trim(teststr),trim(stringflag1(n)),minval(d1),maxval(d1),dval + if (minval(d1) /= dval) iflag=1 + if (maxval(d1) /= dval) iflag=1 + elseif (n == 3) then + stringflag1(n) = ' bcst_array_dbl_2d' + call broadcast_array(d2, bcst_pe) + if (my_task == master_task) write(6,*) trim(teststr),trim(stringflag1(n)),minval(d2),maxval(d2),dval + if (minval(d2) /= dval) iflag=1 + if (maxval(d2) /= dval) iflag=1 + elseif (n == 4) then + stringflag1(n) = ' bcst_array_dbl_3d' + call broadcast_array(d3, bcst_pe) + if (my_task == master_task) write(6,*) trim(teststr),trim(stringflag1(n)),minval(d3),maxval(d3),dval + if (minval(d3) /= dval) iflag=1 + if (maxval(d3) /= dval) iflag=1 + elseif (n == 5) then + stringflag1(n) = ' bcst_scalar_real' + call broadcast_scalar(r0, bcst_pe) + if (my_task == master_task) write(6,*) trim(teststr),trim(stringflag1(n)),r0,rval + if (r0 /= rval) iflag=1 + elseif (n == 6) then + stringflag1(n) = ' bcst_array_real_1d' + call broadcast_array(r1, bcst_pe) + if (my_task == master_task) write(6,*) trim(teststr),trim(stringflag1(n)),minval(r1),maxval(r1),rval + if (minval(r1) /= rval) iflag=1 + if (maxval(r1) /= rval) iflag=1 + elseif (n == 7) then + stringflag1(n) = ' bcst_array_real_2d' + call broadcast_array(r2, bcst_pe) + if (my_task == master_task) write(6,*) trim(teststr),trim(stringflag1(n)),minval(r2),maxval(r2),rval + if (minval(r2) /= rval) iflag=1 + if (maxval(r2) /= rval) iflag=1 + elseif (n == 8) then + stringflag1(n) = ' bcst_array_real_3d' + call broadcast_array(r3, bcst_pe) + if 
(my_task == master_task) write(6,*) trim(teststr),trim(stringflag1(n)),minval(r3),maxval(r3),rval + if (minval(r3) /= rval) iflag=1 + if (maxval(r3) /= rval) iflag=1 + elseif (n == 9) then + stringflag1(n) = ' bcst_scalar_int' + call broadcast_scalar(i0, bcst_pe) + if (my_task == master_task) write(6,*) trim(teststr),trim(stringflag1(n)),i0,ival + if (i0 /= ival) iflag=1 + elseif (n == 10) then + stringflag1(n) = ' bcst_array_int_1d' + call broadcast_array(i1, bcst_pe) + if (my_task == master_task) write(6,*) trim(teststr),trim(stringflag1(n)),minval(i1),maxval(i1),ival + if (minval(i1) /= ival) iflag=1 + if (maxval(i1) /= ival) iflag=1 + elseif (n == 11) then + stringflag1(n) = ' bcst_array_int_2d' + call broadcast_array(i2, bcst_pe) + if (my_task == master_task) write(6,*) trim(teststr),trim(stringflag1(n)),minval(i2),maxval(i2),ival + if (minval(i2) /= ival) iflag=1 + if (maxval(i2) /= ival) iflag=1 + elseif (n == 12) then + stringflag1(n) = ' bcst_array_int_3d' + call broadcast_array(i3, bcst_pe) + if (my_task == master_task) write(6,*) trim(teststr),trim(stringflag1(n)),minval(i3),maxval(i3),ival + if (minval(i3) /= ival) iflag=1 + if (maxval(i3) /= ival) iflag=1 + elseif (n == 13) then + stringflag1(n) = ' bcst_scalar_logical' + call broadcast_scalar(l0, bcst_pe) + if (my_task == master_task) write(6,*) trim(teststr),trim(stringflag1(n)),l0,lval + if (l0 .neqv. lval) iflag=1 + elseif (n == 14) then + stringflag1(n) = ' bcst_array_logical_1d' + call broadcast_array(l1, bcst_pe) + if (my_task == master_task) write(6,*) trim(teststr),trim(stringflag1(n)),l1(1),lval + do k1 = 1,dsize + if (l1(k1) .neqv. lval) iflag=1 + enddo + elseif (n == 15) then + stringflag1(n) = ' bcst_array_logical_2d' + call broadcast_array(l2, bcst_pe) + if (my_task == master_task) write(6,*) trim(teststr),trim(stringflag1(n)),l2(1,1),lval + do k2 = 1,dsize + do k1 = 1,dsize + if (l2(k1,k2) .neqv. lval) iflag=1 + enddo + enddo + elseif (n == 16) then + stringflag1(n) = ' bcst_array_logical_3d' + call broadcast_array(l3, bcst_pe) + if (my_task == master_task) write(6,*) trim(teststr),trim(stringflag1(n)),l3(1,1,1),lval + do k3 = 1,dsize + do k2 = 1,dsize + do k1 = 1,dsize + if (l3(k1,k2,k3) .neqv. lval) iflag=1 + enddo + enddo + enddo + elseif (n == 17) then + stringflag1(n) = ' bcst_scalar_char' + call broadcast_scalar(c0, bcst_pe) + if (my_task == master_task) write(6,*) trim(teststr),trim(stringflag1(n)),' ',trim(c0),' : ',trim(cval) + if (c0 /= cval) iflag=1 + else + call abort_ice(subname//' illegal k bcst',file=__FILE__,line=__LINE__) + endif + + gflag = global_maxval(iflag, MPI_COMM_ICE) + if (gflag /= 0) then + if (my_task == master_task) write(6,*) ' **** ERROR test ',n + errorflag1(n) = failflag + errorflag0 = failflag + endif + enddo ! n + enddo ! k + + call flush_fileunit(6) + call ice_barrier() + + ! --------------------------- + + if (my_task == master_task) then + write(6,*) ' ' + do k = 1,ntests1 + write(6,*) errorflag1(k),stringflag1(k) + enddo + write(6,*) ' ' + if (errorflag0 == passflag) then + write(6,*) 'BCSTCHK COMPLETED SUCCESSFULLY' + else + write(6,*) 'BCSTCHK FAILED' + call abort_ice(subname//' ERROR: BCSTCHK FAILED',file=__FILE__,line=__LINE__) + endif + write(6,*) ' ' + write(6,*) '==========================================================' + write(6,*) ' ' + write(6,*) 'NOTE: We are testing the abort now so you should see an abort to follow' + write(6,*) 'The BCSTCHK passed, so please ignore the abort' + write(6,*) ' ' + endif + + ! 
Test abort_ice, regardless of test outcome + call flush_fileunit(6) + call ice_barrier() + call abort_ice(subname//' Test abort ',file=__FILE__,line=__LINE__) + + if (my_task == master_task) write(6,*) subname,'This line should not be written' + + call end_run() + + end program bcstchk + +!======================================================================= diff --git a/cicecore/drivers/unittest/sumchk/sumchk.F90 b/cicecore/drivers/unittest/sumchk/sumchk.F90 index 27379cf8a..6a692fa0d 100644 --- a/cicecore/drivers/unittest/sumchk/sumchk.F90 +++ b/cicecore/drivers/unittest/sumchk/sumchk.F90 @@ -5,11 +5,12 @@ program sumchk use CICE_FinalMod use ice_kinds_mod, only: int_kind, dbl_kind, real_kind use ice_communicate, only: my_task, master_task, get_num_procs + use ice_domain_size, only: nx_global, ny_global use ice_domain_size, only: block_size_x, block_size_y, max_blocks use ice_domain, only: distrb_info use ice_blocks, only: block, get_block, nx_block, ny_block, nblocks_tot use ice_distribution, only: ice_distributionGetBlockID, ice_distributionGet - use ice_constants, only: field_loc_center + use ice_constants, only: field_loc_Nface use ice_fileunits, only: bfbflag use ice_global_reductions use ice_exit, only: abort_ice @@ -33,7 +34,7 @@ program sumchk real(dbl_kind) :: locval, corval, minval, maxval ! local, correct, min, max values real(dbl_kind) :: locval8, sumval8, minval8, maxval8 real(real_kind) :: locval4, sumval4, minval4, maxval4 - integer(int_kind) :: locvali, sumvali, corvali, minvali, maxvali + integer(int_kind) :: iocval, locvali, sumvali, corvali, minvali, maxvali real(dbl_kind) :: lscale real(dbl_kind) :: reldig,reldigchk_now real(dbl_kind) ,allocatable :: reldigchk(:,:) @@ -88,9 +89,11 @@ program sumchk write(6,*) ' ' write(6,*) ' npes = ',npes write(6,*) ' my_task = ',my_task - write(6,*) ' nblocks_tot = ',nblocks_tot + write(6,*) ' nx_global = ',nx_global + write(6,*) ' ny_global = ',ny_global write(6,*) ' block_size_x = ',block_size_x write(6,*) ' block_size_y = ',block_size_y + write(6,*) ' nblocks_tot = ',nblocks_tot write(6,*) ' ' endif @@ -130,11 +133,19 @@ program sumchk ! set corval to something a little interesting (not 1.0 for instance which gives atypical results) corval = 4.0_dbl_kind/3.0_dbl_kind - locval = corval / real(nblocks_tot*(block_size_x*block_size_y-2),dbl_kind) - corvali = 92544 + iocval = 8 + if (nx_global == 360 .and. ny_global == 240) then + locval = corval / real((nblocks_tot*(block_size_x*block_size_y-2)-nx_global/2),dbl_kind) + corvali = (nblocks_tot*(block_size_x*block_size_y-2)-nx_global/2)*iocval + elseif (nx_global == 100 .and. ny_global == 116) then + locval = corval / real(nblocks_tot*(block_size_x*block_size_y-2),dbl_kind) + corvali = nblocks_tot*(block_size_x*block_size_y-2)*iocval + else + call abort_ice(subname//' ERROR not set for this grid ') + endif if (my_task == master_task) then -! write(6,*) ' local array value = ',locval + write(6,*) ' local array value = ',locval write(6,*) ' correct value = ',corval write(6,*) ' correct value int = ',corvali write(6,*) ' ' @@ -157,6 +168,10 @@ program sumchk reldigchk(3,4) = 3. reldigchk(4,4) = 0. reldigchk(5,4) = 15. + if (nx_global == 360 .and. ny_global == 240) then + reldigchk(1:2,1) = 13. + reldigchk(5,4) = 14. + endif ! test list n = 1 ; stringflag1(n) = 'dble sum easy' @@ -202,18 +217,18 @@ program sumchk jb = this_block%jlo je = this_block%jhi - lmask(ie,je,iblock) = .false. - lmask(ie,je-1,iblock) = .false. 
- arrayA(ie,je,iblock) = locval * lscale - arrayA(ie,je-1,iblock) = -arrayA(ie,je,iblock) - arrayB(ie,je,iblock) = locval * lscale - arrayB(ie,je-1,iblock) = arrayB(ie,je,iblock) + lmask(ie,je-1,iblock) = .false. + lmask(ie,je-2,iblock) = .false. + arrayA(ie,je-1,iblock) = locval * lscale + arrayA(ie,je-2,iblock) = -arrayA(ie,je-1,iblock) + arrayB(ie,je-1,iblock) = locval * lscale + arrayB(ie,je-2,iblock) = arrayB(ie,je-1,iblock) arrayC(ib,jb,iblock) = locval * lscale - arrayC(ib+1,jb,iblock) = -arrayA(ie,je,iblock) - arrayiA(:,:,iblock) = 8 - arrayiB(:,:,iblock) = 8 - arrayiA(ie,je,iblock) = 137 * 4 - arrayiA(ie,je-1,iblock) = -arrayiA(ie,je,iblock) + arrayC(ib+1,jb,iblock) = -arrayC(ib,jb,iblock) + arrayiA(:,:,iblock) = iocval + arrayiB(:,:,iblock) = iocval + arrayiA(ie,je-1,iblock) = 13 * iocval + arrayiA(ie,je-2,iblock) = -arrayiA(ie,je-1,iblock) enddo do k = 1,ntests1 @@ -225,82 +240,82 @@ program sumchk if (k == 1) then array8(:,:,:) = arrayC(:,:,:) - sumval8 = global_sum(array8, distrb_info, field_loc_center) + sumval8 = global_sum(array8, distrb_info, field_loc_Nface) elseif (k == 2) then array8(:,:,:) = arrayA(:,:,:) - sumval8 = global_sum(array8, distrb_info, field_loc_center) + sumval8 = global_sum(array8, distrb_info, field_loc_Nface) elseif (k == 3) then array4(:,:,:) = arrayA(:,:,:) - sumval4 = global_sum(array4, distrb_info, field_loc_center) + sumval4 = global_sum(array4, distrb_info, field_loc_Nface) sumval8 = sumval4 elseif (k == 4) then arrayi1 = arrayiA - sumvali = global_sum(arrayi1, distrb_info, field_loc_center) + sumvali = global_sum(arrayi1, distrb_info, field_loc_Nface) elseif (k == 5) then mmask8(:,:,:) = 6.0_dbl_kind array8(:,:,:) = arrayA(:,:,:)/mmask8(:,:,:) - sumval8 = global_sum(array8, distrb_info, field_loc_center, mmask=mmask8) + sumval8 = global_sum(array8, distrb_info, field_loc_Nface, mmask=mmask8) elseif (k == 6) then mmask4(:,:,:) = 6.0_real_kind array4(:,:,:) = arrayA(:,:,:)/mmask4(:,:,:) - sumval4 = global_sum(array4, distrb_info, field_loc_center, mmask=mmask4) + sumval4 = global_sum(array4, distrb_info, field_loc_Nface, mmask=mmask4) sumval8 = sumval4 elseif (k == 7) then mmaski(:,:,:) = 2 arrayi1(:,:,:) = arrayiA(:,:,:)/mmaski(:,:,:) - sumvali = global_sum(arrayi1, distrb_info, field_loc_center, mmask=mmaski) + sumvali = global_sum(arrayi1, distrb_info, field_loc_Nface, mmask=mmaski) elseif (k == 8) then array8(:,:,:) = arrayB(:,:,:) - sumval8 = global_sum(array8, distrb_info, field_loc_center, lmask=lmask) + sumval8 = global_sum(array8, distrb_info, field_loc_Nface, lmask=lmask) elseif (k == 9) then array4(:,:,:) = arrayB(:,:,:) - sumval4 = global_sum(array4, distrb_info, field_loc_center, lmask=lmask) + sumval4 = global_sum(array4, distrb_info, field_loc_Nface, lmask=lmask) sumval8 = sumval4 elseif (k == 10) then arrayi1(:,:,:) = arrayiB(:,:,:) - sumvali = global_sum(arrayi1, distrb_info, field_loc_center, lmask=lmask) + sumvali = global_sum(arrayi1, distrb_info, field_loc_Nface, lmask=lmask) elseif (k == 11) then array82(:,:,:) = 7.0_dbl_kind array8(:,:,:) = arrayA(:,:,:)/array82(:,:,:) - sumval8 = global_sum_prod(array8, array82, distrb_info, field_loc_center) + sumval8 = global_sum_prod(array8, array82, distrb_info, field_loc_Nface) elseif (k == 12) then array42(:,:,:) = 7.0_real_kind array4(:,:,:) = arrayA(:,:,:)/array42(:,:,:) - sumval4 = global_sum_prod(array4, array42, distrb_info, field_loc_center) + sumval4 = global_sum_prod(array4, array42, distrb_info, field_loc_Nface) sumval8 = sumval4 elseif (k == 13) then arrayi2(:,:,:) = 4 
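! (k=13) integer global_sum_prod check: arrayiA is divided by arrayi2 here and
! multiplied back inside the product sum, so the expected result is corvali;
! this stays exact because iocval=8 and 13*iocval are both divisible by the
! factors of 2 and 4 used in the masked and product tests.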
arrayi1(:,:,:) = arrayiA(:,:,:)/arrayi2(:,:,:) - sumvali = global_sum_prod(arrayi1, arrayi2, distrb_info, field_loc_center) + sumvali = global_sum_prod(arrayi1, arrayi2, distrb_info, field_loc_Nface) elseif (k == 14) then array82(:,:,:) = 7.0_dbl_kind mmask8(:,:,:) = 6.0_dbl_kind array8(:,:,:) = arrayA(:,:,:)/(mmask8(:,:,:)*array82(:,:,:)) - sumval8 = global_sum_prod(array8, array82, distrb_info, field_loc_center, mmask=mmask8) + sumval8 = global_sum_prod(array8, array82, distrb_info, field_loc_Nface, mmask=mmask8) elseif (k == 15) then array42(:,:,:) = 7.0_real_kind mmask4(:,:,:) = 6.0_real_kind array4(:,:,:) = arrayA(:,:,:)/(mmask4(:,:,:)*array42(:,:,:)) - sumval4 = global_sum_prod(array4, array42, distrb_info, field_loc_center, mmask=mmask4) + sumval4 = global_sum_prod(array4, array42, distrb_info, field_loc_Nface, mmask=mmask4) sumval8 = sumval4 elseif (k == 16) then arrayi2(:,:,:) = 2 mmaski(:,:,:) = 2 arrayi1(:,:,:) = arrayiA(:,:,:)/(arrayi2(:,:,:)*mmaski(:,:,:)) - sumvali = global_sum_prod(arrayi1, arrayi2, distrb_info, field_loc_center, mmask=mmaski) + sumvali = global_sum_prod(arrayi1, arrayi2, distrb_info, field_loc_Nface, mmask=mmaski) elseif (k == 17) then array82(:,:,:) = 7.0_dbl_kind array8(:,:,:) = arrayB(:,:,:)/array82(:,:,:) - sumval8 = global_sum_prod(array8, array82, distrb_info, field_loc_center, lmask=lmask) + sumval8 = global_sum_prod(array8, array82, distrb_info, field_loc_Nface, lmask=lmask) elseif (k == 18) then array42(:,:,:) = 7.0_real_kind array4(:,:,:) = arrayB(:,:,:)/array42(:,:,:) - sumval4 = global_sum_prod(array4, array42, distrb_info, field_loc_center, lmask=lmask) + sumval4 = global_sum_prod(array4, array42, distrb_info, field_loc_Nface, lmask=lmask) sumval8 = sumval4 elseif (k == 19) then arrayi2(:,:,:) = 4 arrayi1(:,:,:) = arrayiB(:,:,:)/(arrayi2(:,:,:)) - sumvali = global_sum_prod(arrayi1, arrayi2, distrb_info, field_loc_center, lmask=lmask) + sumvali = global_sum_prod(arrayi1, arrayi2, distrb_info, field_loc_Nface, lmask=lmask) else call abort_ice(subname//' illegal k sum',file=__FILE__,line=__LINE__) endif diff --git a/configuration/scripts/Makefile b/configuration/scripts/Makefile index de3658a3e..51c36cee3 100644 --- a/configuration/scripts/Makefile +++ b/configuration/scripts/Makefile @@ -75,7 +75,7 @@ AR := ar .SUFFIXES: .SUFFIXES: .F90 .F .c .o -.PHONY: all cice libcice targets target db_files db_flags clean realclean helloworld calchk sumchk +.PHONY: all cice libcice targets target db_files db_flags clean realclean helloworld calchk sumchk bcstchk all: $(EXEC) cice: $(EXEC) @@ -94,7 +94,7 @@ targets: @echo " " @echo "Supported Makefile Targets are: cice, libcice, makdep, depends, clean, realclean" @echo " Diagnostics: targets, db_files, db_flags" - @echo " Unit Tests : helloworld, calchk, sumchk" + @echo " Unit Tests : helloworld, calchk, sumchk, bcstchk" target: targets db_files: @@ -148,6 +148,8 @@ calchk: $(EXEC) sumchk: $(EXEC) +bcstchk: $(EXEC) + # this builds just a subset of source code specified explicitly and requires a separate target HWOBJS := helloworld.o diff --git a/configuration/scripts/options/set_env.bcstchk b/configuration/scripts/options/set_env.bcstchk new file mode 100644 index 000000000..bf6b49bd2 --- /dev/null +++ b/configuration/scripts/options/set_env.bcstchk @@ -0,0 +1,2 @@ +setenv ICE_DRVOPT unittest/bcstchk +setenv ICE_TARGET bcstchk diff --git a/configuration/scripts/options/set_nml.tx1 b/configuration/scripts/options/set_nml.tx1 index dfdf6f19b..2ef4edd33 100644 --- a/configuration/scripts/options/set_nml.tx1 +++ 
b/configuration/scripts/options/set_nml.tx1 @@ -3,6 +3,7 @@ runtype = 'initial' ice_ic = 'default' grid_format = 'bin' grid_type = 'tripole' +ns_boundary_type = 'tripole' grid_file = 'ICE_MACHINE_INPUTDATA/CICE_data/grid/tx1/grid_tx1.bin' kmt_file = 'ICE_MACHINE_INPUTDATA/CICE_data/grid/tx1/kmt_tx1.bin' atm_data_dir = 'ICE_MACHINE_INPUTDATA/CICE_data/forcing/tx1/JRA55' diff --git a/configuration/scripts/tests/unittest_suite.ts b/configuration/scripts/tests/unittest_suite.ts index d422b2674..21810a1e3 100644 --- a/configuration/scripts/tests/unittest_suite.ts +++ b/configuration/scripts/tests/unittest_suite.ts @@ -3,3 +3,6 @@ unittest gx3 1x1 helloworld unittest gx3 1x1 calchk,short unittest gx3 4x1x25x29x4 sumchk unittest gx3 1x1x25x29x16 sumchk +unittest tx1 8x1 sumchk +unittest gx3 4x1 bcstchk +unittest gx3 1x1 bcstchk From 6321a06caf1a2bea55f3bf6a671d1afe7189e191 Mon Sep 17 00:00:00 2001 From: apcraig Date: Sun, 6 Jun 2021 13:33:15 -0600 Subject: [PATCH 4/6] update lsum16 to revert to double precision if NO_R16 is set --- .../comm/mpi/ice_global_reductions.F90 | 8 +++----- cicecore/drivers/unittest/sumchk/sumchk.F90 | 18 ++++++++++++++++++ 2 files changed, 21 insertions(+), 5 deletions(-) diff --git a/cicecore/cicedynB/infrastructure/comm/mpi/ice_global_reductions.F90 b/cicecore/cicedynB/infrastructure/comm/mpi/ice_global_reductions.F90 index 0a512ef3e..3bd9237a8 100644 --- a/cicecore/cicedynB/infrastructure/comm/mpi/ice_global_reductions.F90 +++ b/cicecore/cicedynB/infrastructure/comm/mpi/ice_global_reductions.F90 @@ -2293,7 +2293,7 @@ subroutine compute_sums_dbl(array2,sums8,mpicomm,numprocs) ! lsum16 = local sum with real*16 and scalar mpi allreduce, likely to be bfb ! WARNING: this does not work in several compilers and mpi ! implementations due to support for quad precision and consistency -! between underlying datatype in fortran and c. The source code +! between underlying datatypes in fortran and c. The source code ! can be turned off with a cpp NO_R16. Otherwise, it is recommended ! that the results be validated on any platform where it might be used. ! reprosum = fixed point method based on ordered double integer sums. @@ -2327,10 +2327,9 @@ subroutine compute_sums_dbl(array2,sums8,mpicomm,numprocs) real (real_kind), allocatable :: psums4(:) real (real_kind), allocatable :: sums4(:) real (dbl_kind) , allocatable :: psums8(:) -#ifndef NO_R16 + ! if r16 is not available (NO_R16), then r16 reverts to double precision (r8) real (r16_kind) , allocatable :: psums16(:) real (r16_kind) , allocatable :: sums16(:) -#endif integer (int_kind) :: ns,nf,i,j, ierr @@ -2362,7 +2361,7 @@ subroutine compute_sums_dbl(array2,sums8,mpicomm,numprocs) deallocate(psums8) -#ifndef NO_R16 + ! if no_r16 is set, this will revert to a double precision calculation like lsum8 elseif (bfbflag == 'lsum16') then allocate(psums16(nf)) psums16(:) = 0._r16_kind @@ -2385,7 +2384,6 @@ subroutine compute_sums_dbl(array2,sums8,mpicomm,numprocs) sums8 = real(sums16,dbl_kind) deallocate(psums16,sums16) -#endif elseif (bfbflag == 'lsum4') then allocate(psums4(nf)) diff --git a/cicecore/drivers/unittest/sumchk/sumchk.F90 b/cicecore/drivers/unittest/sumchk/sumchk.F90 index 6a692fa0d..d60cf0cb0 100644 --- a/cicecore/drivers/unittest/sumchk/sumchk.F90 +++ b/cicecore/drivers/unittest/sumchk/sumchk.F90 @@ -157,6 +157,23 @@ program sumchk ! correct results for relative digits check in sum allocate(reldigchk(nbflags,nscale)) +#ifdef NO_R16 + ! 
lsum16 will revert to a double precision calc like lsum8 + reldigchk(:,:) = 15.7 + reldigchk(1:3,1) = 14. + reldigchk(4,1) = 3.9 + reldigchk(1:3,2) = 9. + reldigchk(4,2) = 1. + reldigchk(1:3,3) = 1. + reldigchk(4,3) = 0. + reldigchk(1:3,4) = 0. + reldigchk(4,4) = 0. + reldigchk(5,4) = 15. + if (nx_global == 360 .and. ny_global == 240) then + reldigchk(1:3,1) = 13. + reldigchk(5,4) = 14. + endif +#else reldigchk(:,:) = 15.7 reldigchk(1:2,1) = 14. reldigchk(4,1) = 3.9 @@ -172,6 +189,7 @@ program sumchk reldigchk(1:2,1) = 13. reldigchk(5,4) = 14. endif +#endif ! test list n = 1 ; stringflag1(n) = 'dble sum easy' From d7d59a49665d432dfd06216d16915c3b52d4b98f Mon Sep 17 00:00:00 2001 From: apcraig Date: Sun, 6 Jun 2021 14:30:55 -0600 Subject: [PATCH 5/6] sync up serial ice_global_reductions.F90 --- .../infrastructure/comm/serial/ice_global_reductions.F90 | 8 +++----- cicecore/drivers/unittest/sumchk/sumchk.F90 | 2 +- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/cicecore/cicedynB/infrastructure/comm/serial/ice_global_reductions.F90 b/cicecore/cicedynB/infrastructure/comm/serial/ice_global_reductions.F90 index 049eae6ec..4ee5afe2f 100644 --- a/cicecore/cicedynB/infrastructure/comm/serial/ice_global_reductions.F90 +++ b/cicecore/cicedynB/infrastructure/comm/serial/ice_global_reductions.F90 @@ -2294,7 +2294,7 @@ subroutine compute_sums_dbl(array2,sums8,mpicomm,numprocs) ! lsum16 = local sum with real*16 and scalar mpi allreduce, likely to be bfb ! WARNING: this does not work in several compilers and mpi ! implementations due to support for quad precision and consistency -! between underlying datatype in fortran and c. The source code +! between underlying datatypes in fortran and c. The source code ! can be turned off with a cpp NO_R16. Otherwise, it is recommended ! that the results be validated on any platform where it might be used. ! reprosum = fixed point method based on ordered double integer sums. @@ -2328,10 +2328,9 @@ subroutine compute_sums_dbl(array2,sums8,mpicomm,numprocs) real (real_kind), allocatable :: psums4(:) real (real_kind), allocatable :: sums4(:) real (dbl_kind) , allocatable :: psums8(:) -#ifndef NO_R16 + ! if r16 is not available (NO_R16), then r16 reverts to double precision (r8) real (r16_kind) , allocatable :: psums16(:) real (r16_kind) , allocatable :: sums16(:) -#endif integer (int_kind) :: ns,nf,i,j, ierr @@ -2363,7 +2362,7 @@ subroutine compute_sums_dbl(array2,sums8,mpicomm,numprocs) deallocate(psums8) -#ifndef NO_R16 + ! 
if no_r16 is set, this will revert to a double precision calculation like lsum8 elseif (bfbflag == 'lsum16') then allocate(psums16(nf)) psums16(:) = 0._r16_kind @@ -2386,7 +2385,6 @@ subroutine compute_sums_dbl(array2,sums8,mpicomm,numprocs) sums8 = real(sums16,dbl_kind) deallocate(psums16,sums16) -#endif elseif (bfbflag == 'lsum4') then allocate(psums4(nf)) diff --git a/cicecore/drivers/unittest/sumchk/sumchk.F90 b/cicecore/drivers/unittest/sumchk/sumchk.F90 index d60cf0cb0..b0a4c81d5 100644 --- a/cicecore/drivers/unittest/sumchk/sumchk.F90 +++ b/cicecore/drivers/unittest/sumchk/sumchk.F90 @@ -224,7 +224,7 @@ program sumchk if (my_task == master_task) then write(6,*) ' ' write(6,*) 'test ',l - write(6,'(a,e10.4)') 'lscale = ',lscale + write(6,'(a,e11.4)') 'lscale = ',lscale write(6,'(6x,a,28x,a,8x,a,10x,a)') 'test','bfbflag','sum','digits of precision (max is 16)' endif do iblock = 1,numBlocks From cde764ff363b8249a7157cb146e381d7317d8d74 Mon Sep 17 00:00:00 2001 From: apcraig Date: Mon, 7 Jun 2021 14:35:32 -0600 Subject: [PATCH 6/6] - add optics_file_fieldname namelist - add grid_type and ns_boundary_type tripole check - update sumchk unit test to check both Nface and center points. these are treated differently for tripole grids. - update documentation of unit tests --- cicecore/cicedynB/general/ice_forcing_bgc.F90 | 6 +- .../comm/mpi/ice_global_reductions.F90 | 4 +- .../comm/serial/ice_global_reductions.F90 | 4 +- cicecore/cicedynB/infrastructure/ice_grid.F90 | 10 ++ cicecore/drivers/unittest/bcstchk/bcstchk.F90 | 4 + cicecore/drivers/unittest/calchk/calchk.F90 | 3 + cicecore/drivers/unittest/sumchk/sumchk.F90 | 148 ++++++++++-------- cicecore/shared/ice_arrays_column.F90 | 3 +- cicecore/shared/ice_init_column.F90 | 7 +- configuration/scripts/ice_in | 1 + doc/source/cice_index.rst | 2 + doc/source/user_guide/ug_case_settings.rst | 5 +- doc/source/user_guide/ug_testing.rst | 14 ++ 13 files changed, 137 insertions(+), 74 deletions(-) diff --git a/cicecore/cicedynB/general/ice_forcing_bgc.F90 b/cicecore/cicedynB/general/ice_forcing_bgc.F90 index b7c39ad4c..383d388de 100644 --- a/cicecore/cicedynB/general/ice_forcing_bgc.F90 +++ b/cicecore/cicedynB/general/ice_forcing_bgc.F90 @@ -17,7 +17,7 @@ module ice_forcing_bgc use ice_calendar, only: dt, istep, msec, mday, mmonth use ice_fileunits, only: nu_diag use ice_arrays_column, only: restore_bgc, & - bgc_data_dir, fe_data_type, optics_file + bgc_data_dir, fe_data_type, optics_file, optics_file_fieldname use ice_constants, only: c0, p1 use ice_constants, only: field_loc_center, field_type_scalar use ice_exit, only: abort_ice @@ -966,10 +966,10 @@ subroutine faero_optics write (nu_diag,*) ' ' write (nu_diag,*) 'Read optics for modal aerosol treament in' write (nu_diag,*) trim(optics_file) + write (nu_diag,*) 'Read optics file field name = ',trim(optics_file_fieldname) call ice_open_nc(optics_file,fid) -! fieldname='bcint_enh_mam_cice' - fieldname='modalBCabsorptionParameter5band' + fieldname=optics_file_fieldname status = nf90_inq_varid(fid, trim(fieldname), varid) diff --git a/cicecore/cicedynB/infrastructure/comm/mpi/ice_global_reductions.F90 b/cicecore/cicedynB/infrastructure/comm/mpi/ice_global_reductions.F90 index 3bd9237a8..0728ac105 100644 --- a/cicecore/cicedynB/infrastructure/comm/mpi/ice_global_reductions.F90 +++ b/cicecore/cicedynB/infrastructure/comm/mpi/ice_global_reductions.F90 @@ -1691,7 +1691,7 @@ function global_maxval_scalar_int_nodist (scalar, communicator) & result(globalMaxval) ! 
Computes the global maximum value of a scalar value across -! a distributed machine. +! a communicator. This method supports testing. ! ! This is actually the specific interface for the generic global_maxval ! function corresponding to single precision scalars. @@ -2236,7 +2236,7 @@ function global_minval_scalar_int_nodist (scalar, communicator) & result(globalMinval) ! Computes the global minimum value of a scalar value across -! a distributed machine. +! a communicator. This method supports testing. ! ! This is actually the specific interface for the generic global_minval ! function corresponding to single precision scalars. diff --git a/cicecore/cicedynB/infrastructure/comm/serial/ice_global_reductions.F90 b/cicecore/cicedynB/infrastructure/comm/serial/ice_global_reductions.F90 index 4ee5afe2f..a024698d5 100644 --- a/cicecore/cicedynB/infrastructure/comm/serial/ice_global_reductions.F90 +++ b/cicecore/cicedynB/infrastructure/comm/serial/ice_global_reductions.F90 @@ -1692,7 +1692,7 @@ function global_maxval_scalar_int_nodist (scalar, communicator) & result(globalMaxval) ! Computes the global maximum value of a scalar value across -! a distributed machine. +! a communicator. This method supports testing. ! ! This is actually the specific interface for the generic global_maxval ! function corresponding to single precision scalars. @@ -2237,7 +2237,7 @@ function global_minval_scalar_int_nodist (scalar, communicator) & result(globalMinval) ! Computes the global minimum value of a scalar value across -! a distributed machine. +! a communicator. This method supports testing. ! ! This is actually the specific interface for the generic global_minval ! function corresponding to single precision scalars. diff --git a/cicecore/cicedynB/infrastructure/ice_grid.F90 b/cicecore/cicedynB/infrastructure/ice_grid.F90 index 2304877d2..2d660af81 100644 --- a/cicecore/cicedynB/infrastructure/ice_grid.F90 +++ b/cicecore/cicedynB/infrastructure/ice_grid.F90 @@ -247,6 +247,16 @@ subroutine init_grid1 allocate(work_g1(nx_global,ny_global)) allocate(work_g2(nx_global,ny_global)) + ! check tripole flags here + ! can't check in init_data because ns_boundary_type is not yet read + ! can't check in init_domain_blocks because grid_type is not accessible due to circular logic + + if (grid_type == 'tripole' .and. ns_boundary_type /= 'tripole' .and. & + ns_boundary_type /= 'tripoleT') then + call abort_ice(subname//'ERROR: grid_type tripole needs tripole ns_boundary_type', & + file=__FILE__, line=__LINE__) + endif + if (trim(grid_type) == 'displaced_pole' .or. & trim(grid_type) == 'tripole' .or. & trim(grid_type) == 'regional' ) then diff --git a/cicecore/drivers/unittest/bcstchk/bcstchk.F90 b/cicecore/drivers/unittest/bcstchk/bcstchk.F90 index 56e8a4421..c0dbb026c 100644 --- a/cicecore/drivers/unittest/bcstchk/bcstchk.F90 +++ b/cicecore/drivers/unittest/bcstchk/bcstchk.F90 @@ -1,6 +1,10 @@ program bcstchk + ! This tests the CICE ice_broadcast infrastructure by calling the + ! methods with hardwired input and known outputs and verifying the + ! results. 
+ use ice_kinds_mod, only: int_kind, dbl_kind, real_kind, log_kind use ice_communicate, only: my_task, master_task, get_num_procs, get_rank, MPI_COMM_ICE use ice_communicate, only: init_communicate, get_num_procs, ice_barrier diff --git a/cicecore/drivers/unittest/calchk/calchk.F90 b/cicecore/drivers/unittest/calchk/calchk.F90 index bbd61b63e..c8472faba 100644 --- a/cicecore/drivers/unittest/calchk/calchk.F90 +++ b/cicecore/drivers/unittest/calchk/calchk.F90 @@ -1,6 +1,9 @@ program calchk + ! This tests the CICE calendar by calling it directly from this driver + ! and verifies results from hardwired inputs with known outputs + use ice_kinds_mod, only: int_kind, dbl_kind use ice_calendar, only: myear, mmonth, mday, msec use ice_calendar, only: year_init, month_init, day_init, sec_init diff --git a/cicecore/drivers/unittest/sumchk/sumchk.F90 b/cicecore/drivers/unittest/sumchk/sumchk.F90 index b0a4c81d5..a811f5964 100644 --- a/cicecore/drivers/unittest/sumchk/sumchk.F90 +++ b/cicecore/drivers/unittest/sumchk/sumchk.F90 @@ -1,6 +1,16 @@ program sumchk + ! This tests the CICE ice_global_reductions infrastructure by + ! using CICE_InitMod (from the standalone model) to read/initialize + ! a CICE grid/configuration. Then methods in ice_global_reductions + ! are verified using hardwired inputs with known outputs. + ! A grid needs to be initialized because most of the global reduction + ! infrastructure assumes haloed and distributed arrays are passed + ! possibly with a tripole seam. These interfaces are more than just + ! layers on top of MPI. They have the CICE grid/decomposition + ! infrastructure built-in. + use CICE_InitMod use CICE_FinalMod use ice_kinds_mod, only: int_kind, dbl_kind, real_kind @@ -10,14 +20,14 @@ program sumchk use ice_domain, only: distrb_info use ice_blocks, only: block, get_block, nx_block, ny_block, nblocks_tot use ice_distribution, only: ice_distributionGetBlockID, ice_distributionGet - use ice_constants, only: field_loc_Nface + use ice_constants, only: field_loc_center, field_loc_Nface use ice_fileunits, only: bfbflag use ice_global_reductions use ice_exit, only: abort_ice implicit none - integer(int_kind) :: i, j, k, l, n, nscale, iblock, ib, ie, jb, je + integer(int_kind) :: i, j, k, l, m, n, iblock, ib, ie, jb, je integer(int_kind) :: blockID, numBlocks type (block) :: this_block @@ -35,7 +45,6 @@ program sumchk real(dbl_kind) :: locval8, sumval8, minval8, maxval8 real(real_kind) :: locval4, sumval4, minval4, maxval4 integer(int_kind) :: iocval, locvali, sumvali, corvali, minvali, maxvali - real(dbl_kind) :: lscale real(dbl_kind) :: reldig,reldigchk_now real(dbl_kind) ,allocatable :: reldigchk(:,:) @@ -56,8 +65,22 @@ program sumchk integer(int_kind) :: npes, ierr, ntask + + integer(int_kind), parameter :: mfld_loc = 2 + integer(int_kind), parameter :: field_loc(mfld_loc) = & + (/ field_loc_center, field_loc_Nface /) + character(len=16), parameter :: field_loc_string(mfld_loc) = & + (/ 'field_loc_center', 'field_loc_Nface ' /) + + integer(int_kind), parameter :: nscale = 4 + real(dbl_kind), parameter :: lscale(nscale) = & + (/ 1.0_dbl_kind, & + 1.0e8_dbl_kind, & + 1.0e16_dbl_kind, & + 1.0e32_dbl_kind /) + integer(int_kind), parameter :: nbflags = 6 - character(len=8), parameter :: bflags(1:nbflags) = & + character(len=8), parameter :: bflags(1:nbflags) = & (/ 'off ','lsum8 ','lsum16 ','lsum4 ','ddpdd ','reprosum' /) character(len=*), parameter :: & passflag = 'PASS', & @@ -130,31 +153,8 @@ program sumchk allocate(mmaski (nx_block,ny_block,max_blocks)) allocate(lmask 
(nx_block,ny_block,max_blocks)) - ! set corval to something a little interesting (not 1.0 for instance which gives atypical results) - - corval = 4.0_dbl_kind/3.0_dbl_kind - iocval = 8 - if (nx_global == 360 .and. ny_global == 240) then - locval = corval / real((nblocks_tot*(block_size_x*block_size_y-2)-nx_global/2),dbl_kind) - corvali = (nblocks_tot*(block_size_x*block_size_y-2)-nx_global/2)*iocval - elseif (nx_global == 100 .and. ny_global == 116) then - locval = corval / real(nblocks_tot*(block_size_x*block_size_y-2),dbl_kind) - corvali = nblocks_tot*(block_size_x*block_size_y-2)*iocval - else - call abort_ice(subname//' ERROR not set for this grid ') - endif - - if (my_task == master_task) then - write(6,*) ' local array value = ',locval - write(6,*) ' correct value = ',corval - write(6,*) ' correct value int = ',corvali - write(6,*) ' ' - endif - call ice_distributionGet(distrb_info, numLocalBlocks = numBlocks) - nscale = 4 - ! correct results for relative digits check in sum allocate(reldigchk(nbflags,nscale)) #ifdef NO_R16 @@ -212,21 +212,44 @@ program sumchk n = n + 1; stringflag1(n) = 'real prod sum + logical mask' n = n + 1; stringflag1(n) = 'intg prod sum + logical mask' + do m = 1, mfld_loc + + ! set corval to something a little interesting (not 1.0 for instance which gives atypical results) + corval = 4.0_dbl_kind/3.0_dbl_kind + iocval = 8 + ! tuned for gx3 and tx1 only + if ((nx_global == 100 .and. ny_global == 116) .or. & + (nx_global == 360 .and. ny_global == 240)) then + if (field_loc(m) == field_loc_Nface .and. nx_global == 360 .and. ny_global == 240) then + ! tx1 tripole face, need to adjust local value to remove half of row at ny_global + ! or modify corval to account for different sum + locval = corval / real((nblocks_tot*(block_size_x*block_size_y-2)-nx_global/2),dbl_kind) + corvali = (nblocks_tot*(block_size_x*block_size_y-2)-nx_global/2)*iocval + else + locval = corval / real(nblocks_tot*(block_size_x*block_size_y-2),dbl_kind) + corvali = nblocks_tot*(block_size_x*block_size_y-2)*iocval + endif + else + call abort_ice(subname//' ERROR not set for this grid ') + endif + do l = 1, nscale + if (my_task == master_task) then + write(6,*) ' ' + write(6,'(a,i4,a,i4)') 'test: m = ',m,': l = ', l + write(6,'(a,a )') 'field_loc = ',trim(field_loc_string(m)) + write(6,'(a,e11.4)') 'lscale = ',lscale(l) + write(6,*) 'local array value = ',locval + write(6,*) 'correct value = ',corval + write(6,*) 'correct value int = ',corvali + write(6,*) ' ' + write(6,'(6x,a,26x,a,10x,a,10x,a)') 'test','bfbflag','sum','digits of precision (max is 16)' + endif + arrayA(:,:,:) = locval arrayB(:,:,:) = locval arrayC(:,:,:) = locval lmask(:,:,:) = .true. - lscale = 1.0_dbl_kind - if (l == 2) lscale = 1.0e8_dbl_kind - if (l == 3) lscale = 1.0e16_dbl_kind - if (l == 4) lscale = 1.0e32_dbl_kind - if (my_task == master_task) then - write(6,*) ' ' - write(6,*) 'test ',l - write(6,'(a,e11.4)') 'lscale = ',lscale - write(6,'(6x,a,28x,a,8x,a,10x,a)') 'test','bfbflag','sum','digits of precision (max is 16)' - endif do iblock = 1,numBlocks call ice_distributionGetBlockID(distrb_info, iblock, blockID) this_block = get_block(blockID, blockID) @@ -237,11 +260,11 @@ program sumchk lmask(ie,je-1,iblock) = .false. lmask(ie,je-2,iblock) = .false. 
- arrayA(ie,je-1,iblock) = locval * lscale + arrayA(ie,je-1,iblock) = locval * lscale(l) arrayA(ie,je-2,iblock) = -arrayA(ie,je-1,iblock) - arrayB(ie,je-1,iblock) = locval * lscale + arrayB(ie,je-1,iblock) = locval * lscale(l) arrayB(ie,je-2,iblock) = arrayB(ie,je-1,iblock) - arrayC(ib,jb,iblock) = locval * lscale + arrayC(ib,jb,iblock) = locval * lscale(l) arrayC(ib+1,jb,iblock) = -arrayC(ib,jb,iblock) arrayiA(:,:,iblock) = iocval arrayiB(:,:,iblock) = iocval @@ -258,82 +281,82 @@ program sumchk if (k == 1) then array8(:,:,:) = arrayC(:,:,:) - sumval8 = global_sum(array8, distrb_info, field_loc_Nface) + sumval8 = global_sum(array8, distrb_info, field_loc(m)) elseif (k == 2) then array8(:,:,:) = arrayA(:,:,:) - sumval8 = global_sum(array8, distrb_info, field_loc_Nface) + sumval8 = global_sum(array8, distrb_info, field_loc(m)) elseif (k == 3) then array4(:,:,:) = arrayA(:,:,:) - sumval4 = global_sum(array4, distrb_info, field_loc_Nface) + sumval4 = global_sum(array4, distrb_info, field_loc(m)) sumval8 = sumval4 elseif (k == 4) then arrayi1 = arrayiA - sumvali = global_sum(arrayi1, distrb_info, field_loc_Nface) + sumvali = global_sum(arrayi1, distrb_info, field_loc(m)) elseif (k == 5) then mmask8(:,:,:) = 6.0_dbl_kind array8(:,:,:) = arrayA(:,:,:)/mmask8(:,:,:) - sumval8 = global_sum(array8, distrb_info, field_loc_Nface, mmask=mmask8) + sumval8 = global_sum(array8, distrb_info, field_loc(m), mmask=mmask8) elseif (k == 6) then mmask4(:,:,:) = 6.0_real_kind array4(:,:,:) = arrayA(:,:,:)/mmask4(:,:,:) - sumval4 = global_sum(array4, distrb_info, field_loc_Nface, mmask=mmask4) + sumval4 = global_sum(array4, distrb_info, field_loc(m), mmask=mmask4) sumval8 = sumval4 elseif (k == 7) then mmaski(:,:,:) = 2 arrayi1(:,:,:) = arrayiA(:,:,:)/mmaski(:,:,:) - sumvali = global_sum(arrayi1, distrb_info, field_loc_Nface, mmask=mmaski) + sumvali = global_sum(arrayi1, distrb_info, field_loc(m), mmask=mmaski) elseif (k == 8) then array8(:,:,:) = arrayB(:,:,:) - sumval8 = global_sum(array8, distrb_info, field_loc_Nface, lmask=lmask) + sumval8 = global_sum(array8, distrb_info, field_loc(m), lmask=lmask) elseif (k == 9) then array4(:,:,:) = arrayB(:,:,:) - sumval4 = global_sum(array4, distrb_info, field_loc_Nface, lmask=lmask) + sumval4 = global_sum(array4, distrb_info, field_loc(m), lmask=lmask) sumval8 = sumval4 elseif (k == 10) then arrayi1(:,:,:) = arrayiB(:,:,:) - sumvali = global_sum(arrayi1, distrb_info, field_loc_Nface, lmask=lmask) + sumvali = global_sum(arrayi1, distrb_info, field_loc(m), lmask=lmask) elseif (k == 11) then array82(:,:,:) = 7.0_dbl_kind array8(:,:,:) = arrayA(:,:,:)/array82(:,:,:) - sumval8 = global_sum_prod(array8, array82, distrb_info, field_loc_Nface) + sumval8 = global_sum_prod(array8, array82, distrb_info, field_loc(m)) elseif (k == 12) then array42(:,:,:) = 7.0_real_kind array4(:,:,:) = arrayA(:,:,:)/array42(:,:,:) - sumval4 = global_sum_prod(array4, array42, distrb_info, field_loc_Nface) + sumval4 = global_sum_prod(array4, array42, distrb_info, field_loc(m)) sumval8 = sumval4 elseif (k == 13) then arrayi2(:,:,:) = 4 arrayi1(:,:,:) = arrayiA(:,:,:)/arrayi2(:,:,:) - sumvali = global_sum_prod(arrayi1, arrayi2, distrb_info, field_loc_Nface) + sumvali = global_sum_prod(arrayi1, arrayi2, distrb_info, field_loc(m)) elseif (k == 14) then array82(:,:,:) = 7.0_dbl_kind mmask8(:,:,:) = 6.0_dbl_kind array8(:,:,:) = arrayA(:,:,:)/(mmask8(:,:,:)*array82(:,:,:)) - sumval8 = global_sum_prod(array8, array82, distrb_info, field_loc_Nface, mmask=mmask8) + sumval8 = global_sum_prod(array8, array82, 
distrb_info, field_loc(m), mmask=mmask8) elseif (k == 15) then array42(:,:,:) = 7.0_real_kind mmask4(:,:,:) = 6.0_real_kind array4(:,:,:) = arrayA(:,:,:)/(mmask4(:,:,:)*array42(:,:,:)) - sumval4 = global_sum_prod(array4, array42, distrb_info, field_loc_Nface, mmask=mmask4) + sumval4 = global_sum_prod(array4, array42, distrb_info, field_loc(m), mmask=mmask4) sumval8 = sumval4 elseif (k == 16) then arrayi2(:,:,:) = 2 mmaski(:,:,:) = 2 arrayi1(:,:,:) = arrayiA(:,:,:)/(arrayi2(:,:,:)*mmaski(:,:,:)) - sumvali = global_sum_prod(arrayi1, arrayi2, distrb_info, field_loc_Nface, mmask=mmaski) + sumvali = global_sum_prod(arrayi1, arrayi2, distrb_info, field_loc(m), mmask=mmaski) elseif (k == 17) then array82(:,:,:) = 7.0_dbl_kind array8(:,:,:) = arrayB(:,:,:)/array82(:,:,:) - sumval8 = global_sum_prod(array8, array82, distrb_info, field_loc_Nface, lmask=lmask) + sumval8 = global_sum_prod(array8, array82, distrb_info, field_loc(m), lmask=lmask) elseif (k == 18) then array42(:,:,:) = 7.0_real_kind array4(:,:,:) = arrayB(:,:,:)/array42(:,:,:) - sumval4 = global_sum_prod(array4, array42, distrb_info, field_loc_Nface, lmask=lmask) + sumval4 = global_sum_prod(array4, array42, distrb_info, field_loc(m), lmask=lmask) sumval8 = sumval4 elseif (k == 19) then arrayi2(:,:,:) = 4 arrayi1(:,:,:) = arrayiB(:,:,:)/(arrayi2(:,:,:)) - sumvali = global_sum_prod(arrayi1, arrayi2, distrb_info, field_loc_Nface, lmask=lmask) + sumvali = global_sum_prod(arrayi1, arrayi2, distrb_info, field_loc(m), lmask=lmask) else call abort_ice(subname//' illegal k sum',file=__FILE__,line=__LINE__) endif @@ -374,9 +397,10 @@ program sumchk endif endif endif - enddo ! l - enddo ! n - enddo ! k + enddo ! n + enddo ! k + enddo ! l + enddo ! m ! --------------------------- ! Test Global Min/Max diff --git a/cicecore/shared/ice_arrays_column.F90 b/cicecore/shared/ice_arrays_column.F90 index 7b1f2ee15..46ea6f62e 100644 --- a/cicecore/shared/ice_arrays_column.F90 +++ b/cicecore/shared/ice_arrays_column.F90 @@ -268,7 +268,8 @@ module ice_arrays_column bgc_data_dir ! directory for biogeochemistry data character(char_len_long), public :: & - optics_file ! modal aero optics file + optics_file, & ! modal aero optics file + optics_file_fieldname ! modal aero optics file fieldname real (kind=dbl_kind), dimension(:), allocatable, public :: & R_C2N_DON ! carbon to nitrogen mole ratio of DON pool diff --git a/cicecore/shared/ice_init_column.F90 b/cicecore/shared/ice_init_column.F90 index 746f42574..4f4641467 100644 --- a/cicecore/shared/ice_init_column.F90 +++ b/cicecore/shared/ice_init_column.F90 @@ -1004,7 +1004,7 @@ end subroutine init_hbrine subroutine input_zbgc - use ice_arrays_column, only: restore_bgc, optics_file + use ice_arrays_column, only: restore_bgc, optics_file, optics_file_fieldname use ice_broadcast, only: broadcast_scalar use ice_restart_column, only: restart_bgc, restart_zsal, & restart_hbrine @@ -1047,7 +1047,7 @@ subroutine input_zbgc restore_bgc, restart_bgc, scale_bgc, solve_zsal, restart_zsal, & tr_bgc_Nit, tr_bgc_C, tr_bgc_chl, tr_bgc_Am, tr_bgc_Sil, & tr_bgc_DMS, tr_bgc_PON, tr_bgc_hum, tr_bgc_DON, tr_bgc_Fe, & - grid_o, grid_o_t, l_sk, grid_oS, optics_file, & + grid_o, grid_o_t, l_sk, grid_oS, optics_file, optics_file_fieldname, & l_skS, phi_snow, initbio_frac, frazil_scav, & ratio_Si2N_diatoms , ratio_Si2N_sp , ratio_Si2N_phaeo , & ratio_S2N_diatoms , ratio_S2N_sp , ratio_S2N_phaeo , & @@ -1105,6 +1105,7 @@ subroutine input_zbgc tr_zaero = .false. ! z aerosol tracers modal_aero = .false. ! 
use modal aerosol treatment of aerosols optics_file = 'unknown_optics_file' ! modal aerosol optics file + optics_file_fieldname = 'unknown_optics_fieldname' ! modal aerosol optics file fieldname restore_bgc = .false. ! restore bgc if true solve_zsal = .false. ! update salinity tracer profile from solve_S_dt restart_bgc = .false. ! biogeochemistry restart @@ -1322,6 +1323,7 @@ subroutine input_zbgc call broadcast_scalar(dEdd_algae, master_task) call broadcast_scalar(modal_aero, master_task) call broadcast_scalar(optics_file, master_task) + call broadcast_scalar(optics_file_fieldname, master_task) call broadcast_scalar(grid_o, master_task) call broadcast_scalar(grid_o_t, master_task) call broadcast_scalar(l_sk, master_task) @@ -1692,6 +1694,7 @@ subroutine input_zbgc write(nu_diag,1010) ' tr_zaero = ', tr_zaero write(nu_diag,1020) ' number of aerosols = ', n_zaero write(nu_diag,1031) ' optics_file = ', trim(optics_file) + write(nu_diag,1031) ' optics_file_fieldname = ', trim(optics_file_fieldname) ! bio parameters write(nu_diag,1000) ' grid_o = ', grid_o write(nu_diag,1000) ' grid_o_t = ', grid_o_t diff --git a/configuration/scripts/ice_in b/configuration/scripts/ice_in index 74fc548f0..79103425d 100644 --- a/configuration/scripts/ice_in +++ b/configuration/scripts/ice_in @@ -263,6 +263,7 @@ tr_zaero = .false. modal_aero = .false. optics_file = 'ICE_MACHINE_INPUTDATA/CICE_data/forcing/snicar_optics_5bnd_snow_and_aerosols.nc' + optics_file_fieldname = 'modalBCabsorptionParameter5band' skl_bgc = .false. z_tracers = .false. dEdd_algae = .false. diff --git a/doc/source/cice_index.rst b/doc/source/cice_index.rst index 365385e25..08718fdc8 100644 --- a/doc/source/cice_index.rst +++ b/doc/source/cice_index.rst @@ -464,6 +464,8 @@ either Celsius or Kelvin units). 
"ocn_data_type", ":math:`\bullet` source of surface temperature, salinity data", "" "omega", "angular velocity of Earth", "7.292\ :math:`\times`\ 10\ :math:`^{-5}` rad/s" "opening", "rate of ice opening due to divergence and shear", "1/s" + "optics_file", "optics filename associated with modal aerosols", "" + "optics_file_fieldname", "optics file fieldname that is read", "" "**P**", "", "" "p001", "1/1000", "" "p01", "1/100", "" diff --git a/doc/source/user_guide/ug_case_settings.rst b/doc/source/user_guide/ug_case_settings.rst index 355b5665d..225ab91b1 100644 --- a/doc/source/user_guide/ug_case_settings.rst +++ b/doc/source/user_guide/ug_case_settings.rst @@ -647,7 +647,7 @@ zbgc_nml "``l_skS``", "real", "z salinity characteristic diffusive scale in m", "7.0" "``max_dfe_doc1``", "real", "max ratio of dFe to saccharides in the ice in nm Fe / muM C", "0.2" "``max_loss``", "real", "restrict uptake to percent of remaining value", "0.9" - "``modal_aero``", "logical", "modal aersols", "``.false.``" + "``modal_aero``", "logical", "modal aerosols", "``.false.``" "``mort_pre_diatoms``", "real", "mortality diatoms", "0.007" "``mort_pre_phaeo``", "real", "mortality phaeocystis", "0.007" "``mort_pre_sp``", "real", "mortality small plankton", "0.007" @@ -658,7 +658,8 @@ zbgc_nml "``mu_max_phaeo``", "real", "maximum growth rate phaeocystis per day", "0.851" "``mu_max_sp``", "real", "maximum growth rate small plankton per day", "0.851" "``nitratetype``", "real", "mobility type between stationary and mobile nitrate", "-1.0" - "``optics_file``", "string", "optics file associated with modal aersols", "unknown_optics_file" + "``optics_file``", "string", "optics file associated with modal aerosols", "unknown_optics_file" + "``optics_file_fieldname``", "string", "optics file fieldname to read", "unknown_optics_fieldname" "``op_dep_min``", "real", "light attenuates for optical depths exceeding min", "0.1" "``phi_snow``", "real", "snow porosity for brine height tracer", "0.5" "``ratio_chl2N_diatoms``", "real", "algal chl to N in mg/mmol diatoms", "2.1" diff --git a/doc/source/user_guide/ug_testing.rst b/doc/source/user_guide/ug_testing.rst index 5a289db6a..f2bc62656 100644 --- a/doc/source/user_guide/ug_testing.rst +++ b/doc/source/user_guide/ug_testing.rst @@ -667,6 +667,20 @@ in **configuration/scripts/options**. In particular, **ICE_DRVOPT** and **configuration/scripts/Makefile** and create a target for the unit test. The unit tests calchk or helloworld can be used as examples. +The following are brief descriptions of some of the current unit tests, + + - **bcstchk** is a unit test that exercises the methods in ice_broadcast.F90. This test does not + depend on the CICE grid to carry out the testing. By testing with a serial and mpi configuration, + both sets of software are tested independently and correctness is verified. + - **calchk** is a unit test that exercises the CICE calendar over 100,000 years and verifies correctness. + This test does not depend on the CICE initialization. + - **helloworld** is a simple test that writes out helloworld and uses no CICE infrastructure. + This tests exists to demonstrate how to build a unit test by specifying the object files directly + in the Makefile + - **sumchk** is a unit test that exercises the methods in ice_global_reductions.F90. This test requires + that a CICE grid and decomposition be initialized, so CICE_InitMod.F90 is leveraged to initialize + the model prior to running a suite of unit validation tests to verify correctness. + .. _testreporting: