diff --git a/LICENSE.pdf b/LICENSE.pdf
index 80ae31d51..18e26a099 100644
Binary files a/LICENSE.pdf and b/LICENSE.pdf differ
diff --git a/cice.setup b/cice.setup
index 4c7a222ff..0e574f803 100755
--- a/cice.setup
+++ b/cice.setup
@@ -684,7 +684,7 @@ EOF
     set thrd  = `echo ${pesx} | cut -d x -f 2`
     set blckx = `echo ${pesx} | cut -d x -f 3`
     set blcky = `echo ${pesx} | cut -d x -f 4`
-    set mblck = 0
+    set mblck = -1
     if (${task} == 0 || ${thrd} == 0 || ${blckx} == 0 || ${blcky} == 0) then
       echo "${0}: ERROR in -p argument, cannot have zeros"
       exit -1
@@ -696,7 +696,7 @@ EOF
     set thrd  = `echo ${pesx} | cut -d x -f 2`
     set blckx = 0
     set blcky = 0
-    set mblck = 0
+    set mblck = -1
     if (${task} == 0 || ${thrd} == 0) then
       echo "${0}: ERROR in -p argument, cannot have zeros"
       exit -1
@@ -708,7 +708,7 @@ EOF
     set thrd  = 1
     set blckx = 0
     set blcky = 0
-    set mblck = 0
+    set mblck = -1
     if (${task} == 0) then
       echo "${0}: ERROR in -p argument, cannot have zeros"
       exit -1
@@ -757,7 +757,7 @@ EOF
   # update pesx based on use defined settings and machine limits to reflect actual value
   set pesx = ${task}x${thrd}x${blckx}x${blcky}x${mblck}
-  if (${mblck} == 0) then
+  if (${mblck} <= 0) then
     set pesx = ${task}x${thrd}x${blckx}x${blcky}
   endif
   if (${blckx} == 0 || ${blcky} == 0) then
@@ -838,7 +838,7 @@ EOF
   endif

   # from basic script dir to case
-  foreach file (cice.build cice.settings Makefile ice_in makdep.c setup_run_dirs.csh timeseries.csh timeseries.py)
+  foreach file (cice.build cice.settings Makefile ice_in makdep.c setup_run_dirs.csh ciceplots.csh ciceplots2d.py timeseries.py)
     if !(-e ${ICE_SCRIPTS}/$file) then
       echo "${0}: ERROR, ${ICE_SCRIPTS}/$file not found"
       exit -1
diff --git a/cicecore/cicedyn/dynamics/ice_dyn_core1d.F90 b/cicecore/cicedyn/dynamics/ice_dyn_core1d.F90
index f3f71b490..b95d2be67 100644
--- a/cicecore/cicedyn/dynamics/ice_dyn_core1d.F90
+++ b/cicecore/cicedyn/dynamics/ice_dyn_core1d.F90
@@ -1,5 +1,5 @@
 !===============================================================================
-! Copyright (C) 2023, Intel Corporation
+! Copyright (C) 2024, Intel Corporation
 ! Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
 ! 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
 ! 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
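The cice.setup hunks above change the "unset" sentinel for the block count from 0 to -1: zero remains a user error trapped by the -p parsing, while a negative value now means "not specified, let the model compute it" (the ice_domain.F90 and ice_distribution.F90 hunks later in this patch act on that). A minimal standalone sketch of the convention with made-up values; num_local_blocks is an illustrative stand-in, not a CICE variable:

program max_blocks_sentinel
   implicit none
   integer :: max_blocks        ! from namelist; -1 = unset, 0 = user error
   integer :: num_local_blocks  ! stand-in for blocks actually owned by this task

   max_blocks = -1              ! default now written by cice.setup
   num_local_blocks = 12        ! toy value

   if (max_blocks == 0) then
      stop 'ERROR: cannot have zeros'    ! mirrors the -p argument check
   else if (max_blocks < 0) then
      max_blocks = num_local_blocks      ! mirrors the new downstream logic
   endif
   write(*,*) 'max_blocks = ', max_blocks
end program max_blocks_sentinel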
diff --git a/cicecore/cicedyn/dynamics/ice_dyn_evp.F90 b/cicecore/cicedyn/dynamics/ice_dyn_evp.F90
index 301a89916..68101f579 100644
--- a/cicecore/cicedyn/dynamics/ice_dyn_evp.F90
+++ b/cicecore/cicedyn/dynamics/ice_dyn_evp.F90
@@ -718,8 +718,8 @@ subroutine evp (dt)
                               field_loc_Eface, field_type_vector)
          call ice_timer_stop(timer_bound)

-         call grid_average_X2Y('S', uvelE, 'E', uvel, 'U')
-         call grid_average_X2Y('S', vvelN, 'N', vvel, 'U')
+         call grid_average_X2Y('A', uvelE, 'E', uvel, 'U')
+         call grid_average_X2Y('A', vvelN, 'N', vvel, 'U')
          uvel(:,:,:) = uvel(:,:,:)*uvm(:,:,:)
          vvel(:,:,:) = vvel(:,:,:)*uvm(:,:,:)
       endif
@@ -1084,8 +1084,8 @@ subroutine evp (dt)
                           field_loc_Eface, field_type_vector, &
                           vvelE)

-      call grid_average_X2Y('S', uvelE, 'E', uvel, 'U')
-      call grid_average_X2Y('S', vvelN, 'N', vvel, 'U')
+      call grid_average_X2Y('A', uvelE, 'E', uvel, 'U')
+      call grid_average_X2Y('A', vvelN, 'N', vvel, 'U')
       uvel(:,:,:) = uvel(:,:,:)*uvm(:,:,:)
       vvel(:,:,:) = vvel(:,:,:)*uvm(:,:,:)

@@ -1275,8 +1275,8 @@ subroutine evp (dt)
                           field_loc_Nface, field_type_vector, &
                           uvelN, vvelN)

-      call grid_average_X2Y('S', uvelE, 'E', uvel, 'U')
-      call grid_average_X2Y('S', vvelN, 'N', vvel, 'U')
+      call grid_average_X2Y('A', uvelE, 'E', uvel, 'U')
+      call grid_average_X2Y('A', vvelN, 'N', vvel, 'U')
       uvel(:,:,:) = uvel(:,:,:)*uvm(:,:,:)
       vvel(:,:,:) = vvel(:,:,:)*uvm(:,:,:)

diff --git a/cicecore/cicedyn/general/ice_forcing.F90 b/cicecore/cicedyn/general/ice_forcing.F90
index b977f54aa..241bf8b5d 100755
--- a/cicecore/cicedyn/general/ice_forcing.F90
+++ b/cicecore/cicedyn/general/ice_forcing.F90
@@ -2276,6 +2276,9 @@ subroutine JRA55_files(yr)
       enddo

       if (.not.exists) then
+         write(nu_diag,*) subname,' atm_data_dir = ',trim(atm_data_dir)
+         write(nu_diag,*) subname,' atm_data_type_prefix = ',trim(atm_data_type_prefix)
+         write(nu_diag,*) subname,' atm_data_version = ',trim(atm_data_version)
          call abort_ice(error_message=subname//' could not find forcing file')
       endif

diff --git a/cicecore/cicedyn/infrastructure/comm/mpi/ice_gather_scatter.F90 b/cicecore/cicedyn/infrastructure/comm/mpi/ice_gather_scatter.F90
index 030deabca..cfb98befe 100644
--- a/cicecore/cicedyn/infrastructure/comm/mpi/ice_gather_scatter.F90
+++ b/cicecore/cicedyn/infrastructure/comm/mpi/ice_gather_scatter.F90
@@ -1836,12 +1836,12 @@ subroutine scatter_global_dbl(ARRAY, ARRAY_G, src_task, dst_dist, &
 !-----------------------------------------------------------------

    if (field_loc == field_loc_noupdate) then
-   do n=1,nblocks_tot
+      do n=1,nblocks_tot
+         if (dst_dist%blockLocation(n) == my_task+1 .and. &
+             dst_dist%blockLocalID(n) > 0) then
+            dst_block = dst_dist%blockLocalID(n)
             this_block = get_block(n,n)
-
-         if (dst_block > 0) then
-
             ! north edge
             do j = this_block%jhi+1,ny_block
             do i = 1, nx_block
@@ -1867,8 +1867,8 @@ subroutine scatter_global_dbl(ARRAY, ARRAY_G, src_task, dst_dist, &
             enddo
             enddo

-      endif
-   enddo
+         endif
+      enddo

    endif

    if (add_mpi_barriers) then
@@ -2222,12 +2222,12 @@ subroutine scatter_global_real(ARRAY, ARRAY_G, src_task, dst_dist, &
 !-----------------------------------------------------------------

    if (field_loc == field_loc_noupdate) then
-   do n=1,nblocks_tot
+      do n=1,nblocks_tot
+         if (dst_dist%blockLocation(n) == my_task+1 .and. &
+             dst_dist%blockLocalID(n) > 0) then
+            dst_block = dst_dist%blockLocalID(n)
             this_block = get_block(n,n)
-
-         if (dst_block > 0) then
-
             ! north edge
             do j = this_block%jhi+1,ny_block
             do i = 1, nx_block
@@ -2253,8 +2253,8 @@ subroutine scatter_global_real(ARRAY, ARRAY_G, src_task, dst_dist, &
             enddo
             enddo

-      endif
-   enddo
+         endif
+      enddo

    endif

    if (add_mpi_barriers) then
@@ -2608,12 +2608,12 @@ subroutine scatter_global_int(ARRAY, ARRAY_G, src_task, dst_dist, &
 !-----------------------------------------------------------------

    if (field_loc == field_loc_noupdate) then
-   do n=1,nblocks_tot
+      do n=1,nblocks_tot
+         if (dst_dist%blockLocation(n) == my_task+1 .and. &
+             dst_dist%blockLocalID(n) > 0) then
+            dst_block = dst_dist%blockLocalID(n)
             this_block = get_block(n,n)
-
-         if (dst_block > 0) then
-
             ! north edge
             do j = this_block%jhi+1,ny_block
             do i = 1, nx_block
@@ -2639,8 +2639,8 @@ subroutine scatter_global_int(ARRAY, ARRAY_G, src_task, dst_dist, &
             enddo
             enddo

-      endif
-   enddo
+         endif
+      enddo

    endif

    if (add_mpi_barriers) then
diff --git a/cicecore/cicedyn/infrastructure/comm/serial/ice_gather_scatter.F90 b/cicecore/cicedyn/infrastructure/comm/serial/ice_gather_scatter.F90
index 34cca2d03..5f4938281 100644
--- a/cicecore/cicedyn/infrastructure/comm/serial/ice_gather_scatter.F90
+++ b/cicecore/cicedyn/infrastructure/comm/serial/ice_gather_scatter.F90
@@ -1002,12 +1002,12 @@ subroutine scatter_global_dbl(ARRAY, ARRAY_G, src_task, dst_dist, &
 !-----------------------------------------------------------------

    if (field_loc == field_loc_noupdate) then
-   do n=1,nblocks_tot
+      do n=1,nblocks_tot
+         if (dst_dist%blockLocation(n) /= 0 .and. &
+             dst_dist%blockLocalID(n) > 0) then
+            dst_block = dst_dist%blockLocalID(n)
             this_block = get_block(n,n)
-
-         if (dst_block > 0) then
-
             ! north edge
             do j = this_block%jhi+1,ny_block
             do i = 1, nx_block
@@ -1033,8 +1033,8 @@ subroutine scatter_global_dbl(ARRAY, ARRAY_G, src_task, dst_dist, &
             enddo
             enddo

-      endif
-   enddo
+         endif
+      enddo

    endif

 !-----------------------------------------------------------------------
@@ -1250,12 +1250,12 @@ subroutine scatter_global_real(ARRAY, ARRAY_G, src_task, dst_dist, &
 !-----------------------------------------------------------------

    if (field_loc == field_loc_noupdate) then
-   do n=1,nblocks_tot
+      do n=1,nblocks_tot
+         if (dst_dist%blockLocation(n) /= 0 .and. &
+             dst_dist%blockLocalID(n) > 0) then
+            dst_block = dst_dist%blockLocalID(n)
             this_block = get_block(n,n)
-
-         if (dst_block > 0) then
-
             ! north edge
             do j = this_block%jhi+1,ny_block
             do i = 1, nx_block
@@ -1281,8 +1281,8 @@ subroutine scatter_global_real(ARRAY, ARRAY_G, src_task, dst_dist, &
             enddo
             enddo

-      endif
-   enddo
+         endif
+      enddo

    endif

 !-----------------------------------------------------------------------
@@ -1498,12 +1498,12 @@ subroutine scatter_global_int(ARRAY, ARRAY_G, src_task, dst_dist, &
 !-----------------------------------------------------------------

    if (field_loc == field_loc_noupdate) then
-   do n=1,nblocks_tot
+      do n=1,nblocks_tot
+         if (dst_dist%blockLocation(n) /= 0 .and. &
+             dst_dist%blockLocalID(n) > 0) then
+            dst_block = dst_dist%blockLocalID(n)
             this_block = get_block(n,n)
-
-         if (dst_block > 0) then
-
             ! north edge
             do j = this_block%jhi+1,ny_block
             do i = 1, nx_block
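In all six scatter_global_* fixes above, a test on a possibly undefined dst_block is replaced by an explicit guard on block ownership and a valid local ID before the halo rows are touched. A self-contained sketch of that guard, using a simplified stand-in for the CICE distribution type:

program scatter_guard_sketch
   implicit none
   type :: distrb_sketch   ! reduced stand-in for type(distrb)
      integer, allocatable :: blockLocation(:)  ! owning task+1, 0 = eliminated
      integer, allocatable :: blockLocalID(:)   ! local index on the owner
   end type
   type(distrb_sketch) :: dst_dist
   integer :: my_task, n, dst_block

   my_task = 0
   dst_dist%blockLocation = [1, 2, 0, 1]   ! block 3 has no work
   dst_dist%blockLocalID  = [1, 1, 0, 2]

   do n = 1, size(dst_dist%blockLocation)
      ! guard on ownership *and* a valid local ID before dereferencing
      if (dst_dist%blockLocation(n) == my_task+1 .and. &
          dst_dist%blockLocalID(n) > 0) then
         dst_block = dst_dist%blockLocalID(n)
         write(*,*) 'task', my_task, 'fills halo of local block', dst_block
      endif
   enddo
end program scatter_guard_sketch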
diff --git a/cicecore/cicedyn/infrastructure/ice_domain.F90 b/cicecore/cicedyn/infrastructure/ice_domain.F90
index 8b680f2d4..91af49947 100644
--- a/cicecore/cicedyn/infrastructure/ice_domain.F90
+++ b/cicecore/cicedyn/infrastructure/ice_domain.F90
@@ -101,7 +101,7 @@ subroutine init_domain_blocks
 !  This routine reads in domain information and calls the routine
 !  to set up the block decomposition.

-   use ice_distribution, only: processor_shape
+   use ice_distribution, only: processor_shape, proc_decomposition
    use ice_domain_size, only: ncat, nilyr, nslyr, max_blocks, &
        nx_global, ny_global, block_size_x, block_size_y
    use ice_fileunits, only: goto_nml
@@ -112,7 +112,8 @@ subroutine init_domain_blocks
 !----------------------------------------------------------------------

    integer (int_kind) :: &
-      nml_error           ! namelist read error flag
+      nml_error, &        ! namelist read error flag
+      nprocs_x, nprocs_y  ! procs decomposed into blocks

    character(len=char_len)      :: nml_name ! text namelist name
    character(len=char_len_long) :: tmpstr2  ! for namelist check
@@ -216,21 +217,29 @@ subroutine init_domain_blocks
    call broadcast_scalar(maskhalo_bound,   master_task)
    call broadcast_scalar(add_mpi_barriers, master_task)
    call broadcast_scalar(debug_blocks,     master_task)
-   if (my_task == master_task) then
-     if (max_blocks < 1) then
-       max_blocks=( ((nx_global-1)/block_size_x + 1) * &
-                    ((ny_global-1)/block_size_y + 1) - 1) / nprocs + 1
-       max_blocks=max(1,max_blocks)
-       write(nu_diag,'(/,a52,i6,/)') &
-         '(ice_domain): max_block < 1: max_block estimated to ',max_blocks
-     endif
-   endif
    call broadcast_scalar(max_blocks,   master_task)
    call broadcast_scalar(block_size_x, master_task)
    call broadcast_scalar(block_size_y, master_task)
    call broadcast_scalar(nx_global,    master_task)
    call broadcast_scalar(ny_global,    master_task)

+!----------------------------------------------------------------------
+!
+!  Set nprocs if not explicitly set to valid value in namelist
+!
+!----------------------------------------------------------------------
+
+#ifdef CESMCOUPLED
+   nprocs = get_num_procs()
+#else
+   if (nprocs < 0) then
+      nprocs = get_num_procs()
+   else if (nprocs /= get_num_procs()) then
+      write(nu_diag,*) subname,' ERROR: nprocs, get_num_procs = ',nprocs,get_num_procs()
+      call abort_ice(subname//' ERROR: Input nprocs not same as system (e.g MPI) request', file=__FILE__, line=__LINE__)
+   endif
+#endif
+
 !----------------------------------------------------------------------
 !
 !  perform some basic checks on domain
@@ -242,16 +251,6 @@ subroutine init_domain_blocks
       !***   domain size zero or negative
       !***
       call abort_ice(subname//' ERROR: Invalid domain: size < 1', file=__FILE__, line=__LINE__) ! no domain
-   else if (nprocs /= get_num_procs()) then
-      !***
-      !***   input nprocs does not match system (eg MPI) request
-      !***
-#if (defined CESMCOUPLED)
-      nprocs = get_num_procs()
-#else
-      write(nu_diag,*) subname,' ERROR: nprocs, get_num_procs = ',nprocs,get_num_procs()
-      call abort_ice(subname//' ERROR: Input nprocs not same as system request', file=__FILE__, line=__LINE__)
-#endif
    else if (nghost < 1) then
       !***
       !***   must have at least 1 layer of ghost cells
@@ -315,6 +314,7 @@ subroutine init_domain_distribution(KMTG,ULATG,grid_ice)
    use ice_boundary, only: ice_HaloCreate
    use ice_distribution, only: create_distribution, create_local_block_ids, ice_distributionGet
    use ice_domain_size, only: max_blocks, nx_global, ny_global
+   use ice_global_reductions, only: global_sum, global_maxval

    real (dbl_kind), dimension(nx_global,ny_global), intent(in) :: &
       KMTG            ,&! global topography
@@ -602,9 +602,9 @@ subroutine init_domain_distribution(KMTG,ULATG,grid_ice)
       work_per_block = 0
    end where
    if (my_task == master_task) then
-      write(nu_diag,*) 'ice_domain work_unit, max_work_unit = ',work_unit, max_work_unit
-      write(nu_diag,*) 'ice_domain nocn = ',minval(nocn),maxval(nocn),sum(nocn)
-      write(nu_diag,*) 'ice_domain work_per_block = ',minval(work_per_block),maxval(work_per_block),sum(work_per_block)
+      write(nu_diag,'(2a,4i9)') subname,' work_unit = ',work_unit, max_work_unit
+      write(nu_diag,'(2a,4i9)') subname,' nocn = ',minval(nocn),maxval(nocn),sum(nocn)
+      write(nu_diag,'(2a,4i9)') subname,' work_per_block = ',minval(work_per_block),maxval(work_per_block),sum(work_per_block)
    endif
    deallocate(nocn)
@@ -628,8 +628,42 @@ subroutine init_domain_distribution(KMTG,ULATG,grid_ice)

    call create_local_block_ids(blocks_ice, distrb_info)

-   ! write out block distribution
-   ! internal check of icedistributionGet as part of verification process
+!----------------------------------------------------------------------
+!
+!  check block sizes and max_blocks
+!
+!----------------------------------------------------------------------
+
+   if (associated(blocks_ice)) then
+      nblocks = size(blocks_ice)
+   else
+      nblocks = 0
+   endif
+
+   tblocks_tmp = global_sum(nblocks, distrb_info)
+   nblocks_max = global_maxval(nblocks, distrb_info)
+
+   if (my_task == master_task) then
+      write(nu_diag,'(2a,i8)') subname,' total number of blocks is', tblocks_tmp
+   endif
+
+   if (nblocks > max_blocks) then
+      write(nu_diag,'(2a,2i6)') subname,' ERROR: nblocks, max_blocks = ',nblocks,max_blocks
+      write(nu_diag,'(2a,2i6)') subname,' ERROR: max_blocks too small: increase to', nblocks_max
+      call abort_ice(subname//' ERROR max_blocks too small', file=__FILE__, line=__LINE__)
+   else if (nblocks_max < max_blocks) then
+      if (my_task == master_task) then
+         write(nu_diag,'(2a,2i6)') subname,' NOTE: max_blocks too large: decrease to', nblocks_max
+      endif
+   endif
+
+!----------------------------------------------------------------------
+!
+!  write out block distribution
+!  internal check of icedistributionGet as part of verification process
+!
+!----------------------------------------------------------------------

    if (debug_blocks) then
       call flush_fileunit(nu_diag)
@@ -707,38 +741,6 @@ subroutine init_domain_distribution(KMTG,ULATG,grid_ice)
       endif
    endif

-   if (associated(blocks_ice)) then
-      nblocks = size(blocks_ice)
-   else
-      nblocks = 0
-   endif
-   nblocks_max = 0
-   tblocks_tmp = 0
-   do n=0,distrb_info%nprocs - 1
-      nblocks_tmp = nblocks
-      call broadcast_scalar(nblocks_tmp, n)
-      nblocks_max = max(nblocks_max,nblocks_tmp)
-      tblocks_tmp = tblocks_tmp + nblocks_tmp
-   end do
-
-   if (my_task == master_task) then
-      write(nu_diag,*) &
-         'ice: total number of blocks is', tblocks_tmp
-   endif
-
-   if (nblocks_max > max_blocks) then
-      write(outstring,*) ' ERROR: num blocks exceed max: increase max to', nblocks_max
-      call abort_ice(subname//trim(outstring), file=__FILE__, line=__LINE__)
-   else if (nblocks_max < max_blocks) then
-      write(outstring,*) 'WARNING: ice no. blocks too large: decrease max to', nblocks_max
-      if (my_task == master_task) then
-         write(nu_diag,*) ' ********WARNING***********'
-         write(nu_diag,*) subname,trim(outstring)
-         write(nu_diag,*) ' **************************'
-         write(nu_diag,*) ' '
-      endif
-   endif
-
 !----------------------------------------------------------------------
 !
 !  Set up ghost cell updates for each distribution.
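The relocated max_blocks check above computes block totals with global reductions instead of the old broadcast loop, aborting when a task owns more blocks than max_blocks and noting when max_blocks could shrink. A serial sketch of the same bookkeeping, with sum/maxval standing in for global_sum/global_maxval over distrb_info:

program max_blocks_check_sketch
   implicit none
   integer :: nblocks_per_task(0:3)   ! toy per-task local block counts
   integer :: max_blocks, tblocks_tmp, nblocks_max

   nblocks_per_task = [3, 4, 2, 3]
   max_blocks = 6

   tblocks_tmp = sum(nblocks_per_task)      ! stand-in for global_sum
   nblocks_max = maxval(nblocks_per_task)   ! stand-in for global_maxval

   write(*,*) 'total number of blocks is', tblocks_tmp
   if (nblocks_max > max_blocks) then
      stop 'ERROR: max_blocks too small'
   else if (nblocks_max < max_blocks) then
      write(*,*) 'NOTE: max_blocks too large: decrease to', nblocks_max
   endif
end program max_blocks_check_sketch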
diff --git a/cicecore/cicedyn/infrastructure/ice_grid.F90 b/cicecore/cicedyn/infrastructure/ice_grid.F90
index c43b7989c..54bc3ad92 100644
--- a/cicecore/cicedyn/infrastructure/ice_grid.F90
+++ b/cicecore/cicedyn/infrastructure/ice_grid.F90
@@ -301,6 +301,10 @@ subroutine init_grid1
    real (kind=dbl_kind), dimension(:,:), allocatable :: &
       work_g1, work_g2

+   integer (kind=int_kind) :: &
+      max_blocks_min, &   ! min value of max_blocks across procs
+      max_blocks_max      ! max value of max_blocks across procs
+
    real (kind=dbl_kind) :: &
       rad_to_deg
@@ -390,9 +394,15 @@ subroutine init_grid1
    !  write additional domain information
    !-----------------------------------------------------------------

+   max_blocks_min = global_minval(max_blocks, distrb_info)
+   max_blocks_max = global_maxval(max_blocks, distrb_info)
    if (my_task == master_task) then
-      write(nu_diag,'(a26,i6)') '  Block size:  nx_block = ',nx_block
-      write(nu_diag,'(a26,i6)') '               ny_block = ',ny_block
+      write(nu_diag,*        ) ''
+      write(nu_diag,'(2a)'   ) subname,' Block size:'
+      write(nu_diag,'(2a,i8)') subname,'   nx_block        = ',nx_block
+      write(nu_diag,'(2a,i8)') subname,'   ny_block        = ',ny_block
+      write(nu_diag,'(2a,i8)') subname,'   min(max_blocks) = ',max_blocks_min
+      write(nu_diag,'(2a,i8)') subname,'   max(max_blocks) = ',max_blocks_max
    endif

 end subroutine init_grid1
diff --git a/cicecore/cicedyn/infrastructure/ice_memusage.F90 b/cicecore/cicedyn/infrastructure/ice_memusage.F90
index 323a9074e..45b882879 100644
--- a/cicecore/cicedyn/infrastructure/ice_memusage.F90
+++ b/cicecore/cicedyn/infrastructure/ice_memusage.F90
@@ -8,13 +8,16 @@ MODULE ice_memusage
 !-------------------------------------------------------------------------------

 use ice_kinds_mod, only : dbl_kind, log_kind
+use ice_fileunits, only : nu_diag
+use ice_exit,      only : abort_ice

 implicit none
 private

 ! PUBLIC: Public interfaces

-public ::  ice_memusage_getusage, &
+public ::  ice_memusage_allocErr, &
+           ice_memusage_getusage, &
            ice_memusage_init, &
            ice_memusage_print

@@ -29,6 +32,35 @@ MODULE ice_memusage

 contains

+!===============================================================================
+! check memory alloc/dealloc return code
+
+logical function ice_memusage_allocErr(istat, errstr)
+
+   implicit none
+
+   !----- arguments -----
+
+   integer :: istat                      !< input error code
+
+   character(len=*), optional :: errstr  !< error string
+
+   !----- local -----
+
+   character(*),parameter :: subname = '(ice_memusage_allocErr)'
+
+   ice_memusage_allocErr = .false.
+   if (istat /= 0) then
+      ice_memusage_allocErr = .true.
+      if (present(errstr)) then
+         write(nu_diag,*) 'ERROR: '//trim(errstr)
+      endif
+      call abort_ice(subname//'ERROR: alloc/dealloc', file=__FILE__, line=__LINE__)
+      return
+   endif
+
+end function ice_memusage_allocErr
+
 !===============================================================================
 ! Initialize memory conversion to MB
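Callers of the new helper pass the stat= result straight in and bail out on failure, which keeps each allocate to a two-line pattern. A runnable sketch; allocErr below is a simplified stand-in for ice_memusage_allocErr (print and stop instead of abort_ice):

module alloc_check_sketch
   implicit none
contains
   logical function allocErr(istat, errstr)
      ! report a failed allocate/deallocate and flag it to the caller
      integer, intent(in) :: istat
      character(len=*), intent(in), optional :: errstr
      allocErr = (istat /= 0)
      if (allocErr .and. present(errstr)) write(*,*) 'ERROR: '//trim(errstr)
   end function allocErr
end module alloc_check_sketch

program use_alloc_check
   use alloc_check_sketch, only: allocErr
   implicit none
   real, allocatable :: work(:)
   integer :: istat
   allocate(work(1000), stat=istat)
   if (allocErr(istat, 'alloc work')) stop 1
   write(*,*) 'allocated', size(work), 'elements'
end program use_alloc_check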
diff --git a/cicecore/cicedyn/infrastructure/io/io_binary/ice_restart.F90 b/cicecore/cicedyn/infrastructure/io/io_binary/ice_restart.F90
index 606f0d46b..5866d7130 100644
--- a/cicecore/cicedyn/infrastructure/io/io_binary/ice_restart.F90
+++ b/cicecore/cicedyn/infrastructure/io/io_binary/ice_restart.F90
@@ -12,6 +12,7 @@ module ice_restart
   use ice_restart_shared, only: &
       restart, restart_ext, restart_dir, restart_file, pointer_file, &
       runid, runtype, use_restart_time, lenstr
+  use ice_communicate, only: my_task, master_task
   use ice_fileunits, only: nu_diag, nu_rst_pointer
   use ice_fileunits, only: nu_dump, nu_dump_eap, nu_dump_FY, nu_dump_age
   use ice_fileunits, only: nu_dump_lvl, nu_dump_pond, nu_dump_hbrine
@@ -48,7 +49,6 @@ subroutine init_restart_read(ice_ic)

    use ice_calendar, only: istep0, istep1, timesecs, npt, myear, &
        set_date_from_timesecs
-   use ice_communicate, only: my_task, master_task
    use ice_dyn_shared, only: kdyn
    use ice_read_write, only: ice_open, ice_open_ext
@@ -381,7 +381,6 @@ subroutine init_restart_write(filename_spec)

    use ice_calendar, only: msec, mmonth, mday, myear, istep1, &
        timesecs
-   use ice_communicate, only: my_task, master_task
    use ice_dyn_shared, only: kdyn
    use ice_read_write, only: ice_open, ice_open_ext
@@ -721,7 +720,9 @@ subroutine read_restart_field(nu,nrec,work,atype,vname,ndim3, &

    character(len=*), parameter :: subname = '(read_restart_field)'

-   write(nu_diag,*) 'vname ',trim(vname)
+   if (my_task == master_task) then
+      write(nu_diag,*) subname,' read vname ',trim(vname)
+   endif
    if (present(field_loc)) then
       do n=1,ndim3
          if (restart_ext) then
@@ -782,6 +783,9 @@ subroutine write_restart_field(nu,nrec,work,atype,vname,ndim3,diag)

    character(len=*), parameter :: subname = '(write_restart_field)'

+   if (my_task == master_task) then
+      write(nu_diag,*) subname,' write vname ',trim(vname)
+   endif
    do n=1,ndim3
       work2(:,:,:) = work(:,:,n,:)
       if (restart_ext) then
@@ -801,7 +805,6 @@ end subroutine write_restart_field
 subroutine final_restart()

    use ice_calendar, only: istep1, timesecs
-   use ice_communicate, only: my_task, master_task

    logical (kind=log_kind) :: &
       tr_iage, tr_FY, tr_lvl, tr_iso, tr_aero, &
diff --git a/cicecore/cicedyn/infrastructure/io/io_netcdf/ice_history_write.F90 b/cicecore/cicedyn/infrastructure/io/io_netcdf/ice_history_write.F90
index 7d29fc4cc..92df8dad8 100644
--- a/cicecore/cicedyn/infrastructure/io/io_netcdf/ice_history_write.F90
+++ b/cicecore/cicedyn/infrastructure/io/io_netcdf/ice_history_write.F90
@@ -152,11 +152,26 @@ subroutine ice_write_hist (ns)
       if (history_format == 'cdf1') then
          iflag = nf90_clobber
       elseif (history_format == 'cdf2') then
+#ifdef NO_CDF2
+         call abort_ice(subname//' ERROR: history_format cdf2 not available ', &
+            file=__FILE__, line=__LINE__)
+#else
          iflag = ior(nf90_clobber,nf90_64bit_offset)
+#endif
       elseif (history_format == 'cdf5') then
+#ifdef NO_CDF5
+         call abort_ice(subname//' ERROR: history_format cdf5 not available ', &
+            file=__FILE__, line=__LINE__)
+#else
         iflag = ior(nf90_clobber,nf90_64bit_data)
+#endif
      elseif (history_format == 'hdf5') then
+#ifdef NO_HDF5
+         call abort_ice(subname//' ERROR: history_format hdf5 not available ', &
+            file=__FILE__, line=__LINE__)
+#else
         iflag = ior(nf90_clobber,nf90_netcdf4)
+#endif
      else
         call abort_ice(subname//' ERROR: history_format not allowed for '//trim(history_format), &
            file=__FILE__, line=__LINE__)
@@ -1192,6 +1207,12 @@ subroutine ice_hist_field_def(ncid, hfield, lprecision, dimids, ns)
      status = nf90_def_var(ncid, hfield%vname, lprecision, dimids, varid)
      call ice_check_nc(status, subname//' ERROR: defining var '//trim(hfield%vname),file=__FILE__,line=__LINE__)

+#ifdef NO_HDF5
+      if (history_format=='hdf5') then
+         call abort_ice(subname//' ERROR: history_format hdf5 not available ', &
+            file=__FILE__, line=__LINE__)
+      endif
+#else
       if (history_format=='hdf5' .and. size(dimids)>1) then
          if (dimids(1)==imtid .and. dimids(2)==jmtid) then
             chunks(1)=history_chunksize(1)
@@ -1208,6 +1229,7 @@ subroutine ice_hist_field_def(ncid, hfield, lprecision, dimids, ns)
          status = nf90_def_var_deflate(ncid, varid, shuffle=0, deflate=1, deflate_level=history_deflate)
          call ice_check_nc(status, subname//' ERROR deflating var '//trim(hfield%vname), file=__FILE__, line=__LINE__)
       endif
+#endif

       ! add attributes
       status = nf90_put_att(ncid,varid,'units', hfield%vunit)
@@ -1335,6 +1357,12 @@ subroutine ice_hist_coord_def(ncid, coord, lprecision, dimids, varid)
      status = nf90_def_var(ncid, coord%short_name, lprecision, dimids, varid)
      call ice_check_nc(status, subname//' ERROR: defining coord '//coord%short_name,file=__FILE__,line=__LINE__)

+#ifdef NO_HDF5
+      if (history_format=='hdf5') then
+         call abort_ice(subname//' ERROR: history_format hdf5 not available ', &
+            file=__FILE__, line=__LINE__)
+      endif
+#else
       if (history_format=='hdf5' .and. size(dimids)>1) then
          if (dimids(1)==imtid .and. dimids(2)==jmtid) then
             chunks(1)=history_chunksize(1)
@@ -1351,6 +1379,7 @@ subroutine ice_hist_coord_def(ncid, coord, lprecision, dimids, varid)
          status=nf90_def_var_deflate(ncid, varid, shuffle=0, deflate=1, deflate_level=history_deflate)
          call ice_check_nc(status, subname//' ERROR deflating var '//trim(coord%short_name), file=__FILE__, line=__LINE__)
       endif
+#endif

       status = nf90_put_att(ncid,varid,'long_name',trim(coord%long_name))
       call ice_check_nc(status, subname// ' ERROR: defining long_name for '//coord%short_name, &
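The cdf2/cdf5/hdf5 branches above are wrapped in build-time guards so a netCDF library built without those features aborts with a clear message when the format is requested, instead of failing inside the library. A minimal sketch of the guard idiom (a .F90 file so it is preprocessed; compile with -DNO_HDF5 to take the unavailable branch):

program format_guard_sketch
   implicit none
   character(len=8) :: fmt
   fmt = 'hdf5'
   if (fmt == 'hdf5') then
#ifdef NO_HDF5
      ! the feature was disabled at build time: fail loudly and early
      write(*,*) 'ERROR: format hdf5 not available in this build'
      stop 1
#else
      write(*,*) 'selecting netCDF-4/HDF5 creation flags'
#endif
   endif
end program format_guard_sketch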
diff --git a/cicecore/cicedyn/infrastructure/io/io_netcdf/ice_restart.F90 b/cicecore/cicedyn/infrastructure/io/io_netcdf/ice_restart.F90
index e9be45481..9bf3b1d8a 100644
--- a/cicecore/cicedyn/infrastructure/io/io_netcdf/ice_restart.F90
+++ b/cicecore/cicedyn/infrastructure/io/io_netcdf/ice_restart.F90
@@ -221,11 +221,26 @@ subroutine init_restart_write(filename_spec)
       if (restart_format == 'cdf1') then
          iflag = nf90_clobber
       elseif (restart_format == 'cdf2') then
+#ifdef NO_CDF2
+         call abort_ice(subname//' ERROR: restart_format cdf2 not available ', &
+            file=__FILE__, line=__LINE__)
+#else
          iflag = ior(nf90_clobber,nf90_64bit_offset)
+#endif
       elseif (restart_format == 'cdf5') then
+#ifdef NO_CDF5
+         call abort_ice(subname//' ERROR: restart_format cdf5 not available ', &
+            file=__FILE__, line=__LINE__)
+#else
          iflag = ior(nf90_clobber,nf90_64bit_data)
+#endif
       elseif (restart_format == 'hdf5') then
+#ifdef NO_HDF5
+         call abort_ice(subname//' ERROR: restart_format hdf5 not available ', &
+            file=__FILE__, line=__LINE__)
+#else
          iflag = ior(nf90_clobber,nf90_netcdf4)
+#endif
       else
          call abort_ice(subname//' ERROR: restart_format not allowed for '//trim(restart_format), &
            file=__FILE__, line=__LINE__)
@@ -894,6 +909,12 @@ subroutine define_rest_field(ncid, vname, dims)
       status = nf90_def_var(ncid,trim(vname),nf90_double,dims,varid)
       call ice_check_nc(status, subname//' ERROR: def var '//trim(vname), file=__FILE__, line=__LINE__)

+#ifdef NO_HDF5
+      if (restart_format=='hdf5') then
+         call abort_ice(subname//' ERROR: restart_format hdf5 not available ', &
+            file=__FILE__, line=__LINE__)
+      endif
+#else
       if (restart_format=='hdf5' .and. size(dims)>1) then
          if (dims(1)==dimid_ni .and. dims(2)==dimid_nj) then
             chunks(1)=restart_chunksize(1)
@@ -910,6 +931,7 @@ subroutine define_rest_field(ncid, vname, dims)
          status=nf90_def_var_deflate(ncid, varid, shuffle=0, deflate=1, deflate_level=restart_deflate)
          call ice_check_nc(status, subname//' ERROR deflating var '//trim(vname), file=__FILE__, line=__LINE__)
       endif
+#endif

 #else
       call abort_ice(subname//' ERROR: USE_NETCDF cpp not defined', &
diff --git a/cicecore/drivers/direct/hadgem3/CICE.F90 b/cicecore/drivers/direct/hadgem3/CICE.F90
index fac02de9b..2cdac546a 100644
--- a/cicecore/drivers/direct/hadgem3/CICE.F90
+++ b/cicecore/drivers/direct/hadgem3/CICE.F90
@@ -1,8 +1,8 @@
 !=======================================================================
-! Copyright (c) 2023, Triad National Security, LLC
+! Copyright (c) 2024, Triad National Security, LLC
 ! All rights reserved.
 !
-! Copyright 2023. Triad National Security, LLC. This software was
+! Copyright 2024. Triad National Security, LLC. This software was
 ! produced under U.S. Government contract DE-AC52-06NA25396 for Los
 ! Alamos National Laboratory (LANL), which is operated by Triad
 ! National Security, LLC for the U.S. Department of Energy. The U.S.
diff --git a/cicecore/drivers/mct/cesm1/CICE_copyright.txt b/cicecore/drivers/mct/cesm1/CICE_copyright.txt
index af2afdf3d..3f81ec782 100644
--- a/cicecore/drivers/mct/cesm1/CICE_copyright.txt
+++ b/cicecore/drivers/mct/cesm1/CICE_copyright.txt
@@ -1,7 +1,7 @@
-! Copyright (c) 2023, Triad National Security, LLC
+! Copyright (c) 2024, Triad National Security, LLC
 ! All rights reserved.
 !
-! Copyright 2023. Triad National Security, LLC. This software was
+! Copyright 2024. Triad National Security, LLC. This software was
 ! produced under U.S. Government contract DE-AC52-06NA25396 for Los
 ! Alamos National Laboratory (LANL), which is operated by Triad
 ! National Security, LLC for the U.S. Department of Energy. The U.S.
diff --git a/cicecore/drivers/nuopc/cmeps/CICE_copyright.txt b/cicecore/drivers/nuopc/cmeps/CICE_copyright.txt
index af2afdf3d..3f81ec782 100644
--- a/cicecore/drivers/nuopc/cmeps/CICE_copyright.txt
+++ b/cicecore/drivers/nuopc/cmeps/CICE_copyright.txt
@@ -1,7 +1,7 @@
-! Copyright (c) 2023, Triad National Security, LLC
+! Copyright (c) 2024, Triad National Security, LLC
 ! All rights reserved.
 !
-! Copyright 2023. Triad National Security, LLC. This software was
+! Copyright 2024. Triad National Security, LLC. This software was
 ! produced under U.S. Government contract DE-AC52-06NA25396 for Los
 ! Alamos National Laboratory (LANL), which is operated by Triad
 ! National Security, LLC for the U.S. Department of Energy. The U.S.
diff --git a/cicecore/drivers/nuopc/cmeps/ice_comp_nuopc.F90 b/cicecore/drivers/nuopc/cmeps/ice_comp_nuopc.F90
index 5e6085c05..464bdb851 100644
--- a/cicecore/drivers/nuopc/cmeps/ice_comp_nuopc.F90
+++ b/cicecore/drivers/nuopc/cmeps/ice_comp_nuopc.F90
@@ -926,6 +926,12 @@ subroutine InitializeRealize(gcomp, importState, exportState, clock, rc)
            end if
         enddo
         deallocate(lfieldnamelist)
+        call State_SetScalar(dble(0), flds_scalar_index_nx, exportState, &
+           flds_scalar_name, flds_scalar_num, rc)
+        if (ChkErr(rc,__LINE__,u_FILE_u)) return
+        call State_SetScalar(dble(0), flds_scalar_index_ny, exportState, &
+           flds_scalar_name, flds_scalar_num, rc)
+        if (ChkErr(rc,__LINE__,u_FILE_u)) return

         ! *******************
         ! *** RETURN HERE ***
         ! *******************
diff --git a/cicecore/drivers/nuopc/dmi/CICE.F90 b/cicecore/drivers/nuopc/dmi/CICE.F90
index 5ace27736..f993686e8 100644
--- a/cicecore/drivers/nuopc/dmi/CICE.F90
+++ b/cicecore/drivers/nuopc/dmi/CICE.F90
@@ -1,8 +1,8 @@
 !=======================================================================
-! Copyright (c) 2023, Triad National Security, LLC
+! Copyright (c) 2024, Triad National Security, LLC
 ! All rights reserved.
 !
-! Copyright 2023. Triad National Security, LLC. This software was
+! Copyright 2024. Triad National Security, LLC. This software was
 ! produced under U.S. Government contract DE-AC52-06NA25396 for Los
 ! Alamos National Laboratory (LANL), which is operated by Triad
 ! National Security, LLC for the U.S. Department of Energy. The U.S.
diff --git a/cicecore/drivers/standalone/cice/CICE.F90 b/cicecore/drivers/standalone/cice/CICE.F90
index 5ace27736..f993686e8 100644
--- a/cicecore/drivers/standalone/cice/CICE.F90
+++ b/cicecore/drivers/standalone/cice/CICE.F90
@@ -1,8 +1,8 @@
 !=======================================================================
-! Copyright (c) 2023, Triad National Security, LLC
+! Copyright (c) 2024, Triad National Security, LLC
 ! All rights reserved.
 !
-! Copyright 2023. Triad National Security, LLC. This software was
+! Copyright 2024. Triad National Security, LLC. This software was
 ! produced under U.S. Government contract DE-AC52-06NA25396 for Los
 ! Alamos National Laboratory (LANL), which is operated by Triad
 ! National Security, LLC for the U.S. Department of Energy. The U.S.
diff --git a/cicecore/drivers/unittest/opticep/CICE.F90 b/cicecore/drivers/unittest/opticep/CICE.F90
index 5ace27736..f993686e8 100644
--- a/cicecore/drivers/unittest/opticep/CICE.F90
+++ b/cicecore/drivers/unittest/opticep/CICE.F90
@@ -1,8 +1,8 @@
 !=======================================================================
-! Copyright (c) 2023, Triad National Security, LLC
+! Copyright (c) 2024, Triad National Security, LLC
 ! All rights reserved.
 !
-! Copyright 2023. Triad National Security, LLC. This software was
+! Copyright 2024. Triad National Security, LLC. This software was
 ! produced under U.S. Government contract DE-AC52-06NA25396 for Los
 ! Alamos National Laboratory (LANL), which is operated by Triad
 ! National Security, LLC for the U.S. Department of Energy. The U.S.
diff --git a/cicecore/shared/ice_distribution.F90 b/cicecore/shared/ice_distribution.F90
index 0f3f6c198..d0768fc5a 100644
--- a/cicecore/shared/ice_distribution.F90
+++ b/cicecore/shared/ice_distribution.F90
@@ -15,6 +15,7 @@ module ice_distribution
    use ice_blocks, only: nblocks_x, nblocks_y, nblocks_tot, debug_blocks
    use ice_exit, only: abort_ice
    use ice_fileunits, only: nu_diag
+   use ice_memusage, only: ice_memusage_allocErr

    implicit none
    private
@@ -33,15 +34,14 @@ module ice_distribution
         blockGlobalID      ! global block id for each local block

      integer (int_kind), dimension(:), pointer :: blockCnt
-     integer (int_kind), dimension(:,:), pointer :: blockIndex
-
   end type

   public :: create_distribution, &
             ice_distributionGet, &
             ice_distributionGetBlockLoc, &
             ice_distributionGetBlockID, &
-            create_local_block_ids
+            create_local_block_ids, &
+            proc_decomposition

   character (char_len), public :: &
      processor_shape      ! 'square-pop' (approx) POP default config
@@ -122,7 +122,8 @@ function create_distribution(dist_type, nprocs, work_per_block)

    case default

-      call abort_ice(subname//'ERROR: ice distribution: unknown distribution type')
+      call abort_ice(subname//'ERROR: ice distribution: unknown distribution type', &
+         file=__FILE__, line=__LINE__)

    end select

@@ -152,7 +153,8 @@ subroutine create_local_block_ids(block_ids, distribution)
 !-----------------------------------------------------------------------

    integer (int_kind) :: &
-      n, bcount   ! dummy counters
+      n, bcount, &! dummy counters
+      istat       ! status flag for deallocate

    character(len=*),parameter :: subname='(create_local_block_ids)'

@@ -167,9 +169,6 @@ subroutine create_local_block_ids(block_ids, distribution)
       if (distribution%blockLocation(n) == my_task+1) bcount = bcount + 1
    end do

-
-   if (bcount > 0) allocate(block_ids(bcount))
-
 !-----------------------------------------------------------------------
 !
 !  now fill array with proper block ids
 !
 !-----------------------------------------------------------------------

    if (bcount > 0) then
+      allocate(block_ids(bcount), stat=istat)
+      if (ice_memusage_allocErr(istat,subname//'alloc block_ids')) return
       do n=1,size(distribution%blockLocation)
          if (distribution%blockLocation(n) == my_task+1) then
             block_ids(distribution%blockLocalID(n)) = n
@@ -314,7 +315,8 @@ subroutine proc_decomposition(nprocs, nprocs_x, nprocs_y)
    end do proc_loop

    if (nprocs_x == 0) then
-      call abort_ice(subname//'ERROR: Unable to find 2d processor config')
+      call abort_ice(subname//'ERROR: Unable to find 2d processor config', &
+         file=__FILE__, line=__LINE__)
    endif

    if (my_task == master_task) then
@@ -363,11 +365,16 @@ subroutine ice_distributionDestroy(distribution)
 !----------------------------------------------------------------------

    deallocate(distribution%blockLocation, stat=istat)
+   if (ice_memusage_allocErr(istat,subname//'dealloc blockLocation')) return
+
    deallocate(distribution%blockLocalID , stat=istat)
+   if (ice_memusage_allocErr(istat,subname//'dealloc blockLocalID')) return
+
    deallocate(distribution%blockGlobalID, stat=istat)
-   deallocate(distribution%blockCnt     , stat=istat)
-   deallocate(distribution%blockindex   , stat=istat)
+   if (ice_memusage_allocErr(istat,subname//'dealloc blockGlobalID')) return
+
+   deallocate(distribution%blockCnt     , stat=istat)
+   if (ice_memusage_allocErr(istat,subname//'dealloc blockCnt')) return

 !-----------------------------------------------------------------------

@@ -382,19 +389,19 @@ subroutine ice_distributionGet(distribution,&

 ! This routine extracts information from a distribution.

    type (distrb), intent(in) :: &
-      distribution ! input distribution for which information
-                   ! is requested
+      distribution      ! input distribution for which information
+                        ! is requested

-   integer (int_kind), intent(out), optional :: &
-      nprocs ,&! number of processors in this dist
-      communicator ,&! communicator to use in this dist
-      numLocalBlocks ! number of blocks distributed to this
-                     ! local processor
+   integer (int_kind), intent(out), optional :: &
+      nprocs         ,&! number of processors in this dist
+      communicator   ,&! communicator to use in this dist
+      numLocalBlocks   ! number of blocks distributed to this
+                       ! local processor

-   integer (int_kind), dimension(:), optional :: &
-      blockLocation ,&! processor location for all blocks
-      blockLocalID ,&! local block id for all blocks
-      blockGlobalID ! global block id for each local block
+   integer (int_kind), dimension(:), optional :: &
+      blockLocation  ,&! processor location for all blocks
+      blockLocalID   ,&! local block id for all blocks
+      blockGlobalID    ! global block id for each local block

    character(len=*),parameter :: subname='(ice_distributionGet)'

@@ -413,7 +420,8 @@ subroutine ice_distributionGet(distribution,&
       if (associated(distribution%blockLocation)) then
          blockLocation = distribution%blockLocation
       else
-         call abort_ice(subname//'ERROR: blockLocation not allocated')
+         call abort_ice(subname//'ERROR: blockLocation not allocated', &
+            file=__FILE__, line=__LINE__)
          return
       endif
    endif
@@ -422,7 +430,8 @@ subroutine ice_distributionGet(distribution,&
       if (associated(distribution%blockLocalID)) then
         blockLocalID = distribution%blockLocalID
      else
-        call abort_ice(subname//'ERROR: blockLocalID not allocated')
+        call abort_ice(subname//'ERROR: blockLocalID not allocated', &
+           file=__FILE__, line=__LINE__)
        return
      endif
   endif
@@ -431,7 +440,8 @@ subroutine ice_distributionGet(distribution,&
      if (associated(distribution%blockGlobalID)) then
        blockGlobalID = distribution%blockGlobalID
     else
-        call abort_ice(subname//'ERROR: blockGlobalID not allocated')
+        call abort_ice(subname//'ERROR: blockGlobalID not allocated', &
+           file=__FILE__, line=__LINE__)
        return
     endif
   endif
@@ -470,7 +480,8 @@ subroutine ice_distributionGetBlockLoc(distribution, blockID, &
 !-----------------------------------------------------------------------

    if (blockID < 0 .or. blockID > nblocks_tot) then
-      call abort_ice(subname//'ERROR: invalid block id')
+      call abort_ice(subname//'ERROR: invalid block id', &
+         file=__FILE__, line=__LINE__)
       return
    endif

@@ -514,7 +525,8 @@ subroutine ice_distributionGetBlockID(distribution, localID, &
 !-----------------------------------------------------------------------

    if (localID < 0 .or. localID > distribution%numLocalBlocks) then
-      call abort_ice(subname//'ERROR: invalid local id')
+      call abort_ice(subname//'ERROR: invalid local id', &
+         file=__FILE__, line=__LINE__)
       return
    endif

@@ -532,7 +544,7 @@ end subroutine ice_distributionGetBlockID

 !***********************************************************************

-  function create_distrb_cart(nprocs, workPerBlock) result(newDistrb)
+  function create_distrb_cart(nprocs, workPerBlock, max_blocks_calc) result(newDistrb)

 ! This function creates a distribution of blocks across processors
 ! using a 2-d Cartesian distribution.

@@ -541,11 +553,14 @@ function create_distrb_cart(nprocs, workPerBlock) result(newDistrb)
      nprocs            ! number of processors in this distribution

   integer (int_kind), dimension(:), intent(in) :: &
-     workPerBlock ! amount of work per block
+     workPerBlock      ! amount of work per block
+
+  logical (log_kind), optional :: &
+     max_blocks_calc   ! compute max_blocks (default true)

   type (distrb) :: &
-     newDistrb ! resulting structure describing Cartesian
-               ! distribution of blocks
+     newDistrb         ! resulting structure describing Cartesian
+                       ! distribution of blocks

 !----------------------------------------------------------------------
 !
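create_distrb_cart gains an optional max_blocks_calc argument, copied into a local with a default of .true. (the lmax_blocks_calc logic in the next hunk), so existing callers keep the old behavior while create_distrb_rake can suppress the max_blocks side effect. A small sketch of that optional-argument idiom; the names below are illustrative, not the CICE interfaces:

module optional_flag_sketch
   implicit none
contains
   subroutine make_distribution(max_blocks_calc)
      ! optional flag copied into a local with a default, so the flag
      ! can be tested safely even when the caller omits it
      logical, intent(in), optional :: max_blocks_calc
      logical :: lmax_blocks_calc
      lmax_blocks_calc = .true.
      if (present(max_blocks_calc)) lmax_blocks_calc = max_blocks_calc
      write(*,*) 'compute max_blocks? ', lmax_blocks_calc
   end subroutine make_distribution
end module optional_flag_sketch

program demo
   use optional_flag_sketch, only: make_distribution
   call make_distribution()                          ! default: .true.
   call make_distribution(max_blocks_calc=.false.)   ! rake-style caller
end program demo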
@@ -554,24 +569,31 @@ function create_distrb_cart(nprocs, workPerBlock) result(newDistrb)

    integer (int_kind) :: &
-      i, j, &! dummy loop indices
+      i, j, n, &! dummy loop indices
       istat, &! status flag for allocation
       iblock, jblock, &!
       is, ie, js, je, &! start, end block indices for each proc
       processor, &! processor position in cartesian decomp
       globalID, &! global block ID
       localID, &! block location on this processor
-      nprocsX, &! num of procs in x for global domain
-      nprocsY, &! num of procs in y for global domain
+      nprocsX, &! num of procs in x for global domain
+      nprocsY, &! num of procs in y for global domain
       numBlocksXPerProc, &! num of blocks per processor in x
       numBlocksYPerProc, &! num of blocks per processor in y
       numBlocksPerProc    ! required number of blocks per processor

-   character(len=char_len) :: &
-      numBlocksPerProc_str ! required number of blocks per processor (as string)
+   logical (log_kind) :: &
+      lmax_blocks_calc     ! local max_blocks_calc setting

    character(len=*),parameter :: subname='(create_distrb_cart)'

+!----------------------------------------------------------------------
+
+   lmax_blocks_calc = .true.
+   if (present(max_blocks_calc)) then
+      lmax_blocks_calc = max_blocks_calc
+   endif
+
 !----------------------------------------------------------------------
 !
 !  create communicator for this distribution
 !
 !----------------------------------------------------------------------
@@ -590,27 +612,18 @@ function create_distrb_cart(nprocs, workPerBlock) result(newDistrb)

    call proc_decomposition(nprocs, nprocsX, nprocsY)

-
 !----------------------------------------------------------------------
 !
 !  allocate space for decomposition
 !
 !----------------------------------------------------------------------

-   allocate (newDistrb%blockLocation(nblocks_tot), &
-             newDistrb%blockLocalID (nblocks_tot), stat=istat)
-
-   if (istat > 0) then
-      call abort_ice( &
-         'create_distrb_cart: error allocating blockLocation or blockLocalID')
-      return
-   endif
-
-   allocate (newDistrb%blockCnt(nprocs))
-   newDistrb%blockCnt(:) = 0
+   allocate(newDistrb%blockLocation(nblocks_tot), &
+            newDistrb%blockLocalID (nblocks_tot), stat=istat)
+   if (ice_memusage_allocErr(istat,subname//'alloc blockLocation or blockLocalID')) return

-   allocate(newDistrb%blockIndex(nprocs,max_blocks))
-   newDistrb%blockIndex(:,:) = 0
+   allocate(newDistrb%blockCnt(nprocs), stat=istat)
+   if (ice_memusage_allocErr(istat,subname//'alloc blockLocation or blockLocalID')) return

 !----------------------------------------------------------------------
 !
@@ -621,17 +634,10 @@ function create_distrb_cart(nprocs, workPerBlock) result(newDistrb)
    numBlocksXPerProc = (nblocks_x-1)/nprocsX + 1
    numBlocksYPerProc = (nblocks_y-1)/nprocsY + 1

-   ! Check if max_blocks is too small
-   numBlocksPerProc = numBlocksXPerProc * numBlocksYPerProc
-   if (numBlocksPerProc > max_blocks) then
-      write(numBlocksPerProc_str, '(i2)') numBlocksPerProc
-      call abort_ice(subname//'ERROR: max_blocks too small (need at least '//trim(numBlocksPerProc_str)//')')
-      return
-   endif
-
+   newDistrb%blockCnt(:) = 0
    do j=1,nprocsY
    do i=1,nprocsX

-      processor = (j-1)*nprocsX + i ! number the processors
+      processor = (j-1)*nprocsX + i    ! number the processors
                                        ! left to right, bot to top

       is = (i-1)*numBlocksXPerProc + 1 ! starting block in i
       ie =  i   *numBlocksXPerProc     ! ending   block in i
       if (ie > nblocks_x) ie = nblocks_x
       js = (j-1)*numBlocksYPerProc + 1 ! starting block in j
       je =  j   *numBlocksYPerProc     ! ending   block in j
       if (je > nblocks_y) je = nblocks_y

-      localID = 0 ! initialize counter for local index
       do jblock = js,je
       do iblock = is,ie
          globalID = (jblock - 1)*nblocks_x + iblock
          if (workPerBlock(globalID) /= 0) then
-            localID = localID + 1
+            newDistrb%blockCnt(processor) = newDistrb%blockCnt(processor) + 1
+            localID = newDistrb%blockCnt(processor)
             newDistrb%blockLocation(globalID) = processor
             newDistrb%blockLocalID (globalID) = localID
-            newDistrb%blockCnt(processor) = newDistrb%blockCnt(processor) + 1
-            newDistrb%blockIndex(processor,localID) = globalID
          else ! no work - eliminate block from distribution
             newDistrb%blockLocation(globalID) = 0
             newDistrb%blockLocalID (globalID) = 0
@@ -658,64 +662,25 @@ function create_distrb_cart(nprocs, workPerBlock) result(newDistrb)
          end do
          end do

-      ! if this is the local processor, set number of local blocks
-      if (my_task == processor - 1) then
-         newDistrb%numLocalBlocks = localID
-      endif
-
    end do
    end do

-!----------------------------------------------------------------------
-!
-!  now store the local info
-!
-!----------------------------------------------------------------------
+   newDistrb%numLocalBlocks = newDistrb%blockCnt(my_task+1)

-   if (newDistrb%numLocalBlocks > 0) then
-      allocate (newDistrb%blockGlobalID(newDistrb%numLocalBlocks), &
-                stat=istat)
-      if (istat > 0) then
-         call abort_ice( &
-            'create_distrb_cart: error allocating blockGlobalID')
-         return
+   ! set local blockGlobalID array
+   allocate(newDistrb%blockGlobalID(newDistrb%numLocalBlocks), stat=istat)
+   if (ice_memusage_allocErr(istat,subname//'alloc numLocalBlocks')) return
+   do n = 1,nblocks_tot
+      if (my_task+1 == newDistrb%blockLocation(n)) then
+         localID = newDistrb%blockLocalID(n)
+         newDistrb%blockGlobalID (localID) = n
       endif
+   enddo

-      do j=1,nprocsY
-      do i=1,nprocsX
-         processor = (j-1)*nprocsX + i
-
-         if (processor == my_task + 1) then
-            is = (i-1)*numBlocksXPerProc + 1 ! starting block in i
-            ie =  i   *numBlocksXPerProc     ! ending   block in i
-            if (ie > nblocks_x) ie = nblocks_x
-            js = (j-1)*numBlocksYPerProc + 1 ! starting block in j
-            je =  j   *numBlocksYPerProc     ! ending   block in j
-            if (je > nblocks_y) je = nblocks_y
-
-            localID = 0 ! initialize counter for local index
-            do jblock = js,je
-            do iblock = is,ie
-               globalID = (jblock - 1)*nblocks_x + iblock
-               if (workPerBlock(globalID) /= 0) then
-                  localID = localID + 1
-                  newDistrb%blockGlobalID (localID) = globalID
-               endif
-            end do
-            end do
-
-         endif
-
-      end do
-      end do
-
-   else
-      allocate (newDistrb%blockGlobalID(newDistrb%numLocalBlocks), &
-                stat=istat)
-      if (istat > 0) then
-         call abort_ice( &
-            'create_distrb_cart: error allocating blockGlobalID')
-         return
+   ! set/check max_blocks
+   if (lmax_blocks_calc) then
+      if (max_blocks < 0) then
+         max_blocks = newDistrb%numLocalBlocks
       endif
    endif

@@ -749,22 +714,23 @@ function create_distrb_rake(nprocs, workPerBlock) result(newDistrb)
 !
 !----------------------------------------------------------------------

-   integer (int_kind) :: &
-      i,j,n              ,&! dummy loop indices
-      pid                ,&! dummy for processor id
-      istat              ,&! status flag for allocates
-      localBlock         ,&! local block position on processor
-      numOcnBlocks       ,&! number of ocean blocks
-      maxWork            ,&! max amount of work in any block
-      nprocsX            ,&! num of procs in x for global domain
-      nprocsY              ! num of procs in y for global domain
+   integer (int_kind) :: &
+      i, j, n,      &! dummy loop indices
+      processor,    &! dummy for processor id
+      istat,        &! status flag for allocates
+      globalID,     &! global block ID
+      localID,      &! block location on this processor
+      numOcnBlocks, &! number of ocean blocks
+      maxWork,      &! max amount of work in any block
+      nprocsX,      &! num of procs in x for global domain
+      nprocsY        ! num of procs in y for global domain

    integer (int_kind), dimension(:), allocatable :: &
-      priority           ,&! priority for moving blocks
-      workTmp            ,&! work per row or column for rake algrthm
+      priority,     &! priority for moving blocks
+      workTmp,      &! work per row or column for rake algrthm
       procTmp        ! temp processor id for rake algrthm

-   type (distrb) :: dist ! temp hold distribution
+   type (distrb) :: dist  ! temp hold distribution

    character(len=*),parameter :: subname='(create_distrb_rake)'

@@ -774,7 +740,8 @@ function create_distrb_rake(nprocs, workPerBlock) result(newDistrb)
 !
 !----------------------------------------------------------------------

-   dist = create_distrb_cart(nprocs, workPerBlock)
+   ! ignore max_block calc in create_distrb_cart and recompute below
+   dist = create_distrb_cart(nprocs, workPerBlock, max_blocks_calc=.false.)

 !----------------------------------------------------------------------
 !
@@ -791,11 +758,7 @@ function create_distrb_rake(nprocs, workPerBlock) result(newDistrb)
       write(nu_diag,*) subname,' 1d rake on entire distribution'

       allocate(priority(nblocks_tot), stat=istat)
-      if (istat > 0) then
-         call abort_ice( &
-            'create_distrb_rake: error allocating priority')
-         return
-      endif
+      if (ice_memusage_allocErr(istat,subname//'alloc priority')) return

       !*** initialize priority array

@@ -811,11 +774,7 @@ function create_distrb_rake(nprocs, workPerBlock) result(newDistrb)
       end do

       allocate(workTmp(nprocs), procTmp(nprocs), stat=istat)
-      if (istat > 0) then
-         call abort_ice( &
-            'create_distrb_rake: error allocating procTmp')
-         return
-      endif
+      if (ice_memusage_allocErr(istat,subname//'alloc procTmp')) return

       workTmp(:) = 0
       do i=1,nprocs
@@ -831,11 +790,7 @@ function create_distrb_rake(nprocs, workPerBlock) result(newDistrb)
                                 priority, dist)

       deallocate(workTmp, procTmp, stat=istat)
-      if (istat > 0) then
-         call abort_ice( &
-            'create_distrb_rake: error deallocating procTmp')
-         return
-      endif
+      if (ice_memusage_allocErr(istat,subname//'dealloc procTmp')) return

 !----------------------------------------------------------------------
 !
@@ -856,11 +811,7 @@ function create_distrb_rake(nprocs, workPerBlock) result(newDistrb)
 !----------------------------------------------------------------------

       allocate(priority(nblocks_tot), stat=istat)
-      if (istat > 0) then
-         call abort_ice( &
-            'create_distrb_rake: error allocating priority')
-         return
-      endif
+      if (ice_memusage_allocErr(istat,subname//'alloc priority')) return

       !*** set highest priority such that eastern-most blocks
       !*** and blocks with the least amount of work are
@@ -879,20 +830,16 @@ function create_distrb_rake(nprocs, workPerBlock) result(newDistrb)
       end do

       allocate(workTmp(nprocsX), procTmp(nprocsX), stat=istat)
-      if (istat > 0) then
-         call abort_ice( &
-            'create_distrb_rake: error allocating procTmp')
-         return
-      endif
+      if (ice_memusage_allocErr(istat,subname//'alloc procTmp')) return

       do j=1,nprocsY

          workTmp(:) = 0
          do i=1,nprocsX
-            pid = (j-1)*nprocsX + i
-            procTmp(i) = pid
+            processor = (j-1)*nprocsX + i
+            procTmp(i) = processor
             do n=1,nblocks_tot
-               if (dist%blockLocation(n) == pid) then
+               if (dist%blockLocation(n) == processor) then
                   workTmp(i) = workTmp(i) + workPerBlock(n)
               endif
            end do
         end do
@@ -903,11 +850,7 @@ function create_distrb_rake(nprocs, workPerBlock) result(newDistrb)
      end do

      deallocate(workTmp, procTmp, stat=istat)
-      if (istat > 0) then
-         call abort_ice( &
-            'create_distrb_rake: error deallocating procTmp')
-         return
-      endif
+      if (ice_memusage_allocErr(istat,subname//'dealloc procTmp')) return

 !----------------------------------------------------------------------
 !
@@ -930,20 +873,16 @@ function create_distrb_rake(nprocs, workPerBlock) result(newDistrb)
      end do

      allocate(workTmp(nprocsY), procTmp(nprocsY), stat=istat)
-      if (istat > 0) then
-         call abort_ice( &
-            'create_distrb_rake: error allocating procTmp')
-         return
-      endif
+      if (ice_memusage_allocErr(istat,subname//'alloc procTmp')) return

      do i=1,nprocsX

         workTmp(:) = 0
         do j=1,nprocsY
-            pid = (j-1)*nprocsX + i
-            procTmp(j) = pid
+            processor = (j-1)*nprocsX + i
+            procTmp(j) = processor
            do n=1,nblocks_tot
-               if (dist%blockLocation(n) == pid) then
+               if (dist%blockLocation(n) == processor) then
                  workTmp(j) = workTmp(j) + workPerBlock(n)
               endif
            end do
         end do
@@ -955,11 +894,7 @@ function create_distrb_rake(nprocs, workPerBlock) result(newDistrb)
      end do

      deallocate(workTmp, procTmp, priority, stat=istat)
-      if (istat > 0) then
-         call abort_ice( &
-            'create_distrb_rake: error deallocating procTmp')
-         return
-      endif
+      if (ice_memusage_allocErr(istat,subname//'dealloc procTmp')) return

    endif   ! 1d or 2d rake
@@ -975,76 +910,46 @@ function create_distrb_rake(nprocs, workPerBlock) result(newDistrb)

    allocate(newDistrb%blockLocation(nblocks_tot), &
             newDistrb%blockLocalID(nblocks_tot), stat=istat)
-   if (istat > 0) then
-      call abort_ice( &
-         'create_distrb_rake: error allocating blockLocation or blockLocalID')
-      return
-   endif
+   if (ice_memusage_allocErr(istat,subname//'alloc blockLocation or blockLocalID')) return

-   allocate (newDistrb%blockCnt(nprocs))
+   allocate(newDistrb%blockCnt(nprocs), stat=istat)
+   if (ice_memusage_allocErr(istat,subname//'alloc blockCnt')) return
    newDistrb%blockCnt(:) = 0

-   allocate(newDistrb%blockIndex(nprocs,max_blocks))
-   newDistrb%blockIndex(:,:) = 0
-
-   allocate(procTmp(nprocs), stat=istat)
-   if (istat > 0) then
-      call abort_ice( &
-         'create_distrb_rake: error allocating procTmp')
-      return
-   endif
-
-   procTmp = 0
    do n=1,nblocks_tot
-      pid = dist%blockLocation(n)   ! processor id
-      newDistrb%blockLocation(n) = pid
+      globalID = n
+      processor = dist%blockLocation(globalID)   ! processor id
+      newDistrb%blockLocation(globalID) = processor

-      if (pid > 0) then
-         procTmp(pid) = procTmp(pid) + 1
-         if (procTmp(pid) > max_blocks) then
-            call abort_ice(subname//'ERROR: max_blocks too small')
-            return
-         endif
-         newDistrb%blockLocalID (n) = procTmp(pid)
-         newDistrb%blockIndex(pid,procTmp(pid)) = n
+      if (processor > 0) then
+         newDistrb%blockCnt(processor) = newDistrb%blockCnt(processor) + 1
+         localID = newDistrb%blockCnt(processor)
+         newDistrb%blockLocation(globalID) = processor
+         newDistrb%blockLocalID (globalID) = localID
       else
-         newDistrb%blockLocalID (n) = 0
+         newDistrb%blockLocation(globalID) = 0
+         newDistrb%blockLocalID (globalID) = 0
       endif
    end do

-   newDistrb%blockCnt(:) = procTmp(:)
-   newDistrb%numLocalBlocks = procTmp(my_task+1)
-
-   if (minval(procTmp) < 1) then
-      call abort_ice(subname//'ERROR: processors left with no blocks')
-      return
-   endif
-
-   deallocate(procTmp, stat=istat)
-
-   if (istat > 0) then
-      call abort_ice(subname//'ERROR: allocating last procTmp')
-      return
-   endif
+   newDistrb%numLocalBlocks = newDistrb%blockCnt(my_task+1)

-   allocate(newDistrb%blockGlobalID(newDistrb%numLocalBlocks), &
-            stat=istat)
-
-   if (istat > 0) then
-      call abort_ice(subname//'ERROR: allocating blockGlobalID')
-      return
-   endif
-
-   localBlock = 0
-   do n=1,nblocks_tot
-      if (newDistrb%blockLocation(n) == my_task+1) then
-         localBlock = localBlock + 1
-         newDistrb%blockGlobalID(localBlock) = n
+   ! set local blockGlobalID array
+   allocate(newDistrb%blockGlobalID(newDistrb%numLocalBlocks), stat=istat)
+   if (ice_memusage_allocErr(istat,subname//'alloc numLocalBlocks')) return
+   do n = 1,nblocks_tot
+      if (my_task+1 == newDistrb%blockLocation(n)) then
+         localID = newDistrb%blockLocalID(n)
+         newDistrb%blockGlobalID (localID) = n
       endif
-   end do
+   enddo

-!----------------------------------------------------------------------
+   ! set/check max_blocks
+   if (max_blocks < 0) then
+      max_blocks = newDistrb%numLocalBlocks
+   endif

+   ! destroy cart distribution
    call ice_distributionDestroy(dist)

 !----------------------------------------------------------------------
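The rebuild loop above drops the procTmp scratch array and the blockIndex table: the running per-processor count in blockCnt now doubles as the local block ID, and blockGlobalID is recovered in a single pass afterwards. A standalone sketch of that bookkeeping with toy data:

program blockcnt_sketch
   implicit none
   integer, parameter :: nblocks_tot = 6, nprocs = 2
   integer :: blockLocation(nblocks_tot), blockLocalID(nblocks_tot)
   integer :: blockCnt(nprocs)
   integer :: n, processor

   blockLocation = [1, 2, 0, 2, 1, 2]   ! owner per block, 0 = eliminated

   ! single pass: the running per-processor count *is* the local ID
   blockCnt = 0
   do n = 1, nblocks_tot
      processor = blockLocation(n)
      if (processor > 0) then
         blockCnt(processor) = blockCnt(processor) + 1
         blockLocalID(n) = blockCnt(processor)
      else
         blockLocalID(n) = 0
      endif
   enddo
   write(*,*) 'blockCnt     =', blockCnt
   write(*,*) 'blockLocalID =', blockLocalID
end program blockcnt_sketch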
no work - eliminate block from distribution newDistrb%blockLocation(globalID) = 0 newDistrb%blockLocalID (globalID) = 0 endif + enddo + enddo + newDistrb%numLocalBlocks = newDistrb%blockCnt(my_task+1) + + ! set local blockGlobalID array + allocate(newDistrb%blockGlobalID(newDistrb%numLocalBlocks), stat=istat) + if (ice_memusage_allocErr(istat,subname//'alloc numLocalBlocks')) return + do n = 1,nblocks_tot + if (my_task+1 == newDistrb%blockLocation(n)) then + localID = newDistrb%blockLocalID(n) + newDistrb%blockGlobalID (localID) = n + endif + enddo - end do - end do - - newDistrb%numLocalBlocks = proc_tmp(my_task+1) - newDistrb%blockCnt(:) = proc_tmp(:) - deallocate(proc_tmp) - -! write(nu_diag,*) 'my_task,newDistrb%numLocalBlocks',& -! my_task,newDistrb%numLocalBlocks - -!---------------------------------------------------------------------- -! -! now store the local info -! -!---------------------------------------------------------------------- - - globalID = 0 - - if (newDistrb%numLocalBlocks > 0) then - allocate (newDistrb%blockGlobalID(newDistrb%numLocalBlocks), & - stat=istat) - if (istat > 0) then - call abort_ice( & - 'create_distrb_roundrobin: error allocating numLocalBlocks') - return - endif - - processor = my_task + 1 - do localID = 1,newDistrb%numLocalBlocks - newDistrb%blockGlobalID (localID) = newDistrb%blockIndex(processor,& - localID) - enddo + ! set/check max_blocks + if (max_blocks < 0) then + max_blocks = newDistrb%numLocalBlocks endif !---------------------------------------------------------------------- @@ -1201,7 +1075,7 @@ function create_distrb_spiralcenter(nprocs, workPerBlock) result(newDistrb) ! standalone CAM mode. integer (int_kind), intent(in) :: & - nprocs ! number of processors in this distribution + nprocs ! number of processors in this distribution integer (int_kind), dimension(:), intent(in) :: & workPerBlock ! amount of work per block @@ -1218,14 +1092,13 @@ function create_distrb_spiralcenter(nprocs, workPerBlock) result(newDistrb) integer (int_kind) :: & n, i, j, ic, jc, id, jd, cnt, &! dummy loop indices - istat, &! status flag for allocation - processor, &! processor position in cartesian decomp - nblocklist, &! number of blocks in blocklist - globalID, &! global block ID - localID ! block location on this processor + istat, &! status flag for allocation + processor, &! processor position in cartesian decomp + nblocklist, &! number of blocks in blocklist + globalID, &! global block ID + localID ! block location on this processor integer (int_kind), dimension(:), allocatable :: & - proc_tmp, &! temp processor id blocklist ! temp block ordered list integer (int_kind), dimension(:,:), allocatable :: & blockchk ! temp block check array @@ -1254,10 +1127,12 @@ function create_distrb_spiralcenter(nprocs, workPerBlock) result(newDistrb) ! !---------------------------------------------------------------------- - allocate (newDistrb%blockLocation(nblocks_tot), & - newDistrb%blockLocalID (nblocks_tot), stat=istat) + allocate(newDistrb%blockLocation(nblocks_tot), & + newDistrb%blockLocalID (nblocks_tot), stat=istat) + if (ice_memusage_allocErr(istat,subname//'alloc blockLocation or blockLocalID')) return - allocate (newDistrb%blockCnt(nprocs)) + allocate(newDistrb%blockCnt(nprocs), stat=istat) + if (ice_memusage_allocErr(istat,subname//'alloc blockCnt')) return !---------------------------------------------------------------------- ! @@ -1270,18 +1145,15 @@ function create_distrb_spiralcenter(nprocs, workPerBlock) result(newDistrb) ! 
!---------------------------------------------------------------------- - allocate(proc_tmp(nprocs)) - allocate(blocklist(nblocks_tot)) - allocate(blockchk(nblocks_x,nblocks_y)) + allocate(blocklist(nblocks_tot), stat=istat) + if (ice_memusage_allocErr(istat,subname//'alloc blocklist')) return + allocate(blockchk(nblocks_x,nblocks_y), stat=istat) + if (ice_memusage_allocErr(istat,subname//'alloc blockchk')) return nblocklist = 0 blocklist = 0 blockchk = 0 processor = 0 globalID = 0 - proc_tmp = 0 - - allocate(newDistrb%blockIndex(nprocs,max_blocks)) - newDistrb%blockIndex(:,:) = 0 jc = nblocks_y/2 ic = nblocks_x/2 @@ -1353,10 +1225,12 @@ function create_distrb_spiralcenter(nprocs, workPerBlock) result(newDistrb) if (nblocklist /= nblocks_x*nblocks_y .or. & maxval(blockchk) /= 1 .or. minval(blockchk) /= 1) then - call abort_ice(subname//'ERROR: blockchk invalid') + call abort_ice(subname//'ERROR: blockchk invalid', & + file=__FILE__, line=__LINE__) return endif - deallocate(blockchk) + deallocate(blockchk, stat=istat) + if (ice_memusage_allocErr(istat,subname//'dealloc blockchk')) return !---------------------------------------------------------------------- ! @@ -1364,55 +1238,42 @@ function create_distrb_spiralcenter(nprocs, workPerBlock) result(newDistrb) ! !---------------------------------------------------------------------- - do n = 1,nblocklist - - globalID = blocklist(n) - - if (workPerBlock(globalID) /= 0) then - processor = mod(processor,nprocs) + 1 - proc_tmp(processor) = proc_tmp(processor) + 1 - localID = proc_tmp(processor) - if (localID > max_blocks) then - call abort_ice(subname//'ERROR: max_blocks too small') - return - endif - newDistrb%blockLocation(globalID) = processor - newDistrb%blockLocalID (globalID) = localID - newDistrb%blockIndex(processor,localID) = globalID - else ! no work - eliminate block from distribution - newDistrb%blockLocation(globalID) = 0 - newDistrb%blockLocalID (globalID) = 0 - endif + newDistrb%numLocalBlocks = 0 + newDistrb%blockCnt(:) = 0 + do n = 1,nblocklist + globalID = blocklist(n) + if (workPerBlock(globalID) /= 0) then + processor = mod(processor,nprocs) + 1 + newDistrb%blockCnt(processor) = newDistrb%blockCnt(processor) + 1 + localID = newDistrb%blockCnt(processor) + newDistrb%blockLocation(globalID) = processor + newDistrb%blockLocalID (globalID) = localID + else ! no work - eliminate block from distribution + newDistrb%blockLocation(globalID) = 0 + newDistrb%blockLocalID (globalID) = 0 + endif end do + newDistrb%numLocalBlocks = newDistrb%blockCnt(my_task+1) + + ! set local blockGlobalID array + allocate(newDistrb%blockGlobalID(newDistrb%numLocalBlocks), stat=istat) + if (ice_memusage_allocErr(istat,subname//'alloc numLocalBlocks')) return + do n = 1,nblocks_tot + if (my_task+1 == newDistrb%blockLocation(n)) then + localID = newDistrb%blockLocalID(n) + newDistrb%blockGlobalID (localID) = n + endif + enddo - newDistrb%numLocalBlocks = proc_tmp(my_task+1) - newDistrb%blockCnt(:) = proc_tmp(:) - deallocate(proc_tmp) - deallocate(blocklist) - -! write(nu_diag,*) 'my_task,newDistrb%numLocalBlocks',& -! my_task,newDistrb%numLocalBlocks - -!---------------------------------------------------------------------- -! -! now store the local info -! 
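For orientation: each create_distrb_* routine in these hunks now counts blocks directly into newDistrb%blockCnt and rebuilds blockGlobalID in a second pass over blockLocation, then fills in max_blocks from numLocalBlocks when it comes in as -1. A minimal Python model of that two-pass bookkeeping (an illustrative sketch, not part of the patch; names mirror the Fortran):

    # blockCnt replaces the old proc_tmp scratch arrays; blockGlobalID is
    # rebuilt in a second pass instead of through the removed blockIndex array.
    def build_local_ids(block_location, nprocs, my_task):
        # block_location[n] = owning processor (1-based), 0 = block has no work
        block_cnt = [0] * (nprocs + 1)                 # slots 1..nprocs used
        block_local_id = [0] * len(block_location)
        for n, proc in enumerate(block_location):      # pass 1: count + local IDs
            if proc > 0:
                block_cnt[proc] += 1
                block_local_id[n] = block_cnt[proc]
        num_local = block_cnt[my_task + 1]             # numLocalBlocks
        block_global_id = [0] * num_local
        for n, proc in enumerate(block_location):      # pass 2: my global block IDs
            if proc == my_task + 1:
                block_global_id[block_local_id[n] - 1] = n + 1
        return block_cnt[1:], block_local_id, block_global_id

    # Six blocks round-robined over two processors, seen from task 0:
    # build_local_ids([1, 2, 1, 2, 1, 2], 2, 0) -> ([3, 3], [1, 1, 2, 2, 3, 3], [1, 3, 5])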
-!---------------------------------------------------------------------- - - globalID = 0 - - if (newDistrb%numLocalBlocks > 0) then - allocate (newDistrb%blockGlobalID(newDistrb%numLocalBlocks), & - stat=istat) - - processor = my_task + 1 - do localID = 1,newDistrb%numLocalBlocks - newDistrb%blockGlobalID (localID) = newDistrb%blockIndex(processor,& - localID) - enddo + ! set/check max_blocks + if (max_blocks < 0) then + max_blocks = newDistrb%numLocalBlocks endif + deallocate(blocklist, stat=istat) + if (ice_memusage_allocErr(istat,subname//'dealloc blocklist')) return + !---------------------------------------------------------------------- end function create_distrb_spiralcenter @@ -1426,7 +1287,7 @@ function create_distrb_wghtfile(nprocs, workPerBlock) result(newDistrb) ! standalone CAM mode. integer (int_kind), intent(in) :: & - nprocs ! number of processors in this distribution + nprocs ! number of processors in this distribution integer (int_kind), dimension(:), intent(in) :: & workPerBlock ! amount of work per block @@ -1449,9 +1310,6 @@ function create_distrb_wghtfile(nprocs, workPerBlock) result(newDistrb) globalID, &! global block ID localID ! block location on this processor - integer (int_kind), dimension(:), allocatable :: & - proc_tmp ! temp processor id - logical (log_kind) :: up ! direction of pe counting character(len=*),parameter :: subname='(create_distrb_wghtfile)' @@ -1478,10 +1336,12 @@ function create_distrb_wghtfile(nprocs, workPerBlock) result(newDistrb) ! !---------------------------------------------------------------------- - allocate (newDistrb%blockLocation(nblocks_tot), & - newDistrb%blockLocalID (nblocks_tot), stat=istat) + allocate(newDistrb%blockLocation(nblocks_tot), & + newDistrb%blockLocalID (nblocks_tot), stat=istat) + if (ice_memusage_allocErr(istat,subname//'alloc blockLocation or blockLocalID')) return - allocate (newDistrb%blockCnt(nprocs)) + allocate(newDistrb%blockCnt(nprocs), stat=istat) + if (ice_memusage_allocErr(istat,subname//'alloc blockCnt')) return !---------------------------------------------------------------------- ! @@ -1491,94 +1351,76 @@ function create_distrb_wghtfile(nprocs, workPerBlock) result(newDistrb) ! !---------------------------------------------------------------------- - allocate(proc_tmp(nprocs)) processor = 0 - proc_tmp = 0 + newDistrb%numLocalBlocks = 0 + newDistrb%blockCnt(:) = 0 up = .true. - allocate(newDistrb%blockIndex(nprocs,max_blocks)) - newDistrb%blockIndex(:,:) = 0 - if (my_task == master_task) & write(nu_diag,*) subname,' workPerBlock = ',minval(workPerBlock),maxval(workPerBlock) if (minval(workPerBlock) < 0 .or. maxval(workPerBlock) > 12) then write(nu_diag,*) subname,' workPerBlock = ',minval(workPerBlock),maxval(workPerBlock) - call abort_ice(subname//'ERROR: workPerBlock incorrect') + call abort_ice(subname//'ERROR: workPerBlock incorrect', & + file=__FILE__, line=__LINE__) return endif ! do not distribute blocks with work=0 - do n=maxval(workPerBlock),1,-1 - cnt = 0 - do j=1,nblocks_y - do i=1,nblocks_x - - if (mod(j,2) == 1) then - globalID = (j-1)*nblocks_x + i - else - globalID = (j-1)*nblocks_x + nblocks_x - i + 1 - endif - - if (workPerBlock(globalID) == 0) then ! no work - eliminate block from distribution - newDistrb%blockLocation(globalID) = 0 - newDistrb%blockLocalID (globalID) = 0 - elseif (workPerBlock(globalID) == n) then - cnt = cnt + 1 -! 
processor = mod(processor,nprocs) + 1 - if (up) then - processor = processor + 1 + do n = maxval(workPerBlock),1,-1 + cnt = 0 + do j=1,nblocks_y + do i=1,nblocks_x + if (mod(j,2) == 1) then + globalID = (j-1)*nblocks_x + i else - processor = processor - 1 - endif - if (processor > nprocs) then - up = .false. - processor = nprocs - elseif (processor < 1) then - up = .true. - processor = 1 + globalID = (j-1)*nblocks_x + nblocks_x - i + 1 endif - proc_tmp(processor) = proc_tmp(processor) + 1 - localID = proc_tmp(processor) - if (localID > max_blocks) then - call abort_ice(subname//'ERROR: max_blocks too small') - return + if (workPerBlock(globalID) == 0) then ! no work - eliminate block from distribution + newDistrb%blockLocation(globalID) = 0 + newDistrb%blockLocalID (globalID) = 0 + elseif (workPerBlock(globalID) == n) then + cnt = cnt + 1 + if (up) then + processor = processor + 1 + else + processor = processor - 1 + endif + if (processor > nprocs) then + up = .false. + processor = nprocs + elseif (processor < 1) then + up = .true. + processor = 1 + endif + newDistrb%blockCnt(processor) = newDistrb%blockCnt(processor) + 1 + localID = newDistrb%blockCnt(processor) + newDistrb%blockLocation(globalID) = processor + newDistrb%blockLocalID (globalID) = localID endif - newDistrb%blockLocation(globalID) = processor - newDistrb%blockLocalID (globalID) = localID - newDistrb%blockIndex(processor,localID) = globalID - endif - - end do - end do -! write(nu_diag,*) 'create_distrb_wghtfile n cnt = ',n,cnt + end do + end do +! write(nu_diag,*) subname,'n cnt = ',n,cnt end do + newDistrb%numLocalBlocks = newDistrb%blockCnt(my_task+1) + + ! set local blockGlobalID array + allocate(newDistrb%blockGlobalID(newDistrb%numLocalBlocks), stat=istat) + if (ice_memusage_allocErr(istat,subname//'alloc numLocalBlocks')) return + do n = 1,nblocks_tot + if (my_task+1 == newDistrb%blockLocation(n)) then + localID = newDistrb%blockLocalID(n) + newDistrb%blockGlobalID (localID) = n + endif + enddo - newDistrb%numLocalBlocks = proc_tmp(my_task+1) - newDistrb%blockCnt(:) = proc_tmp(:) - deallocate(proc_tmp) + ! set/check max_blocks + if (max_blocks < 0) then + max_blocks = newDistrb%numLocalBlocks + endif -! write(nu_diag,*) 'my_task,newDistrb%numLocalBlocks',& +! write(nu_diag,*) subname,'my_task,newDistrb%numLocalBlocks',& ! my_task,newDistrb%numLocalBlocks -!---------------------------------------------------------------------- -! -! now store the local info -! -!---------------------------------------------------------------------- - - globalID = 0 - - if (newDistrb%numLocalBlocks > 0) then - allocate (newDistrb%blockGlobalID(newDistrb%numLocalBlocks), & - stat=istat) - - processor = my_task + 1 - do localID = 1,newDistrb%numLocalBlocks - newDistrb%blockGlobalID (localID) = newDistrb%blockIndex(processor,& - localID) - enddo - endif - !---------------------------------------------------------------------- end function create_distrb_wghtfile @@ -1592,7 +1434,7 @@ function create_distrb_sectrobin(nprocs, workPerBlock) result(newDistrb) ! standalone CAM mode. integer (int_kind), intent(in) :: & - nprocs ! number of processors in this distribution + nprocs ! number of processors in this distribution integer (int_kind), dimension(:), intent(in) :: & workPerBlock ! amount of work per block @@ -1608,18 +1450,15 @@ function create_distrb_sectrobin(nprocs, workPerBlock) result(newDistrb) !---------------------------------------------------------------------- integer (int_kind) :: & - i, j, &! dummy loop indices + i, j, n, &! 
dummy loop indices istat, &! status flag for allocation mblocks, &! estimate of max blocks per pe processor, &! processor position in cartesian decomp globalID, &! global block ID localID ! block location on this processor - integer (int_kind), dimension(:), allocatable :: & - proc_tmp ! temp processor id - logical (log_kind), dimension(:), allocatable :: & - bfree ! map of assigned blocks + bfree ! map of assigned blocks, true = free integer (int_kind) :: cnt, blktogether, i2 integer (int_kind) :: totblocks, nchunks @@ -1649,15 +1488,12 @@ function create_distrb_sectrobin(nprocs, workPerBlock) result(newDistrb) ! !---------------------------------------------------------------------- - allocate (newDistrb%blockLocation(nblocks_tot), & - newDistrb%blockLocalID (nblocks_tot), stat=istat) - if (istat > 0) then - call abort_ice( & - 'create_distrb_sectrobin: error allocating blockLocation or blockLocalID') - return - endif + allocate(newDistrb%blockLocation(nblocks_tot), & + newDistrb%blockLocalID (nblocks_tot), stat=istat) + if (ice_memusage_allocErr(istat,subname//'alloc blockLocation or blockLocalID')) return - allocate (newDistrb%blockCnt(nprocs)) + allocate(newDistrb%blockCnt(nprocs), stat=istat) + if (ice_memusage_allocErr(istat,subname//'alloc blockCnt')) return !---------------------------------------------------------------------- ! @@ -1665,15 +1501,12 @@ function create_distrb_sectrobin(nprocs, workPerBlock) result(newDistrb) ! !---------------------------------------------------------------------- - allocate(proc_tmp(nprocs)) processor = 0 globalID = 0 - proc_tmp = 0 - - allocate(newDistrb%blockIndex(nprocs,max_blocks)) - newDistrb%blockIndex(:,:) = 0 - - allocate(bfree(nblocks_x*nblocks_y)) + newDistrb%numLocalBlocks = 0 + newDistrb%blockCnt(:) = 0 + allocate(bfree(nblocks_x*nblocks_y), stat=istat) + if (ice_memusage_allocErr(istat,subname//'alloc bfree')) return bfree=.true. totblocks = 0 @@ -1695,12 +1528,13 @@ function create_distrb_sectrobin(nprocs, workPerBlock) result(newDistrb) blktogether = max(1,nint(float(totblocks)/float(6*nprocs))) -! write(nu_diag,*) 'ice_distrb_sectrobin totblocks = ',totblocks,nblocks_y*nblocks_x +! write(nu_diag,*) subname,'totblocks = ',totblocks,nblocks_y*nblocks_x !------------------------------ ! southern group of blocks ! weave back and forth in i vs j ! go south to north, low - high pes + ! keepgoing to false to stop distribution !------------------------------ processor=1 @@ -1719,24 +1553,18 @@ function create_distrb_sectrobin(nprocs, workPerBlock) result(newDistrb) cnt = 0 if (processor == 1) keepgoing = .false. endif -! write(nu_diag,'(a,6i7,l2)') 'tcx ',i,j,globalID,cnt,blktogether,processor,keepgoing +! write(nu_diag,'(a,6i7,l2)') subname,i,j,globalID,cnt,blktogether,processor,keepgoing if (keepgoing) then if (bfree(globalID)) then if (workPerBlock(globalID) /= 0) then - proc_tmp(processor) = proc_tmp(processor) + 1 - localID = proc_tmp(processor) - if (localID > max_blocks) then - call abort_ice(subname//'ERROR: max_blocks too small') - return - endif + newDistrb%blockCnt(processor) = newDistrb%blockCnt(processor) + 1 + localID = newDistrb%blockCnt(processor) newDistrb%blockLocation(globalID) = processor newDistrb%blockLocalID (globalID) = localID - newDistrb%blockIndex(processor,localID) = globalID cnt = cnt + 1 totblocks = totblocks-1 bfree(globalID) = .false. - else ! no work - eliminate block from distribution bfree(globalID) = .false. 
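A quick numerical check of the sweep chunking used here (an illustrative sketch with made-up sizes, not from the patch): blktogether is the run of consecutive blocks a processor receives before the sweep moves to the next one.

    # Python analogue of: blktogether = max(1,nint(float(totblocks)/float(6*nprocs)))
    totblocks, nprocs = 80, 4      # hypothetical block and task counts
    blktogether = max(1, round(totblocks / (6 * nprocs)))
    print(blktogether)             # 3 -> runs of about 3 blocks per processor per sweep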
newDistrb%blockLocation(globalID) = 0 @@ -1747,12 +1575,13 @@ function create_distrb_sectrobin(nprocs, workPerBlock) result(newDistrb) end do end do -! write(nu_diag,*) 'ice_distrb_sectrobin totblocks left after southern = ',totblocks +! write(nu_diag,*) subname,'totblocks left after southern = ',totblocks !------------------------------ ! northern group of blocks ! weave back and forth in i vs j ! go north to south, high - low pes + ! keepgoing to false to stop distribution !------------------------------ processor=nprocs @@ -1775,19 +1604,13 @@ function create_distrb_sectrobin(nprocs, workPerBlock) result(newDistrb) if (keepgoing) then if (bfree(globalID)) then if (workPerBlock(globalID) /= 0) then - proc_tmp(processor) = proc_tmp(processor) + 1 - localID = proc_tmp(processor) - if (localID > max_blocks) then - call abort_ice(subname//'ERROR: max_blocks too small') - return - endif + newDistrb%blockCnt(processor) = newDistrb%blockCnt(processor) + 1 + localID = newDistrb%blockCnt(processor) newDistrb%blockLocation(globalID) = processor newDistrb%blockLocalID (globalID) = localID - newDistrb%blockIndex(processor,localID) = globalID cnt = cnt + 1 - totblocks = totblocks - 1 + totblocks = totblocks-1 bfree(globalID) = .false. - else ! no work - eliminate block from distribution bfree(globalID) = .false. newDistrb%blockLocation(globalID) = 0 @@ -1798,12 +1621,13 @@ function create_distrb_sectrobin(nprocs, workPerBlock) result(newDistrb) end do end do -! write(nu_diag,*) 'ice_distrb_sectrobin totblocks left after northern = ',totblocks +! write(nu_diag,*) subname,'totblocks left after northern = ',totblocks !------------------------------ ! central group of blocks ! weave back and forth in i vs j ! go north to south, low - high / low - high pes + ! distribute rest of blocks in 2 chunks per proc !------------------------------ nchunks = 2*nprocs @@ -1819,35 +1643,29 @@ function create_distrb_sectrobin(nprocs, workPerBlock) result(newDistrb) endif globalID = (j-1)*nblocks_x + i2 if (totblocks > 0) then - do while (proc_tmp(processor) >= mblocks .or. cnt >= blktogether) - nchunks = nchunks - 1 - if (nchunks == 0) then - blktogether = 1 - else - blktogether = max(1,nint(float(totblocks)/float(nchunks))) - endif - cnt = 0 - processor = mod(processor,nprocs) + 1 - enddo + do while (newDistrb%blockCnt(processor) >= mblocks .or. cnt >= blktogether) + nchunks = nchunks - 1 + if (nchunks == 0) then + blktogether = 1 + else + blktogether = max(1,nint(float(totblocks)/float(nchunks))) + endif + cnt = 0 + processor = mod(processor,nprocs) + 1 + enddo endif -! write(nu_diag,*) 'ice_distrb_sectrobin central ',i,j,totblocks,cnt,nchunks,blktogether,processor +! write(nu_diag,*) subname,'central ',i,j,totblocks,cnt,nchunks,blktogether,processor if (bfree(globalID)) then if (workPerBlock(globalID) /= 0) then - proc_tmp(processor) = proc_tmp(processor) + 1 - localID = proc_tmp(processor) - if (localID > max_blocks) then - call abort_ice(subname//'ERROR: max_blocks too small') - return - endif + newDistrb%blockCnt(processor) = newDistrb%blockCnt(processor) + 1 + localID = newDistrb%blockCnt(processor) newDistrb%blockLocation(globalID) = processor newDistrb%blockLocalID (globalID) = localID - newDistrb%blockIndex(processor,localID) = globalID cnt = cnt + 1 totblocks = totblocks-1 bfree(globalID) = .false. - else ! no work - eliminate block from distribution bfree(globalID) = .false. 
newDistrb%blockLocation(globalID) = 0 @@ -1857,34 +1675,25 @@ function create_distrb_sectrobin(nprocs, workPerBlock) result(newDistrb) end do end do - newDistrb%numLocalBlocks = proc_tmp(my_task+1) - newDistrb%blockCnt(:) = proc_tmp(:) - deallocate(proc_tmp) - deallocate(bfree) + newDistrb%numLocalBlocks = newDistrb%blockCnt(my_task+1) -!---------------------------------------------------------------------- -! -! now store the local info -! -!---------------------------------------------------------------------- - - globalID = 0 + ! set local blockGlobalID array + allocate(newDistrb%blockGlobalID(newDistrb%numLocalBlocks), stat=istat) + if (ice_memusage_allocErr(istat,subname//'alloc numLocalBlocks')) return + do n = 1,nblocks_tot + if (my_task+1 == newDistrb%blockLocation(n)) then + localID = newDistrb%blockLocalID(n) + newDistrb%blockGlobalID (localID) = n + endif + enddo - if (newDistrb%numLocalBlocks > 0) then - allocate (newDistrb%blockGlobalID(newDistrb%numLocalBlocks), & - stat=istat) - if (istat > 0) then - call abort_ice( & - 'create_distrb_sectrobin: error allocating numLocalBlocks') - return + ! set/check max_blocks + if (max_blocks < 0) then + max_blocks = newDistrb%numLocalBlocks endif - processor = my_task + 1 - do localID = 1,newDistrb%numLocalBlocks - newDistrb%blockGlobalID (localID) = newDistrb%blockIndex(processor,& - localID) - enddo - endif + deallocate(bfree, stat=istat) + if (ice_memusage_allocErr(istat,subname//'dealloc bfree')) return !---------------------------------------------------------------------- @@ -1899,7 +1708,7 @@ function create_distrb_sectcart(nprocs, workPerBlock) result(newDistrb) ! standalone CAM mode. integer (int_kind), intent(in) :: & - nprocs ! number of processors in this distribution + nprocs ! number of processors in this distribution integer (int_kind), dimension(:), intent(in) :: & workPerBlock ! amount of work per block @@ -1923,9 +1732,6 @@ function create_distrb_sectcart(nprocs, workPerBlock) result(newDistrb) blktogether, &! number of blocks together cnt ! counter - integer (int_kind), dimension(:), allocatable :: & - proc_tmp ! temp processor id - integer (int_kind) :: n character(len=*),parameter :: subname='(create_distrb_sectcart)' @@ -1952,27 +1758,19 @@ function create_distrb_sectcart(nprocs, workPerBlock) result(newDistrb) ! !---------------------------------------------------------------------- - allocate (newDistrb%blockLocation(nblocks_tot), & - newDistrb%blockLocalID (nblocks_tot), stat=istat) - if (istat > 0) then - call abort_ice( & - 'create_distrb_sectcart: error allocating blockLocation or blockLocalID') - return - endif + allocate(newDistrb%blockLocation(nblocks_tot), & + newDistrb%blockLocalID (nblocks_tot), stat=istat) + if (ice_memusage_allocErr(istat,subname//'alloc blockLocation or blockLocalID')) return + + allocate(newDistrb%blockCnt(nprocs), stat=istat) + if (ice_memusage_allocErr(istat,subname//'alloc blockCnt')) return - allocate (newDistrb%blockCnt(nprocs)) !---------------------------------------------------------------------- ! ! distribute blocks linearly across processors in quadrants ! !---------------------------------------------------------------------- - allocate(proc_tmp(nprocs)) - proc_tmp = 0 - - allocate(newDistrb%blockIndex(nprocs,max_blocks)) - newDistrb%blockIndex(:,:) = 0 - blktogether = max(1,nint(float(nblocks_x*nblocks_y)/float(4*nprocs))) ! 
--- two phases, reset processor and cnt for each phase @@ -1980,10 +1778,14 @@ function create_distrb_sectcart(nprocs, workPerBlock) result(newDistrb) ! --- phase 2 is north to south, east to west on the right half of the domain if (mod(nblocks_x,2) /= 0) then - call abort_ice(subname//'ERROR: nblocks_x not divisible by 2') + call abort_ice(subname//'ERROR: nblocks_x not divisible by 2', & + file=__FILE__, line=__LINE__) return endif + newDistrb%numLocalBlocks = 0 + newDistrb%blockCnt(:) = 0 + do n=1,2 processor = 1 cnt = 0 @@ -2006,15 +1808,10 @@ function create_distrb_sectcart(nprocs, workPerBlock) result(newDistrb) cnt = cnt + 1 if (workPerBlock(globalID) /= 0) then - proc_tmp(processor) = proc_tmp(processor) + 1 - localID = proc_tmp(processor) - if (localID > max_blocks) then - call abort_ice(subname//'ERROR: max_blocks too small') - return - endif + newDistrb%blockCnt(processor) = newDistrb%blockCnt(processor) + 1 + localID = newDistrb%blockCnt(processor) newDistrb%blockLocation(globalID) = processor newDistrb%blockLocalID (globalID) = localID - newDistrb%blockIndex(processor,localID) = globalID else ! no work - eliminate block from distribution newDistrb%blockLocation(globalID) = 0 newDistrb%blockLocalID (globalID) = 0 @@ -2023,36 +1820,21 @@ function create_distrb_sectcart(nprocs, workPerBlock) result(newDistrb) end do end do end do + newDistrb%numLocalBlocks = newDistrb%blockCnt(my_task+1) + + ! set local blockGlobalID array + allocate(newDistrb%blockGlobalID(newDistrb%numLocalBlocks), stat=istat) + if (ice_memusage_allocErr(istat,subname//'alloc numLocalBlocks')) return + do n = 1,nblocks_tot + if (my_task+1 == newDistrb%blockLocation(n)) then + localID = newDistrb%blockLocalID(n) + newDistrb%blockGlobalID (localID) = n + endif + enddo - newDistrb%numLocalBlocks = proc_tmp(my_task+1) - newDistrb%blockCnt(:) = proc_tmp(:) - deallocate(proc_tmp) - -! write(nu_diag,*) 'my_task,newDistrb%numLocalBlocks',& -! my_task,newDistrb%numLocalBlocks - -!---------------------------------------------------------------------- -! -! now store the local info -! -!---------------------------------------------------------------------- - - globalID = 0 - - if (newDistrb%numLocalBlocks > 0) then - allocate (newDistrb%blockGlobalID(newDistrb%numLocalBlocks), & - stat=istat) - if (istat > 0) then - call abort_ice( & - 'create_distrb_sectcart: error allocating numLocalBlocks') - return - endif - - processor = my_task + 1 - do localID = 1,newDistrb%numLocalBlocks - newDistrb%blockGlobalID (localID) = newDistrb%blockIndex(processor,& - localID) - enddo + ! set/check max_blocks + if (max_blocks < 0) then + max_blocks = newDistrb%numLocalBlocks endif !---------------------------------------------------------------------- @@ -2061,7 +1843,7 @@ end function create_distrb_sectcart !********************************************************************** - function create_distrb_spacecurve(nprocs,work_per_block) + function create_distrb_spacecurve(nprocs,work_per_block) result(newDistrb) ! This function distributes blocks across processors in a ! load-balanced manner using space-filling curves @@ -2070,14 +1852,14 @@ function create_distrb_spacecurve(nprocs,work_per_block) use ice_spacecurve integer (int_kind), intent(in) :: & - nprocs ! number of processors in this distribution + nprocs ! number of processors in this distribution integer (int_kind), dimension(:), intent(in) :: & - work_per_block ! amount of work per block + work_per_block ! amount of work per block type (distrb) :: & - create_distrb_spacecurve ! 
resulting structure describing - ! load-balanced distribution of blocks + newDistrb ! resulting structure describing Cartesian + ! distribution of blocks !---------------------------------------------------------------------- ! @@ -2086,16 +1868,18 @@ function create_distrb_spacecurve(nprocs,work_per_block) !---------------------------------------------------------------------- integer (int_kind) :: & - i,j,n ,&! dummy loop indices - pid ,&! dummy for processor id + i, j, n, &! dummy loop indices + istat, &! status flag for allocation + processor, &! processor position in cartesian decomp + globalID, &! global block ID localID ! local block position on processor integer (int_kind), dimension(:),allocatable :: & idxT_i,idxT_j ! Temporary indices for SFC integer (int_kind), dimension(:,:),allocatable :: & - Mesh ,&! !arrays to hold Space-filling curve - Mesh2 ,&! + Mesh, &! !arrays to hold Space-filling curve + Mesh2, &! Mesh3 ! integer (int_kind) :: & @@ -2110,11 +1894,6 @@ function create_distrb_spacecurve(nprocs,work_per_block) integer (int_kind) :: subNum, sfcNum logical :: foundx - integer (int_kind), dimension(:), allocatable :: & - proc_tmp ! temp processor id for rake algrthm - - type (distrb) :: dist ! temp hold distribution - character(len=*),parameter :: subname='(create_distrb_spacecurve)' !------------------------------------------------------ @@ -2125,10 +1904,39 @@ function create_distrb_spacecurve(nprocs,work_per_block) !------------------------------------------------------ if((.not. IsFactorable(nblocks_y)) .or. (.not. IsFactorable(nblocks_x))) then - create_distrb_spacecurve = create_distrb_cart(nprocs, work_per_block) + newDistrb = create_distrb_cart(nprocs, work_per_block) return endif +!---------------------------------------------------------------------- +! +! create communicator for this distribution +! +!---------------------------------------------------------------------- + + call create_communicator(newDistrb%communicator, nprocs) + +!---------------------------------------------------------------------- +! +! try to find best processor arrangement +! +!---------------------------------------------------------------------- + + newDistrb%nprocs = nprocs + +!---------------------------------------------------------------------- +! +! allocate space for decomposition +! +!---------------------------------------------------------------------- + + allocate(newDistrb%blockLocation(nblocks_tot), & + newDistrb%blockLocalID (nblocks_tot), stat=istat) + if (ice_memusage_allocErr(istat,subname//'alloc blockLocation or blockLocalID')) return + + allocate(newDistrb%blockCnt(nprocs), stat=istat) + if (ice_memusage_allocErr(istat,subname//'alloc blockCnt')) return + !----------------------------------------------- ! Factor the numbers of blocks in each dimension !----------------------------------------------- @@ -2155,36 +1963,16 @@ function create_distrb_spacecurve(nprocs,work_per_block) sb_x = ProdFactor(xdim) sb_y = ProdFactor(ydim) - call create_communicator(dist%communicator, nprocs) - - dist%nprocs = nprocs - - !---------------------------------------------------------------------- - ! - ! allocate space for decomposition - ! 
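The space-filling-curve path above applies only when nblocks_x and nblocks_y pass IsFactorable; otherwise the routine falls back to create_distrb_cart. A sketch of such a factorability test (assuming the curve supports factors of 2, 3, and 5, as in ice_spacecurve; illustrative only):

    # Hypothetical IsFactorable analogue: n must reduce to 1 using only
    # the curve's supported factors (assumed to be 2, 3, and 5 here).
    def is_factorable(n, factors=(2, 3, 5)):
        for f in factors:
            while n > 1 and n % f == 0:
                n //= f
        return n == 1

    print(is_factorable(16), is_factorable(24), is_factorable(7))   # True True False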
- !---------------------------------------------------------------------- - - allocate (dist%blockLocation(nblocks_tot), & - dist%blockLocalID (nblocks_tot)) - - dist%blockLocation=0 - dist%blockLocalID =0 - - allocate (dist%blockCnt(nprocs)) - dist%blockCnt(:) = 0 - - allocate(dist%blockIndex(nprocs,max_blocks)) - dist%blockIndex(:,:) = 0 - !---------------------------------------------------------------------- ! Create the array to hold the SFC and indices into it !---------------------------------------------------------------------- - allocate(Mesh(curveSize,curveSize)) - allocate(Mesh2(nblocks_x,nblocks_y)) - allocate(Mesh3(nblocks_x,nblocks_y)) - allocate(idxT_i(nblocks_tot),idxT_j(nblocks_tot)) + allocate(Mesh(curveSize,curveSize), & + Mesh2(nblocks_x,nblocks_y), & + Mesh3(nblocks_x,nblocks_y), & + idxT_i(nblocks_tot), & + idxT_j(nblocks_tot), stat=istat) + if (ice_memusage_allocErr(istat,subname//'alloc meshes')) return Mesh = 0 Mesh2 = 0 @@ -2265,7 +2053,7 @@ function create_distrb_spacecurve(nprocs,work_per_block) ! ! First region gets nblocksL+1 blocks per partition ! Second region gets nblocksL blocks per partition -! if(debug_blocks) write(nu_diag,*) 'nprocs,extra,nblocks,nblocksL,s1: ', & +! if(debug_blocks) write(nu_diag,*) subname,'nprocs,extra,nblocks,nblocksL,s1: ', & ! nprocs,extra,nblocks,nblocksL,s1 !----------------------------------------------------------- @@ -2284,7 +2072,7 @@ function create_distrb_spacecurve(nprocs,work_per_block) ! ------------------------------------ ii=ii-1 tmp1 = ii/(nblocksL+1) - dist%blockLocation(n) = tmp1+1 + newDistrb%blockLocation(n) = tmp1+1 else ! ------------------------------------ ! If on the second region of curve @@ -2292,7 +2080,7 @@ function create_distrb_spacecurve(nprocs,work_per_block) ! ------------------------------------ ii=ii-s1-1 tmp1 = ii/nblocksL - dist%blockLocation(n) = extra + tmp1 + 1 + newDistrb%blockLocation(n) = extra + tmp1 + 1 endif endif enddo @@ -2302,54 +2090,52 @@ function create_distrb_spacecurve(nprocs,work_per_block) ! Reset the dist data structure !---------------------------------------------------------------------- - allocate(proc_tmp(nprocs)) - proc_tmp = 0 + globalID = 0 + newDistrb%numLocalBlocks = 0 + newDistrb%blockCnt(:) = 0 do n=1,nblocks_tot - pid = dist%blockLocation(n) - !!!dist%blockLocation(n) = pid - - if(pid>0) then - proc_tmp(pid) = proc_tmp(pid) + 1 - if (proc_tmp(pid) > max_blocks) then - call abort_ice(subname//'ERROR: max_blocks too small') - return - endif - dist%blockLocalID(n) = proc_tmp(pid) - dist%blockIndex(pid,proc_tmp(pid)) = n - else - dist%blockLocalID(n) = 0 + globalID = n + processor = newDistrb%blockLocation(globalID) + if (processor > 0) then + newDistrb%blockCnt(processor) = newDistrb%blockCnt(processor) + 1 + localID = newDistrb%blockCnt(processor) + newDistrb%blockLocalID (globalID) = localID + else ! no work - eliminate block from distribution + newDistrb%blockLocation(globalID) = 0 + newDistrb%blockLocalID (globalID) = 0 endif enddo - dist%numLocalBlocks = proc_tmp(my_task+1) - dist%blockCnt(:) = proc_tmp(:) + newDistrb%numLocalBlocks = newDistrb%blockCnt(my_task+1) - if (dist%numLocalBlocks > 0) then - allocate (dist%blockGlobalID(dist%numLocalBlocks)) - dist%blockGlobalID = 0 - endif - localID = 0 - do n=1,nblocks_tot - if (dist%blockLocation(n) == my_task+1) then - localID = localID + 1 - dist%blockGlobalID(localID) = n + ! 
set local blockGlobalID array + allocate(newDistrb%blockGlobalID(newDistrb%numLocalBlocks), stat=istat) + if (ice_memusage_allocErr(istat,subname//'alloc numLocalBlocks')) return + do n = 1,nblocks_tot + if (my_task+1 == newDistrb%blockLocation(n)) then + localID = newDistrb%blockLocalID(n) + newDistrb%blockGlobalID (localID) = n endif enddo + ! set/check max_blocks + if (max_blocks < 0) then + max_blocks = newDistrb%numLocalBlocks + endif + ! if (debug_blocks) then -! if (my_task == master_task) write(nu_diag,*) 'dist%blockLocation:= ',dist%blockLocation -! write(nu_diag,*) 'IAM: ',my_task,' SpaceCurve: Number of blocks {total,local} :=', & -! nblocks_tot,nblocks,proc_tmp(my_task+1) +! if (my_task == master_task) write(nu_diag,*) subname,'dist%blockLocation:= ',dist%blockLocation +! write(nu_diag,*) subname,'IAM: ',my_task,' SpaceCurve: Number of blocks {total,local} :=', & +! nblocks_tot,nblocks,newDistrb%numLocalBlocks ! endif + !--------------------------------- ! Deallocate temporary arrays !--------------------------------- - deallocate(proc_tmp) - deallocate(Mesh,Mesh2,Mesh3) - deallocate(idxT_i,idxT_j) - create_distrb_spacecurve = dist ! return the result + deallocate(Mesh,Mesh2,Mesh3,idxT_i,idxT_j, stat=istat) + if (ice_memusage_allocErr(istat,subname//'dealloc meshes')) return !---------------------------------------------------------------------- @@ -2373,11 +2159,11 @@ subroutine ice_distributionRake (procWork, procID, blockWork, & ! ensure a block does not stray too far from its neighbors. integer (int_kind), intent(in), dimension(:) :: & - blockWork ,&! amount of work per block + blockWork, &! amount of work per block procID ! global processor number integer (int_kind), intent(inout), dimension(:) :: & - procWork ,&! amount of work per processor + procWork, &! amount of work per processor priority ! priority for moving a given block type (distrb), intent(inout) :: & @@ -2393,7 +2179,7 @@ subroutine ice_distributionRake (procWork, procID, blockWork, & i, n, &! dummy loop indices np1, &! n+1 corrected for cyclical wrap iproc, inext, &! processor ids for current and next - nprocs, numBlocks, &! number of blocks, processors + nprocs, numBlocks, &! number of blocks, processors lastPriority, &! priority for most recent block minPriority, &! minimum priority lastLoc, &! location for most recent block diff --git a/cicecore/shared/ice_domain_size.F90 b/cicecore/shared/ice_domain_size.F90 index 999a35f48..b0ac9b036 100644 --- a/cicecore/shared/ice_domain_size.F90 +++ b/cicecore/shared/ice_domain_size.F90 @@ -21,7 +21,7 @@ module ice_domain_size ! namelist integer (kind=int_kind), public :: & - max_blocks , & ! max number of blocks per processor + max_blocks , & ! number of blocks allocated per task block_size_x, & ! size of block in first horiz dimension block_size_y, & ! size of block in second horiz dimension nx_global , & ! i-axis size @@ -47,15 +47,6 @@ module ice_domain_size integer (kind=int_kind), public, parameter :: & max_nstrm = 5 ! max number of history output streams - !*** The model will inform the user of the correct - !*** values for the parameter below. A value higher than - !*** necessary will not cause the code to fail, but will - !*** allocate more memory than is necessary. A value that - !*** is too low will cause the code to exit. 
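This removed guidance continues just below with the old sizing formula; it is obsolete now that max_blocks defaults to -1 and is set at runtime from numLocalBlocks. For the record, the estimate it describes works out as follows (nx_global, ny_global, and block_size_x as in the ice_in hunk further down; block_size_y and num_procs are assumed values for illustration):

    # Old hand estimate for max_blocks, per the comment being removed here:
    # max_blocks = (nx_global/block_size_x)*(ny_global/block_size_y)/num_procs
    nx_global, ny_global = 100, 116
    block_size_x, block_size_y = 25, 29    # block_size_y assumed
    num_procs = 4                          # assumed task count
    print((nx_global // block_size_x) * (ny_global // block_size_y) // num_procs)  # 4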
- !*** A good initial guess is found using - !*** max_blocks = (nx_global/block_size_x)*(ny_global/block_size_y)/ - !*** num_procs - !======================================================================= end module ice_domain_size diff --git a/cicecore/version.txt b/cicecore/version.txt index c908e44d9..083549d70 100644 --- a/cicecore/version.txt +++ b/cicecore/version.txt @@ -1 +1 @@ -CICE 6.5.0 +CICE 6.5.1 diff --git a/configuration/scripts/cice.batch.csh b/configuration/scripts/cice.batch.csh index 50ef665bd..520d165a3 100755 --- a/configuration/scripts/cice.batch.csh +++ b/configuration/scripts/cice.batch.csh @@ -48,6 +48,31 @@ cat >> ${jobfile} << EOFB ###PBS -m be EOFB +else if (${ICE_MACHINE} =~ gadi*) then +if (${queue} =~ *sr) then #sapphire rapids + @ memuse = ( $ncores * 481 / 100 ) +else if (${queue} =~ *bw) then #broadwell + @ memuse = ( $ncores * 457 / 100 ) +else if (${queue} =~ *sl) then + @ memuse = ( $ncores * 6 ) +else #normal queues + @ memuse = ( $ncores * 395 / 100 ) +endif +cat >> ${jobfile} << EOFB +#PBS -q ${queue} +#PBS -P ${ICE_MACHINE_PROJ} +#PBS -N ${ICE_CASENAME} +#PBS -l storage=gdata/${ICE_MACHINE_PROJ}+scratch/${ICE_MACHINE_PROJ}+gdata/ik11 +#PBS -l ncpus=${ncores} +#PBS -l mem=${memuse}gb +#PBS -l walltime=${batchtime} +#PBS -j oe +#PBS -W umask=003 +#PBS -o ${ICE_CASEDIR} +source /etc/profile.d/modules.csh +module use `echo ${MODULEPATH} | sed 's/:/ /g'` #copy the users modules +EOFB + else if (${ICE_MACHINE} =~ gust*) then cat >> ${jobfile} << EOFB #PBS -q ${queue} diff --git a/configuration/scripts/cice.launch.csh b/configuration/scripts/cice.launch.csh index 51c8f044f..f8eb0a5d2 100755 --- a/configuration/scripts/cice.launch.csh +++ b/configuration/scripts/cice.launch.csh @@ -46,6 +46,18 @@ mpiexec --cpu-bind depth -n ${ntasks} -ppn ${taskpernodelimit} -d ${nthrds} ./ci EOFR endif +#======= +else if (${ICE_MACHCOMP} =~ gadi*) then +if (${ICE_COMMDIR} =~ serial*) then +cat >> ${jobfile} << EOFR +./cice >&! \$ICE_RUNLOG_FILE +EOFR +else +cat >> ${jobfile} << EOFR +mpirun -n ${ntasks} ./cice >&! 
\$ICE_RUNLOG_FILE +EOFR +endif + #======= else if (${ICE_MACHCOMP} =~ hobart* || ${ICE_MACHCOMP} =~ izumi*) then if (${ICE_COMMDIR} =~ serial*) then diff --git a/configuration/scripts/cice_decomp.csh b/configuration/scripts/cice_decomp.csh index bcf27beee..d990c628f 100755 --- a/configuration/scripts/cice_decomp.csh +++ b/configuration/scripts/cice_decomp.csh @@ -167,7 +167,8 @@ setenv ICE_DECOMP_NXGLOB $nxglob setenv ICE_DECOMP_NYGLOB $nyglob setenv ICE_DECOMP_BLCKX $blckx setenv ICE_DECOMP_BLCKY $blcky -setenv ICE_DECOMP_MXBLCKS $mxblcks +# tcraig, do not override max blocks value of -1 +#setenv ICE_DECOMP_MXBLCKS $mxblcks setenv ICE_DECOMP_DECOMP $decomp setenv ICE_DECOMP_DSHAPE $dshape diff --git a/configuration/scripts/ciceplots.csh b/configuration/scripts/ciceplots.csh new file mode 100755 index 000000000..43528b33e --- /dev/null +++ b/configuration/scripts/ciceplots.csh @@ -0,0 +1,56 @@ +#!/bin/csh -f + +source ${MODULESHOME}/init/csh + +# User defined stuff +# Set case and case directory +# Set files, notes, fstr, and fields + +set case = "CICE6.5.1" +set casedir = "/glade/derecho/scratch/tcraig/CICE_RUNS/cgx1proda" + +# setup plots + +set histdir = "${casedir}/history" + +set files = ("${histdir}/iceh.2012-03.nc" \ + "${histdir}/iceh.2012-09.nc" ) +set notes = ("2012 March Mean" \ + "2012 Sept Mean" ) +set fstrs = ("Mar12" \ + "Sep12" ) + +set fields = ("aice" "hi" "hs") + +#conda config --add channels conda-forge +#conda config --set channel_priority strict +#conda search basemap --channel conda-forge +#conda create -p /glade/u/home/tcraig/conda/envs/basemap -c conda-forge basemap=1.4.1 basemap-data basemap-data-hires netCDF4 + +module load conda +source ${NCAR_ROOT_CONDA}/etc/profile.d/conda.csh + +conda activate /glade/u/home/tcraig/conda/envs/basemap + +echo " " +echo " " + +echo ./timeseries.py \"${casedir}\" --case \"${case}\" --grid +./timeseries.py "${casedir}" --case "${case}" --grid + +echo " " + +set cnt = 0 +while ($cnt < ${#files}) + @ cnt = $cnt + 1 + set file = "${files[$cnt]}" + set note = "${notes[$cnt]}" + set fstr = "${fstrs[$cnt]}" + foreach field ($fields) + echo ./ciceplots2d.py \"$field\" \"$file\" \"$case\" \"$note\" \"$fstr\" + ./ciceplots2d.py "$field" "$file" "$case" "$note" "$fstr" + end +end + +echo "DONE" + diff --git a/configuration/scripts/ciceplots2d.py b/configuration/scripts/ciceplots2d.py new file mode 100755 index 000000000..2ad73e66f --- /dev/null +++ b/configuration/scripts/ciceplots2d.py @@ -0,0 +1,148 @@ +#!/usr/bin/env python3 + +#Importing the necessary libraries +import sys +import os +import numpy as np +from netCDF4 import Dataset +import matplotlib as mpl +import matplotlib.pyplot as plt +from mpl_toolkits.basemap import Basemap + +if len(sys.argv) != 6: + print("ciceplots2d.py requires 5 arguments") + print(" 1. field name in file, ie. \"aice\"") + print(" 2. cice history file full path, ie. \"/glade/scratch/user/case/history/iceh.2012-03.nc\"") + print(" 3. case name, used to annotate plot, ie. \"CICE6.5.1\"") + print(" 4. notes, used to annotate plot, ie. \"2012 March Mean\"") + print(" 5. file string, used to create unique png filenames, ie. 
\"Mar12\"") + quit() + +field = sys.argv[1] +pathf = sys.argv[2] +casen = sys.argv[3] +notes = sys.argv[4] +fstr = sys.argv[5] +fname = os.path.basename(pathf) +title = field + " " + notes +cfnam = casen + " " + fname +#print("field = ",field) +#print("pathf = ",pathf) +#print("casen = ",casen) +#print("notes = ",notes) +#print("fname = ",fname) +#print("title = ",title) +#print("cfnam = ",cfnam) + +#Reading the netCDF file +data = Dataset(pathf,'r') +#print (data) + +lons = data.variables['TLON'][:,:] +lats = data.variables['TLAT'][:,:] +var1 = data.variables[field][:,:,:] +var1 = var1[0,:,:] +var1[ var1==0.00 ] = np.nan +#mask = data.variables['tmask'][:,:] +#mask[ mask>0.5 ] = np.nan + +#print("lons.shape = ",lons.shape) +#print("var1.shape = ",var1.shape) + +# Lon/Lat Projection + +#print("Plot global") +#m = Basemap(projection='cyl',llcrnrlat=-90,urcrnrlat=90, +# llcrnrlon=0,urcrnrlon=360,resolution='c') +m = Basemap(projection='cyl',llcrnrlat=-90,urcrnrlat=90, + llcrnrlon=0,urcrnrlon=360,resolution='l') +fig, ax = plt.subplots() +#plt.figure(figsize=(6,4)) +m.drawcoastlines(linewidth=0.2) +m.fillcontinents(color='black',lake_color='white') +#draw parallels and meridians. +m.drawparallels(np.arange(-60.,61.,30.),labels=[1,0,0,0]) +m.drawmeridians(np.arange(0.,361.,45.),labels=[1,0,0,1]) +#draw map boundary +m.drawmapboundary(fill_color='white') +#setting colorbar +cmap = plt.get_cmap('jet') +barticks = None +norm = "linear" +if field in ['hi']: + bounds = np.arange(0,2.05,0.1) + bounds = np.append(bounds,[2.25,2.5,2.75,3.0,3.25,3.5,3.75,4.0]) + norm = mpl.colors.BoundaryNorm(bounds,cmap.N,extend='max') + barticks=[0,0.5,1.0,1.5,2.0,2.5,3.0,3.5,4.0] +if field in ['hs']: + bounds = np.arange(0,1.02,0.05) + bounds = np.append(bounds,[1.5,2.0,2.5,3.0,3.5,4.0]) + norm = mpl.colors.BoundaryNorm(bounds,cmap.N,extend='max') + barticks=[0,0.25,0.5,0.75,1.0,2.0,3.0,4.0] +#matplotlib scatter-plot +m.scatter(lons,lats,c=var1,cmap=cmap,marker='o',s=0.2,norm=norm) +m.colorbar(label=field, ticks=barticks) +plt.rcParams["figure.dpi"] = 300 +plt.title(title) +plt.text(x=0.0,y=-0.1,s=cfnam,transform=ax.transAxes,horizontalalignment='left',verticalalignment='top',fontsize='x-small') +oname = field + "_gl_" + fstr + ".png" +print('Saving file to ',oname) +plt.savefig(oname) +#plt.show() +plt.close() + +# North Polar Stereographic Projection + +#print("Plot NH") +#m = Basemap(projection='npstere',boundinglat=45,lon_0=-45,resolution='c') +m = Basemap(projection='npstere',boundinglat=45,lon_0=-45,resolution='l') +fig, ax = plt.subplots() +#plt.figure(figsize=(6,4)) +m.drawcoastlines(linewidth=0.2) +m.fillcontinents(color='black',lake_color='white') +# draw parallels and meridians. 
+m.drawparallels(np.arange(-60.,61.,30.),labels=[0,0,0,0]) +m.drawmeridians(np.arange(0.,361.,45.),labels=[0,0,0,0]) +m.drawmapboundary(fill_color='white') +#setting colorbar (set above) +m.scatter(lons,lats,c=var1,cmap=cmap,marker='o',s=0.2,latlon=True,norm=norm) +#m.colorbar(label=field) +m.colorbar(label=field, ticks=barticks) +plt.rcParams["figure.dpi"] = 300 +plt.title (title) +plt.text(x=0.0,y=-0.02,s=cfnam,transform=ax.transAxes,horizontalalignment='left',verticalalignment='top',fontsize='x-small') +oname = field + "_nh_" + fstr + ".png" +print('Saving file to ',oname) +plt.savefig(oname) +#plt.show() +plt.close() + +# South Polar Stereographic Projection + +#print("Plot SH") +#m = Basemap(projection='npstere',boundinglat=45,lon_0=-45,resolution='c') +m = Basemap(projection='spstere',boundinglat=-45,lon_0=180,resolution='l') +fig, ax = plt.subplots() +#plt.figure(figsize=(6,4)) +m.drawcoastlines(linewidth=0.2) +m.fillcontinents(color='black',lake_color='white') +# draw parallels and meridians. +m.drawparallels(np.arange(-60.,61.,30.),labels=[0,0,0,0]) +m.drawmeridians(np.arange(0.,361.,45.),labels=[0,0,0,0]) +m.drawmapboundary(fill_color='white') +#setting colorbar (set above) +m.scatter(lons,lats,c=var1,cmap=cmap,marker='o',s=0.2,latlon=True,norm=norm) +#m.colorbar(label=field) +m.colorbar(label=field, ticks=barticks) +plt.rcParams["figure.dpi"] = 300 +plt.title (title) +plt.text(x=0.0,y=-0.02,s=cfnam,transform=ax.transAxes,horizontalalignment='left',verticalalignment='top',fontsize='x-small') +oname = field + "_sh_" + fstr + ".png" +print('Saving file to ',oname) +plt.savefig(oname) +#plt.show() +plt.close() + +#print("Done") +quit() + diff --git a/configuration/scripts/ice_in b/configuration/scripts/ice_in index 63a97d7d8..ad29e05ce 100644 --- a/configuration/scripts/ice_in +++ b/configuration/scripts/ice_in @@ -302,7 +302,7 @@ / &domain_nml - nprocs = 4 + nprocs = -1 nx_global = 100 ny_global = 116 block_size_x = 25 diff --git a/configuration/scripts/machines/Macros.compy_intel b/configuration/scripts/machines/Macros.compy_intel index 604337f59..eabdbc00a 100644 --- a/configuration/scripts/machines/Macros.compy_intel +++ b/configuration/scripts/machines/Macros.compy_intel @@ -3,7 +3,7 @@ #============================================================================== CPP := fpp -CPPDEFS := -DFORTRANUNDERSCORE ${ICE_CPPDEFS} +CPPDEFS := -DFORTRANUNDERSCORE -DNO_HDF5 ${ICE_CPPDEFS} CFLAGS := -c -O2 -fp-model precise -xHost FIXEDFLAGS := -132 diff --git a/configuration/scripts/machines/Macros.gadi_intel b/configuration/scripts/machines/Macros.gadi_intel new file mode 100644 index 000000000..df7746731 --- /dev/null +++ b/configuration/scripts/machines/Macros.gadi_intel @@ -0,0 +1,73 @@ +#============================================================================== +# Makefile macros for NCI Gadi, intel compiler +#============================================================================== + +CPP := fpp +CPPDEFS := -DFORTRANUNDERSCORE -DREPRODUCIBLE ${ICE_CPPDEFS} +CFLAGS := -c -O2 -fp-model precise -Wno-unused-variable -Wno-unused-parameter + +FIXEDFLAGS := -132 +FREEFLAGS := -FR + +NCI_INTEL_FLAGS := -r8 -i4 -traceback -w -fpe0 -ftz -convert big_endian -assume byterecl -check noarg_temp_created +NCI_REPRO_FLAGS := -fp-model precise -fp-model source -align all + +ifeq ($(ICE_BLDDEBUG), true) + NCI_DEBUG_FLAGS := -g3 -O0 -debug all -check all -no-vec -assume nobuffered_io + FFLAGS := $(NCI_INTEL_FLAGS) $(NCI_REPRO_FLAGS) $(NCI_DEBUG_FLAGS) + CPPDEFS := $(CPPDEFS) -DDEBUG=$(DEBUG) 
+else + NCI_OPTIM_FLAGS := -g3 -O2 -axCORE-AVX2 -debug all -check none -qopt-report=5 -qopt-report-annotate -assume buffered_io + FFLAGS := $(NCI_INTEL_FLAGS) $(NCI_REPRO_FLAGS) $(NCI_OPTIM_FLAGS) +endif + +SCC := icx +SFC := ifort +MPICC := mpicc +MPIFC := mpifort + +ifeq ($(ICE_COMMDIR), mpi) + FC := $(MPIFC) + CC := $(MPICC) +else + FC := $(SFC) + CC := $(SCC) +endif +LD:= $(FC) + +SLIBS := $(SLIBS) +INCLDIR := $(INCLDIR) + +# if spack modules loaded, use them. otherwise use system modules +ifndef SPACK_NETCDF_FORTRAN_ROOT + SLIBS += -L$(NETCDF)/lib -lnetcdf -lnetcdff + INCLDIR += -I$(NETCDF)/include +else + SLIBS += -L$(SPACK_NETCDF_C_ROOT)/lib64 -lnetcdf + SLIBS += -L$(SPACK_NETCDF_FORTRAN_ROOT)/lib -lnetcdff + INCLDIR += -I$(SPACK_NETCDF_C_ROOT)/include + INCLDIR += -I$(SPACK_NETCDF_FORTRAN_ROOT)/include +endif + +ifeq ($(ICE_THREADED), true) + LDFLAGS += -qopenmp + CFLAGS += -qopenmp + FFLAGS += -qopenmp +endif + +ifeq ($(ICE_IOTYPE), pio1) + LIB_PIO := $(PIO_LIBDIR) + SLIBS += -L$(LIB_PIO) -lpio +endif + +ifeq ($(ICE_IOTYPE), pio2) + ifndef SPACK_PARALLELIO_ROOT + SLIBS += -L$(PARALLELIO_ROOT)/lib -lpioc -lpiof + else + SLIBS += -L$(SPACK_PARALLELIO_ROOT)/lib -lpioc -lpiof + INCLDIR += -I $(SPACK_PARALLELIO_ROOT)/include + endif + + SLIBS += $(SLIBS) -L$(OMPI_BASE)/lib -lmpi_usempif08 -lmpi_usempi_ignore_tkr -lmpi_mpifh + +endif \ No newline at end of file diff --git a/configuration/scripts/machines/Macros.gaffney_gnu b/configuration/scripts/machines/Macros.gaffney_gnu index 4ae235bc9..4d4c53971 100644 --- a/configuration/scripts/machines/Macros.gaffney_gnu +++ b/configuration/scripts/machines/Macros.gaffney_gnu @@ -3,7 +3,7 @@ #============================================================================== CPP := ftn -E -CPPDEFS := -DFORTRANUNDERSCORE ${ICE_CPPDEFS} +CPPDEFS := -DFORTRANUNDERSCORE -DNO_CDF5 ${ICE_CPPDEFS} CFLAGS := -c FIXEDFLAGS := -ffixed-line-length-132 diff --git a/configuration/scripts/machines/Macros.gaffney_intel b/configuration/scripts/machines/Macros.gaffney_intel index 7eccd36da..03c3c3251 100644 --- a/configuration/scripts/machines/Macros.gaffney_intel +++ b/configuration/scripts/machines/Macros.gaffney_intel @@ -3,7 +3,7 @@ #============================================================================== CPP := fpp -CPPDEFS := -DFORTRANUNDERSCORE ${ICE_CPPDEFS} +CPPDEFS := -DFORTRANUNDERSCORE -DNO_CDF5 ${ICE_CPPDEFS} CFLAGS := -c -O2 -fp-model precise -xHost FIXEDFLAGS := -132 diff --git a/configuration/scripts/machines/Macros.izumi_nag b/configuration/scripts/machines/Macros.izumi_nag index c12edb904..9265c9de1 100644 --- a/configuration/scripts/machines/Macros.izumi_nag +++ b/configuration/scripts/machines/Macros.izumi_nag @@ -3,7 +3,7 @@ #============================================================================== CPP := /usr/bin/cpp -CPPDEFS := -DFORTRANUNDERSCORE -DNO_R16 $(ICE_CPPDEFS) +CPPDEFS := -DFORTRANUNDERSCORE -DNO_R16 -DNO_CDF5 $(ICE_CPPDEFS) CFLAGS := -c FIXEDFLAGS := -fixed diff --git a/configuration/scripts/machines/Macros.koehr_intel b/configuration/scripts/machines/Macros.koehr_intel index aee4b31a8..cd593e33b 100644 --- a/configuration/scripts/machines/Macros.koehr_intel +++ b/configuration/scripts/machines/Macros.koehr_intel @@ -3,7 +3,7 @@ #============================================================================== CPP := fpp -CPPDEFS := -DFORTRANUNDERSCORE ${ICE_CPPDEFS} +CPPDEFS := -DFORTRANUNDERSCORE -DNO_CDF5 ${ICE_CPPDEFS} CFLAGS := -c -O2 -fp-model precise -xHost FIXEDFLAGS := -132 diff --git 
a/configuration/scripts/machines/Macros.mustang_intel18 b/configuration/scripts/machines/Macros.mustang_intel18 index 28c1c1964..03a2b8891 100644 --- a/configuration/scripts/machines/Macros.mustang_intel18 +++ b/configuration/scripts/machines/Macros.mustang_intel18 @@ -3,7 +3,7 @@ #============================================================================== CPP := fpp -CPPDEFS := -DFORTRANUNDERSCORE ${ICE_CPPDEFS} +CPPDEFS := -DFORTRANUNDERSCORE -DNO_CDF5 ${ICE_CPPDEFS} CFLAGS := -c -O2 -fp-model precise -xHost FIXEDFLAGS := -132 diff --git a/configuration/scripts/machines/Macros.mustang_intel19 b/configuration/scripts/machines/Macros.mustang_intel19 index 28c1c1964..03a2b8891 100644 --- a/configuration/scripts/machines/Macros.mustang_intel19 +++ b/configuration/scripts/machines/Macros.mustang_intel19 @@ -3,7 +3,7 @@ #============================================================================== CPP := fpp -CPPDEFS := -DFORTRANUNDERSCORE ${ICE_CPPDEFS} +CPPDEFS := -DFORTRANUNDERSCORE -DNO_CDF5 ${ICE_CPPDEFS} CFLAGS := -c -O2 -fp-model precise -xHost FIXEDFLAGS := -132 diff --git a/configuration/scripts/machines/Macros.mustang_intel20 b/configuration/scripts/machines/Macros.mustang_intel20 index 28c1c1964..03a2b8891 100644 --- a/configuration/scripts/machines/Macros.mustang_intel20 +++ b/configuration/scripts/machines/Macros.mustang_intel20 @@ -3,7 +3,7 @@ #============================================================================== CPP := fpp -CPPDEFS := -DFORTRANUNDERSCORE ${ICE_CPPDEFS} +CPPDEFS := -DFORTRANUNDERSCORE -DNO_CDF5 ${ICE_CPPDEFS} CFLAGS := -c -O2 -fp-model precise -xHost FIXEDFLAGS := -132 diff --git a/configuration/scripts/machines/env.gadi_intel b/configuration/scripts/machines/env.gadi_intel new file mode 100644 index 000000000..9d056bc50 --- /dev/null +++ b/configuration/scripts/machines/env.gadi_intel @@ -0,0 +1,56 @@ +#!/bin/csh -f + +set inp = "undefined" +if ($#argv == 1) then + set inp = $1 +endif + +if ("$inp" != "-nomodules") then + + source /etc/profile.d/modules.csh + + module load intel-compiler + module load openmpi + + if ($?ICE_IOTYPE) then + if ($ICE_IOTYPE =~ pio*) then + if ($ICE_IOTYPE == "pio1") then + # we don't have pio1 installed anywhere + module load pnetcdf + module load netcdf + module load pio + else + module load parallelio + endif + else + module load netcdf + endif + endif + + if ($?ICE_BFBTYPE) then + if ($ICE_BFBTYPE =~ qcchk*) then + # conda/analysis has the required libraries, skip building from cice yaml file + module use /g/data/hh5/public/modules + module load conda/analysis + # conda env create -f ../../configuration/scripts/tests/qctest.yml + # conda activate qctest + endif + endif + +endif + +setenv ICE_MACHINE_MACHNAME gadi +setenv ICE_MACHINE_MACHINFO "Intel Xeon Scalable" +setenv ICE_MACHINE_ENVNAME intel +setenv ICE_MACHINE_ENVINFO "INTEL_COMPILER_VERSION $INTEL_COMPILER_VERSION, OMPI_VERSION $OMPI_VERSION" +setenv ICE_MACHINE_MAKE gmake +setenv ICE_MACHINE_WKDIR /scratch/$PROJECT/$USER/CICE_RUNS +setenv ICE_MACHINE_INPUTDATA /g/data/ik11/inputs +setenv ICE_MACHINE_BASELINE /scratch/$PROJECT/$USER/CICE_BASELINE +setenv ICE_MACHINE_SUBMIT "qsub" +setenv ICE_MACHINE_PROJ $PROJECT +setenv ICE_MACHINE_ACCT $USER +setenv ICE_MACHINE_QUEUE "normal" +setenv ICE_MACHINE_TPNODE 48 +setenv ICE_MACHINE_BLDTHRDS 4 +setenv ICE_MACHINE_QSTAT "qstat" diff --git a/configuration/scripts/machines/env.hera_intel b/configuration/scripts/machines/env.hera_intel index 6698c0c2c..9bab973b6 100644 --- a/configuration/scripts/machines/env.hera_intel 
+++ b/configuration/scripts/machines/env.hera_intel @@ -10,8 +10,9 @@ if ("$inp" != "-nomodules") then source /etc/profile.d/modules.csh #module list module purge -module load intel/18.0.5.274 -module load impi/2018.0.4 +module load gnu/13.2.0 +module load intel/2023.2.0 +module load impi/2023.2.0 module load netcdf/4.7.0 #module list @@ -23,7 +24,7 @@ setenv OMP_STACKSIZE 64M setenv ICE_MACHINE_MACHNAME hera setenv ICE_MACHINE_MACHINFO "Cray CS500 Intel SkyLake 2.4GHz, Infiniband HDR" setenv ICE_MACHINE_ENVNAME intel -setenv ICE_MACHINE_ENVINFO "ifort 18.0.5 20180823, intelmpi/2018.0.4, netcdf/4.7.0" +setenv ICE_MACHINE_ENVINFO "icc/ifort 2021.10.0 20230609, intelmpi/2023.2.0, netcdf/4.7.0" setenv ICE_MACHINE_MAKE gmake setenv ICE_MACHINE_WKDIR $HOME/scratch/CICE_RUNS setenv ICE_MACHINE_INPUTDATA /home/Anthony.Craig/scratch/CICE_INPUTDATA diff --git a/configuration/scripts/machines/environment.yml b/configuration/scripts/machines/environment.yml index 30ed1e148..119bf7ea0 100644 --- a/configuration/scripts/machines/environment.yml +++ b/configuration/scripts/machines/environment.yml @@ -15,6 +15,9 @@ dependencies: - matplotlib-base - cartopy - netcdf4 + - basemap=1.4.1 + - basemap-data + - basemap-data-hires # Python dependencies for building the HTML documentation - sphinx - sphinxcontrib-bibtex diff --git a/configuration/scripts/options/set_nml.run8year b/configuration/scripts/options/set_nml.run8year new file mode 100644 index 000000000..1515fa7c9 --- /dev/null +++ b/configuration/scripts/options/set_nml.run8year @@ -0,0 +1,7 @@ +npt_unit = 'y' +npt = 8 +dumpfreq = 'y' +dumpfreq_n = 1 +diagfreq = 24 +histfreq = 'm','x','x','x','x' + diff --git a/configuration/scripts/tests/decomp_suite.ts b/configuration/scripts/tests/decomp_suite.ts index 8d47506d6..d33572f0b 100644 --- a/configuration/scripts/tests/decomp_suite.ts +++ b/configuration/scripts/tests/decomp_suite.ts @@ -1,8 +1,10 @@ # Test Grid PEs Sets BFB-compare restart gx3 4x2x25x29x4 dslenderX2 restart gx1 64x1x16x16x10 dwghtfile +restart gx1 32x2x10x12x32 dsectcart,short restart gbox180 16x1x6x6x60 dspacecurve,debugblocks decomp gx3 4x2x25x29x5 none +decomp gx3 4x2x25x29 none decomp gx3 4x2x25x29x5 dynpicard,reprosum decomp gx3 4x2x25x29x5 dyneap restart gx3 1x1x50x58x4 droundrobin,thread restart_gx3_4x2x25x29x4_dslenderX2 @@ -13,7 +15,7 @@ restart gx3 6x2x50x58x1 droundrobin restart_gx3_4x2x25x2 restart gx3 5x2x33x23x4 droundrobin restart_gx3_4x2x25x29x4_dslenderX2 restart gx3 4x2x19x19x10 droundrobin restart_gx3_4x2x25x29x4_dslenderX2 restart gx3 20x2x5x4x30 dsectrobin,short restart_gx3_4x2x25x29x4_dslenderX2 -restart gx3 16x2x5x10x20 drakeX2 restart_gx3_4x2x25x29x4_dslenderX2 +restart gx3 16x2x5x10 drakeX2 restart_gx3_4x2x25x29x4_dslenderX2 restart gx3 8x2x8x10x20 droundrobin,maskhalo restart_gx3_4x2x25x29x4_dslenderX2 restart gx3 1x4x25x29x16 droundrobin restart_gx3_4x2x25x29x4_dslenderX2 restart gx3 1x8x30x20x32 droundrobin restart_gx3_4x2x25x29x4_dslenderX2 @@ -23,28 +25,29 @@ restart gx3 16x2x2x2x200 droundrobin restart_gx3_4x2x25x2 restart gx3 16x2x3x3x100 droundrobin restart_gx3_4x2x25x29x4_dslenderX2 restart gx3 16x2x8x8x80 dspiralcenter restart_gx3_4x2x25x29x4_dslenderX2 restart gx3 10x1x10x29x4 dsquarepop,thread restart_gx3_4x2x25x29x4_dslenderX2 -restart gx3 8x1x25x29x4 drakeX2,thread restart_gx3_4x2x25x29x4_dslenderX2 +restart gx3 8x1x25x29 drakeX2,thread restart_gx3_4x2x25x29x4_dslenderX2 -smoke gx3 4x2x25x29x4 debug,run2day,dslenderX2 -smoke gx1 64x1x16x16x10 debug,run2day,dwghtfile -smoke gbox180 16x1x6x6x60 
debug,run2day,dspacecurve,debugblocks -smoke gx3 1x1x25x58x8 debug,run2day,droundrobin,thread smoke_gx3_4x2x25x29x4_debug_dslenderX2_run2day -smoke gx3 20x1x5x116x1 debug,run2day,dslenderX1,thread smoke_gx3_4x2x25x29x4_debug_dslenderX2_run2day -smoke gx3 6x2x4x29x18 debug,run2day,dspacecurve smoke_gx3_4x2x25x29x4_debug_dslenderX2_run2day -smoke gx3 8x2x10x12x16 debug,run2day,droundrobin smoke_gx3_4x2x25x29x4_debug_dslenderX2_run2day -smoke gx3 6x2x50x58x1 debug,run2day,droundrobin smoke_gx3_4x2x25x29x4_debug_dslenderX2_run2day -smoke gx3 5x2x33x23x4 debug,run2day,droundrobin smoke_gx3_4x2x25x29x4_debug_dslenderX2_run2day -smoke gx3 4x2x19x19x10 debug,run2day,droundrobin smoke_gx3_4x2x25x29x4_debug_dslenderX2_run2day -smoke gx3 20x2x5x4x30 debug,run2day,dsectrobin,short smoke_gx3_4x2x25x29x4_debug_dslenderX2_run2day -smoke gx3 16x2x5x10x20 debug,run2day,drakeX2 smoke_gx3_4x2x25x29x4_debug_dslenderX2_run2day -smoke gx3 8x2x8x10x20 debug,run2day,droundrobin,maskhalo smoke_gx3_4x2x25x29x4_debug_dslenderX2_run2day -smoke gx3 1x6x25x29x16 debug,run2day,droundrobin smoke_gx3_4x2x25x29x4_debug_dslenderX2_run2day -smoke gx3 1x8x30x20x32 debug,run2day,droundrobin smoke_gx3_4x2x25x29x4_debug_dslenderX2_run2day -smoke gx3 1x1x120x125x1 debug,run2day,droundrobin,thread smoke_gx3_4x2x25x29x4_debug_dslenderX2_run2day -smoke gx3 16x2x1x1x800 debug,run2day,droundrobin smoke_gx3_4x2x25x29x4_debug_dslenderX2_run2day -smoke gx3 16x2x2x2x200 debug,run2day,droundrobin smoke_gx3_4x2x25x29x4_debug_dslenderX2_run2day -smoke gx3 16x2x3x3x100 debug,run2day,droundrobin smoke_gx3_4x2x25x29x4_debug_dslenderX2_run2day -smoke gx3 16x2x8x8x80 debug,run2day,dspiralcenter smoke_gx3_4x2x25x29x4_debug_dslenderX2_run2day -smoke gx3 10x1x10x29x4 debug,run2day,dsquarepop,thread smoke_gx3_4x2x25x29x4_debug_dslenderX2_run2day -smoke gx3 8x1x25x29x4 debug,run2day,drakeX2,thread smoke_gx3_4x2x25x29x4_debug_dslenderX2_run2day +smoke gx3 4x2x25x29 debug,run2day,dslenderX2 +smoke gx1 64x1x16x16 debug,run2day,dwghtfile +smoke gx1 32x2x10x12 debug,run2day,dsectcart +smoke gbox180 16x1x6x6 debug,run2day,dspacecurve,debugblocks +smoke gx3 1x1x25x58 debug,run2day,droundrobin,thread smoke_gx3_4x2x25x29_debug_dslenderX2_run2day +smoke gx3 20x1x5x116 debug,run2day,dslenderX1,thread smoke_gx3_4x2x25x29_debug_dslenderX2_run2day +smoke gx3 6x2x4x29 debug,run2day,dspacecurve smoke_gx3_4x2x25x29_debug_dslenderX2_run2day +smoke gx3 8x2x10x12x18 debug,run2day,droundrobin smoke_gx3_4x2x25x29_debug_dslenderX2_run2day +smoke gx3 6x2x50x58 debug,run2day,droundrobin smoke_gx3_4x2x25x29_debug_dslenderX2_run2day +smoke gx3 5x2x33x23 debug,run2day,droundrobin smoke_gx3_4x2x25x29_debug_dslenderX2_run2day +smoke gx3 4x2x19x19x10 debug,run2day,droundrobin smoke_gx3_4x2x25x29_debug_dslenderX2_run2day +smoke gx3 20x2x5x4 debug,run2day,dsectrobin,short smoke_gx3_4x2x25x29_debug_dslenderX2_run2day +smoke gx3 16x2x5x10 debug,run2day,drakeX2 smoke_gx3_4x2x25x29_debug_dslenderX2_run2day +smoke gx3 8x2x8x10x20 debug,run2day,droundrobin,maskhalo smoke_gx3_4x2x25x29_debug_dslenderX2_run2day +smoke gx3 1x6x25x29x16 debug,run2day,droundrobin smoke_gx3_4x2x25x29_debug_dslenderX2_run2day +smoke gx3 1x8x30x20x32 debug,run2day,droundrobin smoke_gx3_4x2x25x29_debug_dslenderX2_run2day +smoke gx3 1x1x120x125x1 debug,run2day,droundrobin,thread smoke_gx3_4x2x25x29_debug_dslenderX2_run2day +smoke gx3 16x2x1x1x800 debug,run2day,droundrobin smoke_gx3_4x2x25x29_debug_dslenderX2_run2day +smoke gx3 16x2x2x2x200 debug,run2day,droundrobin smoke_gx3_4x2x25x29_debug_dslenderX2_run2day +smoke 
gx3 16x2x3x3x100 debug,run2day,droundrobin smoke_gx3_4x2x25x29_debug_dslenderX2_run2day +smoke gx3 16x2x8x8 debug,run2day,dspiralcenter smoke_gx3_4x2x25x29_debug_dslenderX2_run2day +smoke gx3 10x1x10x29 debug,run2day,dsquarepop,thread smoke_gx3_4x2x25x29_debug_dslenderX2_run2day +smoke gx3 8x1x25x29 debug,run2day,drakeX2,thread smoke_gx3_4x2x25x29_debug_dslenderX2_run2day diff --git a/configuration/scripts/tests/first_suite.ts b/configuration/scripts/tests/first_suite.ts index bef24d9eb..208c786f8 100644 --- a/configuration/scripts/tests/first_suite.ts +++ b/configuration/scripts/tests/first_suite.ts @@ -2,18 +2,18 @@ smoke gx3 8x2 diag1,run5day # decomp_suite restart gx3 4x2x25x29x4 dslenderX2 -smoke gx3 4x2x25x29x4 debug,run2day,dslenderX2 +smoke gx3 4x2x25x29 debug,run2day,dslenderX2 # reprosum_suite smoke gx3 4x2x25x29x4 dslenderX2,diag1,reprosum # travis_suite smoke gx3 1x2 run2day # gridsys_suite -smoke gx3 1x1x100x116x1 reprosum,run10day -smoke gx1 32x1x16x16x32 reprosum,run10day -smoke gx3 1x1x100x116x1 reprosum,run10day,gridcd -smoke gx1 32x1x16x16x32 reprosum,run10day,gridcd -smoke gx3 1x1x100x116x1 reprosum,run10day,gridc -smoke gx1 32x1x16x16x32 reprosum,run10day,gridc +smoke gx3 1x1x100x116 reprosum,run10day +smoke gx1 32x1x16x16 reprosum,run10day +smoke gx3 1x1x100x116 reprosum,run10day,gridcd +smoke gx1 32x1x16x16 reprosum,run10day,gridcd +smoke gx3 1x1x100x116 reprosum,run10day,gridc +smoke gx1 32x1x16x16 reprosum,run10day,gridc # perf_suite -smoke gx1 32x1x16x16x15 run2day,droundrobin -smoke gx1 64x1x16x16x8 run2day,droundrobin,thread +smoke gx1 32x1x16x16 run2day,droundrobin +smoke gx1 64x1x16x16 run2day,droundrobin,thread diff --git a/configuration/scripts/tests/gridsys_suite.ts b/configuration/scripts/tests/gridsys_suite.ts index e2731dd39..eca6497a4 100644 --- a/configuration/scripts/tests/gridsys_suite.ts +++ b/configuration/scripts/tests/gridsys_suite.ts @@ -1,17 +1,17 @@ # Test Grid PEs Sets BFB-compare -smoke gx3 1x1x100x116x1 reprosum,run10day -smoke gx1 32x1x16x16x32 reprosum,run10day -smoke gx3 1x1x100x116x1 reprosum,run10day,gridcd -smoke gx1 32x1x16x16x32 reprosum,run10day,gridcd -smoke gx3 1x1x100x116x1 reprosum,run10day,gridc -smoke gx1 32x1x16x16x32 reprosum,run10day,gridc +smoke gx3 1x1x100x116 reprosum,run10day +smoke gx1 32x1x16x16 reprosum,run10day +smoke gx3 1x1x100x116 reprosum,run10day,gridcd +smoke gx1 32x1x16x16 reprosum,run10day,gridcd +smoke gx3 1x1x100x116 reprosum,run10day,gridc +smoke gx1 32x1x16x16 reprosum,run10day,gridc smoke gx3 8x2 diag1,run5day smoke gx3 8x4 diag1,run5day,debug restart gx3 4x2 debug,diag1 restart2 gx1 16x2 debug,diag1 restart tx1 40x2 diag1 -smoke gbox12 1x1x12x12x1 boxchan +smoke gbox12 1x1x12x12 boxchan smoke gbox80 4x2 boxchan1e,debug smoke gbox80 8x1 boxchan1n smoke gbox80 1x1 box2001 @@ -22,19 +22,19 @@ smoke gbox80 4x2 boxclosed,boxforcee,run1day smoke gbox80 4x1 boxclosed,boxforcene,run1day,kmtislands smoke gbox80 4x2 boxopen,kmtislands,boxforcee,run1day smoke gbox80 2x2 boxclosed,boxforcen,run1day,vargrid -smoke gx3 1x1x25x29x16 reprosum,run10day,dwblockall smoke_gx3_1x1x100x116x1_reprosum_run10day -smoke gx3 1x1x5x4x580 reprosum,run10day,dwblockall smoke_gx3_1x1x100x116x1_reprosum_run10day -smoke gx3 1x1x5x4x580 reprosum,run10day smoke_gx3_1x1x100x116x1_reprosum_run10day -smoke gx1 32x1x16x16x32 reprosum,run10day,cmplogrest,dwblockall smoke_gx1_32x1x16x16x32_reprosum_run10day -smoke gx1 32x1x16x12x40 reprosum,run10day,cmplogrest,dwblockall smoke_gx1_32x1x16x16x32_reprosum_run10day -smoke gx1 32x1x16x12x40 
reprosum,run10day,cmplogrest smoke_gx1_32x1x16x16x32_reprosum_run10day +smoke gx3 1x1x25x29 reprosum,run10day,dwblockall smoke_gx3_1x1x100x116_reprosum_run10day +smoke gx3 1x1x5x4 reprosum,run10day,dwblockall smoke_gx3_1x1x100x116_reprosum_run10day +smoke gx3 1x1x5x4 reprosum,run10day smoke_gx3_1x1x100x116_reprosum_run10day +smoke gx1 32x1x16x16 reprosum,run10day,cmplogrest,dwblockall smoke_gx1_32x1x16x16_reprosum_run10day +smoke gx1 32x1x16x12 reprosum,run10day,cmplogrest,dwblockall smoke_gx1_32x1x16x16_reprosum_run10day +smoke gx1 32x1x16x12 reprosum,run10day,cmplogrest smoke_gx1_32x1x16x16_reprosum_run10day smoke gx3 8x2 diag1,run5day,gridcd smoke gx3 8x4 diag1,run5day,debug,gridcd restart gx3 4x2 debug,diag1,gridcd restart2 gx1 16x2 debug,diag1,gridcd restart tx1 40x2 diag1,gridcd -smoke gbox12 1x1x12x12x1 boxchan,gridcd +smoke gbox12 1x1x12x12 boxchan,gridcd smoke gbox80 4x2 boxchan1e,debug,gridcd smoke gbox80 8x1 boxchan1n,gridcd smoke gbox80 1x1 box2001,gridcd @@ -45,19 +45,19 @@ smoke gbox80 4x2 boxclosed,boxforcee,run1day,gridcd smoke gbox80 4x1 boxclosed,boxforcene,run1day,kmtislands,gridcd smoke gbox80 4x2 boxopen,kmtislands,boxforcee,run1day,gridcd smoke gbox80 2x2 boxclosed,boxforcen,run1day,vargrid,gridcd -smoke gx3 1x1x25x29x16 reprosum,run10day,dwblockall,gridcd smoke_gx3_1x1x100x116x1_gridcd_reprosum_run10day -smoke gx3 1x1x5x4x580 reprosum,run10day,dwblockall,gridcd smoke_gx3_1x1x100x116x1_gridcd_reprosum_run10day -smoke gx3 1x1x5x4x580 reprosum,run10day,gridcd smoke_gx3_1x1x100x116x1_gridcd_reprosum_run10day -smoke gx1 32x1x16x16x32 reprosum,run10day,cmplogrest,dwblockall,gridcd smoke_gx1_32x1x16x16x32_gridcd_reprosum_run10day -smoke gx1 32x1x16x12x40 reprosum,run10day,cmplogrest,dwblockall,gridcd smoke_gx1_32x1x16x16x32_gridcd_reprosum_run10day -smoke gx1 32x1x16x12x40 reprosum,run10day,cmplogrest,gridcd smoke_gx1_32x1x16x16x32_gridcd_reprosum_run10day +smoke gx3 1x1x25x29 reprosum,run10day,dwblockall,gridcd smoke_gx3_1x1x100x116_gridcd_reprosum_run10day +smoke gx3 1x1x5x4 reprosum,run10day,dwblockall,gridcd smoke_gx3_1x1x100x116_gridcd_reprosum_run10day +smoke gx3 1x1x5x4 reprosum,run10day,gridcd smoke_gx3_1x1x100x116_gridcd_reprosum_run10day +smoke gx1 32x1x16x16 reprosum,run10day,cmplogrest,dwblockall,gridcd smoke_gx1_32x1x16x16_gridcd_reprosum_run10day +smoke gx1 32x1x16x12 reprosum,run10day,cmplogrest,dwblockall,gridcd smoke_gx1_32x1x16x16_gridcd_reprosum_run10day +smoke gx1 32x1x16x12 reprosum,run10day,cmplogrest,gridcd smoke_gx1_32x1x16x16_gridcd_reprosum_run10day smoke gx3 8x2 diag1,run5day,gridc smoke gx3 8x4 diag1,run5day,debug,gridc restart gx3 4x2 debug,diag1,gridc restart2 gx1 16x2 debug,diag1,gridc restart tx1 40x2 diag1,gridc -smoke gbox12 1x1x12x12x1 boxchan,gridc +smoke gbox12 1x1x12x12 boxchan,gridc smoke gbox80 4x2 boxchan1e,debug,gridc smoke gbox80 8x1 boxchan1n,gridc smoke gbox80 1x1 box2001,gridc @@ -68,9 +68,9 @@ smoke gbox80 4x2 boxclosed,boxforcee,run1day,gridc smoke gbox80 4x1 boxclosed,boxforcene,run1day,kmtislands,gridc smoke gbox80 4x2 boxopen,kmtislands,boxforcee,run1day,gridc smoke gbox80 2x2 boxclosed,boxforcen,run1day,vargrid,gridc -smoke gx3 1x1x25x29x16 reprosum,run10day,dwblockall,gridc smoke_gx3_1x1x100x116x1_gridc_reprosum_run10day -smoke gx3 1x1x5x4x580 reprosum,run10day,dwblockall,gridc smoke_gx3_1x1x100x116x1_gridc_reprosum_run10day -smoke gx3 1x1x5x4x580 reprosum,run10day,gridc smoke_gx3_1x1x100x116x1_gridc_reprosum_run10day -smoke gx1 32x1x16x16x32 reprosum,run10day,cmplogrest,dwblockall,gridc 
smoke_gx1_32x1x16x16x32_gridc_reprosum_run10day -smoke gx1 32x1x16x12x40 reprosum,run10day,cmplogrest,dwblockall,gridc smoke_gx1_32x1x16x16x32_gridc_reprosum_run10day -smoke gx1 32x1x16x12x40 reprosum,run10day,cmplogrest,gridc smoke_gx1_32x1x16x16x32_gridc_reprosum_run10day +smoke gx3 1x1x25x29 reprosum,run10day,dwblockall,gridc smoke_gx3_1x1x100x116_gridc_reprosum_run10day +smoke gx3 1x1x5x4 reprosum,run10day,dwblockall,gridc smoke_gx3_1x1x100x116_gridc_reprosum_run10day +smoke gx3 1x1x5x4 reprosum,run10day,gridc smoke_gx3_1x1x100x116_gridc_reprosum_run10day +smoke gx1 32x1x16x16 reprosum,run10day,cmplogrest,dwblockall,gridc smoke_gx1_32x1x16x16_gridc_reprosum_run10day +smoke gx1 32x1x16x12 reprosum,run10day,cmplogrest,dwblockall,gridc smoke_gx1_32x1x16x16_gridc_reprosum_run10day +smoke gx1 32x1x16x12 reprosum,run10day,cmplogrest,gridc smoke_gx1_32x1x16x16_gridc_reprosum_run10day diff --git a/configuration/scripts/tests/perf_suite.ts b/configuration/scripts/tests/perf_suite.ts index a4d8ef588..a7da95390 100644 --- a/configuration/scripts/tests/perf_suite.ts +++ b/configuration/scripts/tests/perf_suite.ts @@ -1,29 +1,29 @@ # Test Grid PEs Sets BFB-compare -smoke gx1 32x1x16x16x15 run2day,droundrobin -smoke gx1 64x1x16x16x8 run2day,droundrobin,thread +smoke gx1 32x1x16x16 run2day,droundrobin +smoke gx1 64x1x16x16 run2day,droundrobin,thread # -smoke gx1 1x1x320x384x1 run2day,droundrobin smoke_gx1_32x1x16x16x15_droundrobin_run2day -smoke gx1 1x1x160x192x4 run2day,droundrobin smoke_gx1_32x1x16x16x15_droundrobin_run2day -smoke gx1 1x1x80x96x16 run2day,droundrobin smoke_gx1_32x1x16x16x15_droundrobin_run2day -smoke gx1 1x1x40x48x64 run2day,droundrobin smoke_gx1_32x1x16x16x15_droundrobin_run2day -smoke gx1 1x1x20x24x256 run2day,droundrobin smoke_gx1_32x1x16x16x15_droundrobin_run2day +smoke gx1 1x1x320x384 run2day,droundrobin smoke_gx1_32x1x16x16_droundrobin_run2day +smoke gx1 1x1x160x192 run2day,droundrobin smoke_gx1_32x1x16x16_droundrobin_run2day +smoke gx1 1x1x80x96 run2day,droundrobin smoke_gx1_32x1x16x16_droundrobin_run2day +smoke gx1 1x1x40x48 run2day,droundrobin smoke_gx1_32x1x16x16_droundrobin_run2day +smoke gx1 1x1x20x24 run2day,droundrobin smoke_gx1_32x1x16x16_droundrobin_run2day # -smoke gx1 1x1x16x16x480 run2day,droundrobin smoke_gx1_32x1x16x16x15_droundrobin_run2day -smoke gx1 2x1x16x16x240 run2day,droundrobin smoke_gx1_32x1x16x16x15_droundrobin_run2day -smoke gx1 4x1x16x16x120 run2day,droundrobin smoke_gx1_32x1x16x16x15_droundrobin_run2day -smoke gx1 8x1x16x16x60 run2day,droundrobin smoke_gx1_32x1x16x16x15_droundrobin_run2day -smoke gx1 16x1x16x16x30 run2day,droundrobin smoke_gx1_32x1x16x16x15_droundrobin_run2day -#smoke gx1 32x1x16x16x15 run2day,droundrobin -smoke gx1 64x1x16x16x8 run2day,droundrobin smoke_gx1_32x1x16x16x15_droundrobin_run2day -smoke gx1 128x1x16x16x4 run2day,droundrobin smoke_gx1_32x1x16x16x15_droundrobin_run2day +smoke gx1 1x1x16x16 run2day,droundrobin smoke_gx1_32x1x16x16_droundrobin_run2day +smoke gx1 2x1x16x16 run2day,droundrobin smoke_gx1_32x1x16x16_droundrobin_run2day +smoke gx1 4x1x16x16 run2day,droundrobin smoke_gx1_32x1x16x16_droundrobin_run2day +smoke gx1 8x1x16x16 run2day,droundrobin smoke_gx1_32x1x16x16_droundrobin_run2day +smoke gx1 16x1x16x16 run2day,droundrobin smoke_gx1_32x1x16x16_droundrobin_run2day +#smoke gx1 32x1x16x16 run2day,droundrobin +smoke gx1 64x1x16x16 run2day,droundrobin smoke_gx1_32x1x16x16_droundrobin_run2day +smoke gx1 128x1x16x16 run2day,droundrobin smoke_gx1_32x1x16x16_droundrobin_run2day # -smoke gx1 64x1x16x16x8 
run2day,droundrobin smoke_gx1_32x1x16x16x15_droundrobin_run2day -#smoke gx1 64x1x16x16x8 run2day,droundrobin,thread -smoke gx1 32x2x16x16x16 run2day,droundrobin smoke_gx1_64x1x16x16x8_droundrobin_run2day_thread -smoke gx1 16x4x16x16x32 run2day,droundrobin smoke_gx1_64x1x16x16x8_droundrobin_run2day_thread -smoke gx1 8x8x16x16x64 run2day,droundrobin smoke_gx1_64x1x16x16x8_droundrobin_run2day_thread -smoke gx1 4x16x16x16x128 run2day,droundrobin smoke_gx1_64x1x16x16x8_droundrobin_run2day_thread -smoke gx1 32x2x16x16x16 run2day,droundrobin,ompscheds smoke_gx1_64x1x16x16x8_droundrobin_run2day_thread -smoke gx1 32x2x16x16x16 run2day,droundrobin,ompschedd1 smoke_gx1_64x1x16x16x8_droundrobin_run2day_thread -smoke gx1 32x2x16x16x16 run2day,droundrobin,ompscheds1 smoke_gx1_64x1x16x16x8_droundrobin_run2day_thread +smoke gx1 64x1x16x16 run2day,droundrobin smoke_gx1_32x1x16x16_droundrobin_run2day +#smoke gx1 64x1x16x16 run2day,droundrobin,thread +smoke gx1 32x2x16x16 run2day,droundrobin smoke_gx1_64x1x16x16_droundrobin_run2day_thread +smoke gx1 16x4x16x16 run2day,droundrobin smoke_gx1_64x1x16x16_droundrobin_run2day_thread +smoke gx1 8x8x16x16 run2day,droundrobin smoke_gx1_64x1x16x16_droundrobin_run2day_thread +smoke gx1 4x16x16x16 run2day,droundrobin smoke_gx1_64x1x16x16_droundrobin_run2day_thread +smoke gx1 32x2x16x16 run2day,droundrobin,ompscheds smoke_gx1_64x1x16x16_droundrobin_run2day_thread +smoke gx1 32x2x16x16 run2day,droundrobin,ompschedd1 smoke_gx1_64x1x16x16_droundrobin_run2day_thread +smoke gx1 32x2x16x16 run2day,droundrobin,ompscheds1 smoke_gx1_64x1x16x16_droundrobin_run2day_thread # diff --git a/configuration/scripts/tests/prod_suite.ts b/configuration/scripts/tests/prod_suite.ts index 877fa1ce6..5e62e94ea 100644 --- a/configuration/scripts/tests/prod_suite.ts +++ b/configuration/scripts/tests/prod_suite.ts @@ -1,6 +1,6 @@ # Test Grid PEs Sets BFB-compare qcchk gx3 72x1 qc,qcchk,medium qcchk_gx3_72x1_medium_qc_qcchk qcchk gx1 144x1 qc,qcchk,medium -smoke gx1 144x2 gx1prod,long,run10year +smoke gx1 128x2 gx1prod,long,run8year qcchk gx3 72x1 qc,qcchkf,medium,alt02 qcchk_gx3_72x1_medium_qc_qcchk qcchk gx3 72x1 qc,qcchk,dt3456s,medium qcchk_gx3_72x1_medium_qc_qcchk diff --git a/configuration/scripts/tests/unittest_suite.ts b/configuration/scripts/tests/unittest_suite.ts index 840fc822e..779e218ff 100644 --- a/configuration/scripts/tests/unittest_suite.ts +++ b/configuration/scripts/tests/unittest_suite.ts @@ -15,21 +15,34 @@ unittest gx1 28x1 gridavgchk,dwblockall unittest gx1 16x2 gridavgchk unittest gbox128 8x2 gridavgchk unittest gbox80 1x1x10x10x80 halochk,cyclic,debug +unittest gbox80 1x1x10x10 halochk,cyclic,debug unittest gbox80 1x1x24x23x16 halochk +unittest gbox80 1x1x24x23 halochk unittest gbox80 1x1x23x24x16 halochk,cyclic +unittest gbox80 1x1x23x24 halochk,cyclic unittest gbox80 1x1x23x23x16 halochk,open +unittest gbox80 1x1x23x23 halochk,open unittest tx1 1x1x90x60x16 halochk,dwblockall +unittest tx1 1x1x90x60 halochk,dwblockall unittest tx1 1x1x90x60x16 halochk,dwblockall,tripolet +unittest tx1 1x1x90x60 halochk,dwblockall,tripolet unittest tx1 1x1x95x65x16 halochk,dwblockall +unittest tx1 1x1x95x65 halochk,dwblockall unittest tx1 1x1x95x65x16 halochk,dwblockall,tripolet +unittest tx1 1x1x95x65 halochk,dwblockall,tripolet unittest gx3 4x2 halochk,dwblockall,debug unittest gx3 8x2x16x12x10 halochk,cyclic,dwblockall +unittest gx3 8x2x16x12 halochk,cyclic,dwblockall unittest gx3 17x1x16x12x10 halochk,open,dwblockall +unittest gx3 17x1x16x12 halochk,open,dwblockall unittest tx1 4x2 
halochk,dwblockall unittest tx1 4x2 halochk,dwblockall,tripolet unittest tx1 4x2x65x45x10 halochk,dwblockall +unittest tx1 4x2x65x45 halochk,dwblockall unittest tx1 4x2x57x43x12 halochk,dwblockall,tripolet +unittest tx1 4x2x57x43 halochk,dwblockall,tripolet unittest gx3 1x1 optargs unittest gx3 1x1 opticep unittest gx3 4x2x25x29x4 debug,run2day,dslenderX2,opticep,cmplog smoke_gx3_4x2x25x29x4_debug_dslenderX2_run2day +unittest gx3 4x2x25x29 debug,run2day,dslenderX2,opticep,cmplog smoke_gx3_4x2x25x29x4_debug_dslenderX2_run2day unittest gx3 8x2 diag1,run5day,opticep,cmplog smoke_gx3_8x2_diag1_run5day diff --git a/configuration/scripts/timeseries.csh b/configuration/scripts/timeseries.csh deleted file mode 100755 index b6b3fcf2e..000000000 --- a/configuration/scripts/timeseries.csh +++ /dev/null @@ -1,129 +0,0 @@ -#!/bin/csh - -# Check to see if test case directory was passed -if ( $1 == "-h" ) then - echo "To generate timeseries plots, this script can be passed a directory" - echo "containing a logs/ subdirectory, or it can be run in the directory with" - echo "the log files, without being passed a directory." - echo "Example: ./timeseries.csh ./annual_gx3_conrad_4x1.t00" - echo "Example: ./timeseries.csh" - echo "It will pull the diagnostic data from the most recently modified log file." - exit -1 -endif -set basename = `echo $1 | sed -e 's#/$##' | sed -e 's/^\.\///'` - -# Set x-axis limits - # Manually set x-axis limits -#set xrange = 'set xrange ["19980101":"19981231"]' - # Let gnuplot determine x-axis limits -set xrange = '' - -# Determine if BASELINE dataset exists -if ( $1 == "" ) then -set basefile_dir = "IGNORE" -else -source $1/cice.settings -set basefile_dir = "$ICE_BASELINE/$ICE_BASECOM/$ICE_TESTNAME" -endif - -if ( -d $basefile_dir ) then - set num_basefile = `ls $basefile_dir | grep cice.runlog | wc -l` - if ( $num_basefile > 0 ) then - set baseline_exists = 1 - foreach file ($basefile_dir/cice.runlog.*) - set base_logfile = $file - end - else - set baseline_exists = 0 - endif -else - set baseline_exists = 0 -endif - -set fieldlist=("total ice area (km^2)" \ - "total ice extent(km^2)" \ - "total ice volume (m^3)" \ - "total snw volume (m^3)" \ - "rms ice speed (m/s)" ) - -# Get the filename for the latest log -if ( $1 == "" ) then -foreach file (./cice.runlog.*) - set logfile = $file -end -else -foreach file ($1/logs/cice.runlog.*) - set logfile = $file -end -endif - -# Loop through each field and create the plot -foreach field ($fieldlist:q) - # Add backslashes before (, ), and ^ for grep searches - set search_name = "`echo '$field' | sed 's/(/\\(/' | sed 's/)/\\)/' | sed 's/\^/\\^/'`" - set fieldname = `echo "$field" | sed -e 's/([^()]*)//g'` - set search = "'$search_name'\|istep1" - rm -f data.txt - foreach line ("`egrep $search $logfile`") - if ("$line" =~ *"istep1"*) then - set argv = ( $line ) - set date = $4 - @ hour = ( $6 / 3600 ) - else - set data1 = `echo $line | rev | cut -d ' ' -f2 | rev` - set data2 = `echo $line | rev | cut -d ' ' -f1 | rev` - echo "$date-$hour,$data1,$data2" >> data.txt - endif - end - set format = "%Y%m%d-%H" - - set output = `echo $fieldname | sed 's/ /_/g'` - set output = "${output}_${ICE_CASENAME}.png" - - echo "Plotting data for '$fieldname' and saving to $output" - -# Call the plotting routine, which uses the data in the data.txt file -gnuplot << EOF > $output -# Plot style -set style data points - -set datafile separator "," - -# Term type and background color, canvas size -set terminal png size 1920,960 - -# x-axis -set xdata time -set 
timefmt "$format" -set format x "%Y/%m/%d" - -# Axis tick marks -set xtics rotate - -set title "$field (Diagnostic Output)" -set ylabel "$field" -set xlabel "Simulation Day" - -set key left top - -# Set x-axlis limits -$xrange - -if ( $baseline_exists == 1 ) \ - plot "data_baseline.txt" using (timecolumn(1)):2 with lines lw 2 lt 2 lc 2 title \ - "Arctic - Baseline", \ - "" using (timecolumn(1)):3 with lines lw 2 lt 2 lc 5 title "Antarctic - Baseline", \ - "data.txt" using (timecolumn(1)):2 with lines lw 2 lt 1 lc 1 title "Arctic", \ - "" using (timecolumn(1)):3 with lines lw 2 lt 1 lc 3 title "Antarctic"; \ -else \ - plot "data.txt" using (timecolumn(1)):2 with lines lw 2 lt 1 lc 1 title "Arctic", \ - "" using (timecolumn(1)):3 with lines lw 2 lt 1 lc 3 title "Antarctic" \ - -EOF - -# Delete the data file -rm -f data.txt -if ( $baseline_exists ) then - rm -f data_baseline.txt -endif -end diff --git a/configuration/scripts/timeseries.py b/configuration/scripts/timeseries.py index 2c36cea73..c53106071 100755 --- a/configuration/scripts/timeseries.py +++ b/configuration/scripts/timeseries.py @@ -1,4 +1,4 @@ -#!/usr/bin/env python +#!/usr/bin/env python3 ''' This script generates timeseries plots of CICE diagnostic output. @@ -51,16 +51,16 @@ def get_data(logfile,field): logger.debug('Extracting data for {}'.format(field)) # Build the regular expression to extract the data - field_regex = field.replace('(','\(').replace('^','\^').replace(')','\)') - number_regex = '[-+]?\d+\.?\d+([eE][-+]?\d+)?' - my_regex = '^{}\s+=\s+({})\s+({})'.format(field_regex,number_regex,number_regex) + field_regex = field.replace('(','\\(').replace('^','\\^').replace(')','\\)') + number_regex = r'[-+]?\d+\.?\d+([eE][-+]?\d+)?' + my_regex = r'^{}\s+=\s+({})\s+({})'.format(field_regex,number_regex,number_regex) dtg = [] arctic = [] antarctic = [] with open(logfile) as f: for line in f.readlines(): - m1 = re.search('istep1:\s+(\d+)\s+idate:\s+(\d+)\s+sec:\s+(\d+)', line) + m1 = re.search(r'istep1:\s+(\d+)\s+idate:\s+(\d+)\s+sec:\s+(\d+)', line) if m1: # Extract the current date-time group from the file date = m1.group(2) @@ -83,6 +83,11 @@ def get_data(logfile,field): antarctic.append(float(m.group(3))) logger.debug(' Arctic = {}, Antarctic = {}'.format(arctic[-1], antarctic[-1])) + # remove first few elements of dtg + if len(dtg) > len(arctic): + stind = len(dtg) - len(arctic) + dtg = dtg[stind:] + return dtg, arctic, antarctic, expon def latexit(string): @@ -90,15 +95,17 @@ def latexit(string): return (s.replace(')','$)',1))[::-1] def plot_timeseries(log, field, dtg, arctic, antarctic, expon, dtg_base=None, arctic_base=None, \ - antarctic_base=None, base_dir=None, grid=False): + antarctic_base=None, base_dir=None, grid=False, casename=None, base_casename=None): ''' Plot the timeseries data from the CICE log file ''' import re - casename = re.sub(r"/logs", "", os.path.abspath(log).rstrip('/')).split('/')[-1] + if casename is None: + casename = re.sub(r"/logs", "", os.path.abspath(log).rstrip('/')).split('/')[-1] if base_dir: - base_casename = re.sub(r"/logs", "", os.path.abspath(base_dir).rstrip('/')).split('/')[-1] + if base_casename is None: + base_casename = re.sub(r"/logs", "", os.path.abspath(base_dir).rstrip('/')).split('/')[-1] # Load the plotting libraries, but set the logging level for matplotlib # to WARNING so that matplotlib debugging info is not printed when running @@ -108,7 +115,8 @@ def plot_timeseries(log, field, dtg, arctic, antarctic, expon, dtg_base=None, ar import matplotlib.dates as mdates import 
matplotlib.ticker as ticker - fig = plt.figure(figsize=(12,8)) +# fig = plt.figure(figsize=(12,8)) + fig = plt.figure(figsize=(6,4)) ax = fig.add_axes([0.05,0.08,0.9,0.9]) # Add the arctic data to the plot @@ -132,55 +140,54 @@ def plot_timeseries(log, field, dtg, arctic, antarctic, expon, dtg_base=None, ar ax.xaxis.set_minor_locator(mdates.MonthLocator()) # Add a text box that prints the test case name and the baseline case name (if given) - try: - text_field = "Test/Case: {}\nBaseline: {}".format(casename,base_casename) - from matplotlib.offsetbox import AnchoredText - anchored_text = AnchoredText(text_field,loc=2) - ax.add_artist(anchored_text) - except: - text_field = "Test/Case: {}".format(casename) - from matplotlib.offsetbox import AnchoredText - anchored_text = AnchoredText(text_field,loc=2) - ax.add_artist(anchored_text) + if base_casename is None: + text_field = "{}".format(casename) + else: + text_field = "{}\n{}".format(casename,base_casename) + + from matplotlib.offsetbox import AnchoredText + anchored_text = AnchoredText(text_field,loc='upper left') + anchored_text.patch.set_alpha(0.5) + ax.add_artist(anchored_text) - ax.legend(loc='upper right') + ax.legend(loc='upper right',framealpha=0.5) # Add grid lines if the `--grid` argument was passed at the command line. if grid: ax.grid(ls='--') # Reduce the number of ticks on the y axis - nbins = 10 - try: - minval = min( \ - min(min(arctic), min(antarctic)), \ - min(min(arctic_base), min(antarctic_base))) - maxval = max( \ - max(max(arctic), max(antarctic)), \ - max(max(arctic_base), max(antarctic_base))) - except: - minval = min(min(arctic), min(antarctic)) - maxval = max(max(arctic), max(antarctic)) - step = (maxval-minval)/nbins - ax.yaxis.set_ticks(np.arange(minval, maxval+step, step)) +# nbins = 10 +# try: +# minval = min( \ +# min(min(arctic), min(antarctic)), \ +# min(min(arctic_base), min(antarctic_base))) +# maxval = max( \ +# max(max(arctic), max(antarctic)), \ +# max(max(arctic_base), max(antarctic_base))) +# except: +# minval = min(min(arctic), min(antarctic)) +# maxval = max(max(arctic), max(antarctic)) +# step = (maxval-minval)/nbins +# ax.yaxis.set_ticks(np.arange(minval, maxval+step, step)) # Format the y-axis tick labels, based on whether or not the values in the log file # are in scientific notation or float notation. if expon: - ax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%0.3e')) + ax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%0.1e')) else: - ax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%0.5f')) + ax.yaxis.set_major_formatter(ticker.FormatStrFormatter('%0.3f')) # Rotate and right align the x labels for tick in ax.get_xticklabels(): - tick.set_rotation(45) + tick.set_rotation(30) # Create an output file and save the figure field_tmp = field.split('(')[0].rstrip() try: - outfile = '{}_{}_base-{}.png'.format(field_tmp.replace(' ','_'), casename,base_casename) + outfile = '{}_{}_base-{}.png'.format(field_tmp.replace(' ','_'),casename.replace(' ','_'),base_casename.replace(' ','_')) except: - outfile = '{}_{}.png'.format(field_tmp.replace(' ','_'), casename) + outfile = '{}_{}.png'.format(field_tmp.replace(' ','_'), casename.replace(' ','_')) logger.info('Saving file to {}'.format(outfile)) plt.savefig(outfile,dpi=300,bbox_inches='tight') @@ -204,6 +211,10 @@ def main(): dataset, if desired. A specific log file or case directory can \ be passed. 
If a directory is passed, the most recent log file \ will be used.') + parser.add_argument('--case', dest='casename', help='User specified casename for plots.', \ + action='store') + parser.add_argument('--basecase', dest='base_casename', help='User specified base casename \ + for plots.', action='store') parser.add_argument('-v', '--verbose', dest='verbose', help='Print debug output?', \ action='store_true') parser.add_argument('--area', dest='area', help='Create a plot for total ice area?', \ @@ -227,6 +238,8 @@ def main(): parser.set_defaults(snow_volume=False) parser.set_defaults(speed=False) parser.set_defaults(grid=False) + parser.set_defaults(casename=None) + parser.set_defaults(base_casename=None) args = parser.parse_args() @@ -268,7 +281,7 @@ def main(): logger.debug('{} is a file'.format(args.log_dir)) log = args.log_dir log_dir = args.log_dir.rsplit('/',1)[0] - logger.info('Log file = {}'.format(log)) + if args.base_dir: if os.path.isdir(args.base_dir): base_log = find_logfile(args.base_dir) @@ -278,6 +291,9 @@ def main(): base_dir = args.base_dir.rsplit('/',1)[0] logger.info('Base Log file = {}'.format(base_log)) + logger.info('casename = {}'.format(args.casename)) + logger.info('Log file = {}'.format(log)) + # Loop through each field and create the plot for field in fieldlist: logger.debug('Current field = {}'.format(field)) @@ -290,9 +306,11 @@ def main(): # Plot the data if args.base_dir: plot_timeseries(log_dir, field, dtg, arctic, antarctic, expon, dtg_base, \ - arctic_base, antarctic_base, base_dir, grid=args.grid) + arctic_base, antarctic_base, base_dir, grid=args.grid, \ + casename=args.casename, base_casename=args.base_casename) else: - plot_timeseries(log_dir, field, dtg, arctic, antarctic, expon, grid=args.grid) + plot_timeseries(log_dir, field, dtg, arctic, antarctic, expon, grid=args.grid, \ + casename=args.casename) if __name__ == "__main__": main() diff --git a/doc/source/conf.py b/doc/source/conf.py index 0e7ce0886..fec9406c0 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -57,7 +57,7 @@ # General information about the project. project = u'CICE' -copyright = u'2023, Triad National Security, LLC (code) and National Center for Atmospheric Research (documentation)' +copyright = u'2024, Triad National Security, LLC (code) and National Center for Atmospheric Research (documentation)' author = u'CICE-Consortium' # The version info for the project you're documenting, acts as replacement for @@ -65,9 +65,9 @@ # built documents. # # The short X.Y version. -version = u'6.5.0' +version = u'6.5.1' # The full version, including alpha/beta/rc tags. -version = u'6.5.0' +version = u'6.5.1' # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/doc/source/developer_guide/dg_scripts.rst b/doc/source/developer_guide/dg_scripts.rst index dac5e9a52..d4b29acbb 100644 --- a/doc/source/developer_guide/dg_scripts.rst +++ b/doc/source/developer_guide/dg_scripts.rst @@ -22,6 +22,8 @@ The directory structure under configure/scripts is as follows. 
| **cice.run.setup.csh** sets up the run scripts | **cice.settings** defines environment, model configuration and run settings | **cice.test.setup.csh** creates configurations for testing the model +| **ciceplots.csh** general script to generate timeseries and 2d CICE plots +| **ciceplots2d.py** python script to generate 2d CICE plots | **ice_in** namelist input data | **machines/** machine specific files to set env and Macros | **makdep.c** determines module dependencies @@ -31,8 +33,7 @@ The directory structure under configure/scripts is as follows. | **parse_settings.sh** replaces settings with command-line configuration | **setup_run_dirs.csh** creates the case run directories | **set_version_number.csh** updates the model version number from the **cice.setup** command line -| **timeseries.csh** generates PNG timeseries plots from output files, using GNUPLOT -| **timeseries.py** generates PNG timeseries plots from output files, using Python +| **timeseries.py** python script to generate timeseries plots from CICE log file | **tests/** scripts for configuring and running basic tests .. _dev_strategy: diff --git a/doc/source/intro/copyright.rst b/doc/source/intro/copyright.rst index e477d9d57..8ddeef022 100644 --- a/doc/source/intro/copyright.rst +++ b/doc/source/intro/copyright.rst @@ -5,7 +5,7 @@ Copyright ============================= -© Copyright 2023, Triad National Security LLC. All rights reserved. +© Copyright 2024, Triad National Security LLC. All rights reserved. This software was produced under U.S. Government contract 89233218CNA000001 for Los Alamos National Laboratory (LANL), which is operated by Triad National Security, LLC for the U.S. Department diff --git a/doc/source/science_guide/sg_coupling.rst b/doc/source/science_guide/sg_coupling.rst index 666c13ed4..4e8168530 100644 --- a/doc/source/science_guide/sg_coupling.rst +++ b/doc/source/science_guide/sg_coupling.rst @@ -132,8 +132,7 @@ coefficient, :math:`\rho_w` is the density of seawater, and necessary if the top ocean model layers are not able to resolve the Ekman spiral in the boundary layer. If the top layer is sufficiently thin compared to the typical depth of the Ekman spiral, then -:math:`\theta=0` is a good approximation. Here we assume that the top -layer is thin enough. +:math:`\theta=0` is a good approximation. Please see the `Icepack documentation `_ for additional information about atmospheric and oceanic forcing and other data exchanged between the diff --git a/doc/source/science_guide/sg_dynamics.rst b/doc/source/science_guide/sg_dynamics.rst index 1ddf94472..978da7fcb 100644 --- a/doc/source/science_guide/sg_dynamics.rst +++ b/doc/source/science_guide/sg_dynamics.rst @@ -35,11 +35,11 @@ For clarity, the two components of Equation :eq:`vpmom` are \begin{aligned} m{\partial u\over\partial t} &= {\partial\sigma_{1j}\over\partial x_j} + \tau_{ax} + a_i c_w \rho_w - \left|{\bf U}_w - {\bf u}\right| \left[\left(U_w-u\right)\cos\theta - \left(V_w-v\right)\sin\theta\right] + \left|{\bf U}_w - {\bf u}\right| \left[\left(U_w-u\right)\cos\theta \mp \left(V_w-v\right)\sin\theta\right] -C_bu +mfv - mg{\partial H_\circ\over\partial x}, \\ m{\partial v\over\partial t} &= {\partial\sigma_{2j}\over\partial x_j} + \tau_{ay} + a_i c_w \rho_w - \left|{\bf U}_w - {\bf u}\right| \left[\left(U_w-u\right)\sin\theta + \left(V_w-v\right)\cos\theta\right] + \left|{\bf U}_w - {\bf u}\right| \left[ \pm \left(U_w-u\right)\sin\theta + \left(V_w-v\right)\cos\theta\right] -C_bv-mfu - mg{\partial H_\circ\over\partial y}. 
\end{aligned} :label: momsys @@ -121,18 +121,18 @@ variables used in the code. .. math:: \underbrace{\left({m\over\Delta t_e}+{\tt vrel} \cos\theta\ + C_b \right)}_{\tt cca} u^{k+1} - - \underbrace{\left(mf+{\tt vrel}\sin\theta\right)}_{\tt ccb}v^{l} + - \underbrace{\left(mf \pm {\tt vrel}\sin\theta\right)}_{\tt ccb}v^{l} = &\underbrace{{\partial\sigma_{1j}^{k+1}\over\partial x_j}}_{\tt strintx} + \underbrace{\tau_{ax} - mg{\partial H_\circ\over\partial x} }_{\tt forcex} \\ - &+ {\tt vrel}\underbrace{\left(U_w\cos\theta-V_w\sin\theta\right)}_{\tt waterx} + {m\over\Delta t_e}u^k, + &+ {\tt vrel}\underbrace{\left(U_w\cos\theta \mp V_w\sin\theta\right)}_{\tt waterx} + {m\over\Delta t_e}u^k, :label: umom .. math:: - \underbrace{\left(mf+{\tt vrel}\sin\theta\right)}_{\tt ccb} u^{l} + \underbrace{\left(mf \pm {\tt vrel}\sin\theta\right)}_{\tt ccb} u^{l} + \underbrace{\left({m\over\Delta t_e}+{\tt vrel} \cos\theta + C_b \right)}_{\tt cca}v^{k+1} = &\underbrace{{\partial\sigma_{2j}^{k+1}\over\partial x_j}}_{\tt strinty} + \underbrace{\tau_{ay} - mg{\partial H_\circ\over\partial y} }_{\tt forcey} \\ - &+ {\tt vrel}\underbrace{\left(U_w\sin\theta+V_w\cos\theta\right)}_{\tt watery} + {m\over\Delta t_e}v^k, + &+ {\tt vrel}\underbrace{\left( \pm U_w\sin\theta+V_w\cos\theta\right)}_{\tt watery} + {m\over\Delta t_e}v^k, :label: vmom where :math:`{\tt vrel}\ \cdot\ {\tt waterx(y)}= {\tt taux(y)}` and the definitions of :math:`u^{l}` and :math:`v^{l}` vary depending on the grid. @@ -140,19 +140,19 @@ where :math:`{\tt vrel}\ \cdot\ {\tt waterx(y)}= {\tt taux(y)}` and the definiti As :math:`u` and :math:`v` are collocated on the B grid, :math:`u^{l}` and :math:`v^{l}` are respectively :math:`u^{k+1}` and :math:`v^{k+1}` such that this system of equations can be solved as follows. Define .. math:: - \hat{u} = F_u + \tau_{ax} - mg{\partial H_\circ\over\partial x} + {\tt vrel} \left(U_w\cos\theta - V_w\sin\theta\right) + {m\over\Delta t_e}u^k + \hat{u} = F_u + \tau_{ax} - mg{\partial H_\circ\over\partial x} + {\tt vrel} \left(U_w\cos\theta \mp V_w\sin\theta\right) + {m\over\Delta t_e}u^k :label: cevpuhat .. math:: - \hat{v} = F_v + \tau_{ay} - mg{\partial H_\circ\over\partial y} + {\tt vrel} \left(U_w\sin\theta + V_w\cos\theta\right) + {m\over\Delta t_e}v^k, + \hat{v} = F_v + \tau_{ay} - mg{\partial H_\circ\over\partial y} + {\tt vrel} \left(\pm U_w\sin\theta + V_w\cos\theta\right) + {m\over\Delta t_e}v^k, :label: cevpvhat where :math:`{\bf F} = \nabla\cdot\sigma^{k+1}`. Then .. math:: \begin{aligned} - \left({m\over\Delta t_e} +{\tt vrel}\cos\theta\ + C_b \right)u^{k+1} - \left(mf + {\tt vrel}\sin\theta\right) v^{k+1} &= \hat{u} \\ - \left(mf + {\tt vrel}\sin\theta\right) u^{k+1} + \left({m\over\Delta t_e} +{\tt vrel}\cos\theta + C_b \right)v^{k+1} &= \hat{v}.\end{aligned} + \left({m\over\Delta t_e} +{\tt vrel}\cos\theta\ + C_b \right)u^{k+1} - \left(mf \pm {\tt vrel}\sin\theta\right) v^{k+1} &= \hat{u} \\ + \left(mf \pm {\tt vrel}\sin\theta\right) u^{k+1} + \left({m\over\Delta t_e} +{\tt vrel}\cos\theta + C_b \right)v^{k+1} &= \hat{v}.\end{aligned} Solving simultaneously for :math:`u^{k+1}` and :math:`v^{k+1}`, @@ -168,7 +168,7 @@ where :label: cevpa .. math:: - b = mf + {\tt vrel}\sin\theta. + b = mf \pm {\tt vrel}\sin\theta. :label: cevpb Note that the time discretization and solution method for the EAP is exactly the same as for the B grid EVP. More details on the EAP model are given in Section :ref:`stress-eap`. 
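(For reference, a sketch not present in the hunks above: with :math:`a` as defined at :eq:`cevpa` and :math:`b = mf \pm {\tt vrel}\sin\theta` as at :eq:`cevpb`, the simultaneous solve for :math:`u^{k+1}` and :math:`v^{k+1}` quoted above is the standard 2x2 inversion

.. math::
   u^{k+1} = {a\hat{u} + b\hat{v} \over a^2 + b^2}, \qquad
   v^{k+1} = {a\hat{v} - b\hat{u} \over a^2 + b^2},

so the hemispheric sign choice enters the solution only through :math:`b`.)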
@@ -191,20 +191,20 @@ implicit solvers and there is an additional term for the pseudo-time iteration. .. math:: {\beta^*(u^{k+1}-u^k)\over\Delta t_e} + {m(u^{k+1}-u^n)\over\Delta t} + {\left({\tt vrel} \cos\theta + C_b \right)} u^{k+1} - - & {\left(mf+{\tt vrel}\sin\theta\right)} v^{l} + - & {\left(mf \pm {\tt vrel}\sin\theta\right)} v^{l} = {{\partial\sigma_{1j}^{k+1}\over\partial x_j}} + {\tau_{ax}} \\ & - {mg{\partial H_\circ\over\partial x} } - + {\tt vrel} {\left(U_w\cos\theta-V_w\sin\theta\right)}, + + {\tt vrel} {\left(U_w\cos\theta \mp V_w\sin\theta\right)}, :label: umomr .. math:: {\beta^*(v^{k+1}-v^k)\over\Delta t_e} + {m(v^{k+1}-v^n)\over\Delta t} + {\left({\tt vrel} \cos\theta + C_b \right)}v^{k+1} - + & {\left(mf+{\tt vrel}\sin\theta\right)} u^{l} + + & {\left(mf \pm {\tt vrel}\sin\theta\right)} u^{l} = {{\partial\sigma_{2j}^{k+1}\over\partial x_j}} + {\tau_{ay}} \\ & - {mg{\partial H_\circ\over\partial y} } - + {\tt vrel}{\left(U_w\sin\theta+V_w\cos\theta\right)}, + + {\tt vrel}{\left( \pm U_w\sin\theta+V_w\cos\theta\right)}, :label: vmomr where :math:`\beta^*` is a numerical parameter and :math:`u^n, v^n` are the components of the previous time level solution. @@ -212,18 +212,18 @@ With :math:`\beta=\beta^* \Delta t \left( m \Delta t_e \right)^{-1}` :cite:`Bou .. math:: \underbrace{\left((\beta+1){m\over\Delta t}+{\tt vrel} \cos\theta\ + C_b \right)}_{\tt cca} u^{k+1} - - \underbrace{\left(mf+{\tt vrel} \sin\theta\right)}_{\tt ccb} & v^{l} + - \underbrace{\left(mf \pm {\tt vrel} \sin\theta\right)}_{\tt ccb} & v^{l} = \underbrace{{\partial\sigma_{1j}^{k+1}\over\partial x_j}}_{\tt strintx} + \underbrace{\tau_{ax} - mg{\partial H_\circ\over\partial x} }_{\tt forcex} \\ - & + {\tt vrel}\underbrace{\left(U_w\cos\theta-V_w\sin\theta\right)}_{\tt waterx} + {m\over\Delta t}(\beta u^k + u^n), + & + {\tt vrel}\underbrace{\left(U_w\cos\theta \mp V_w\sin\theta\right)}_{\tt waterx} + {m\over\Delta t}(\beta u^k + u^n), :label: umomr2 .. math:: - \underbrace{\left(mf+{\tt vrel}\sin\theta\right)}_{\tt ccb} u^{l} + \underbrace{\left(mf \pm {\tt vrel}\sin\theta\right)}_{\tt ccb} u^{l} + \underbrace{\left((\beta+1){m\over\Delta t}+{\tt vrel} \cos\theta + C_b \right)}_{\tt cca} & v^{k+1} = \underbrace{{\partial\sigma_{2j}^{k+1}\over\partial x_j}}_{\tt strinty} + \underbrace{\tau_{ay} - mg{\partial H_\circ\over\partial y} }_{\tt forcey} \\ - & + {\tt vrel}\underbrace{\left(U_w\sin\theta+V_w\cos\theta\right)}_{\tt watery} + {m\over\Delta t}(\beta v^k + v^n), + & + {\tt vrel}\underbrace{\left( \pm U_w\sin\theta+V_w\cos\theta\right)}_{\tt watery} + {m\over\Delta t}(\beta v^k + v^n), :label: vmomr2 At this point, the solutions :math:`u^{k+1}` and :math:`v^{k+1}` for the B or the C grids are obtained in the same manner as for the standard EVP approach (see Section :ref:`evp-momentum` for details). @@ -292,6 +292,8 @@ Ice-Ocean stress At the end of each (thermodynamic) time step, the ice–ocean stress must be constructed from :math:`{\tt taux(y)}` and the terms containing :math:`{\tt vrel}` on the left hand side of the equations. +The water stress calculation has a hemispheric dependence on the sign of the +:math:`\pm {\tt vrel}\sin\theta` term. 
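(A gloss on the sign convention, inferred from the ``waterx``/``watery`` construction in **ice_dyn_shared.F90** rather than stated in the hunks, so treat it as a reviewer's note: the upper signs apply in the northern hemisphere and the lower signs in the southern hemisphere, with the choice keyed to the sign of the Coriolis term, i.e.

.. math::
   {\tt waterx} = U_w\cos\theta - V_w\sin\theta\,{\rm sign}(f), \qquad
   {\tt watery} = U_w\sin\theta\,{\rm sign}(f) + V_w\cos\theta.)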
The Hibler-Bryan form for the ice-ocean stress :cite:`Hibler87` is included in **ice\_dyn\_shared.F90** but is currently commented out, diff --git a/doc/source/user_guide/ug_case_settings.rst b/doc/source/user_guide/ug_case_settings.rst index 9f1f8a259..8e7b154db 100644 --- a/doc/source/user_guide/ug_case_settings.rst +++ b/doc/source/user_guide/ug_case_settings.rst @@ -33,7 +33,10 @@ can be found in :ref:`cicecpps`. The following CPPs are available. "ESMF_INTERFACE", "Turns on ESMF support in a subset of driver code. Also USE_ESMF_LIB and USE_ESMF_METADATA" "FORTRANUNDERSCORE", "Used in ice_shr_reprosum86.c to support Fortran-C interfaces. This should generally be turned on at all times. There are other CPPs (FORTRANDOUBULEUNDERSCORE, FORTRANCAPS, etc) in ice_shr_reprosum.c that are generally not used in CICE but could be useful if problems arise in the Fortran-C interfaces" "GPTL", "Turns on GPTL initialization if needed for PIO" + "NO_CDF2", "Turns off support for netcdf cdf2 (nf90_64bit_offset)" + "NO_CDF5", "Turns off support for netcdf cdf5 (nf90_64bit_data)" "NO_F2003", "Turns off some Fortran 2003 features" + "NO_HDF5", "Turns off support for netcdf hdf5 (netcdf4 including chunking and compression)" "NO_I8", "Converts integer*8 to integer*4. This could have adverse affects for certain algorithms including the ddpdd implementation associated with the ``bfbflag``" "NO_R16", "Converts real*16 to real*8. This could have adverse affects for certain algorithms including the lsum16 implementation associated with the ``bfbflag``" "NO_SNICARHC", "Does not compile hardcoded (HC) 5 band snicar tables tables needed by ``shortwave=dEdd_snicar_ad``. May reduce compile time." @@ -369,7 +372,9 @@ domain_nml "``maskhalo_remap``", "logical", "mask unused halo cells for transport", "``.false.``" "``maskhalo_bound``", "logical", "mask unused halo cells for boundary updates", "``.false.``" "``max_blocks``", "integer", "maximum number of blocks per MPI task for memory allocation", "-1" - "``nprocs``", "integer", "number of processors to use", "-1" + "", "``-1``", "find number of blocks per MPI task automatically", "" + "``nprocs``", "integer", "number of MPI tasks to use", "-1" + "", "``-1``", "find number of MPI tasks automatically", "" "``ns_boundary_type``", "``cyclic``", "periodic boundary conditions in y-direction", "``open``" "", "``open``", "Dirichlet boundary conditions in y", "" "", "``tripole``", "U-fold tripole boundary conditions in y", "" diff --git a/doc/source/user_guide/ug_implementation.rst b/doc/source/user_guide/ug_implementation.rst index 7d172e91d..91909082c 100644 --- a/doc/source/user_guide/ug_implementation.rst +++ b/doc/source/user_guide/ug_implementation.rst @@ -213,22 +213,24 @@ ghost cells, and the same numbering system is applied to each of the four subdomains. The user sets the ``NTASKS`` and ``NTHRDS`` settings in **cice.settings** -and chooses a block size ``block_size_x`` :math:`\times`\ ``block_size_y``, -``max_blocks``, and decomposition information ``distribution_type``, ``processor_shape``, -and ``distribution_type`` in **ice_in**. That information is used to -determine how the blocks are -distributed across the processors, and how the processors are -distributed across the grid domain. The model is parallelized over blocks +and chooses a block size, ``block_size_x`` :math:`\times`\ ``block_size_y``, +and decomposition information ``distribution_type``, ``processor_shape``, +and ``distribution_wght`` in **ice_in**.
+This information is used to determine how the blocks are +distributed across the processors. The model is parallelized over blocks for both MPI and OpenMP. Some suggested combinations for these parameters for best performance are given in Section :ref:`performance`. The script **cice.setup** computes some default decompositions and layouts -but the user can overwrite the defaults by manually changing the values in -`ice_in`. At runtime, the model will print decomposition +but the user can override the defaults by manually changing the values in +`ice_in`. The number of blocks per processor can vary, and this is computed +internally when the namelist ``max_blocks=-1``. ``max_blocks`` +can also be set by the user, although this may use extra memory and the +model will abort if ``max_blocks`` is set too small for the decomposition. +At runtime, the model will print decomposition information to the log file, and if the block size or max blocks is inconsistent with the task and thread size, the model will abort. The code will also print a warning if the maximum number of blocks is too large. -Although this is not fatal, it does use extra memory. If ``max_blocks`` is -set to -1, the code will compute a tentative ``max_blocks`` on the fly. +Although this is not fatal, it does use extra memory. A loop at the end of routine *create_blocks* in module **ice_blocks.F90** will print the locations for all of the blocks on @@ -1218,7 +1220,10 @@ and (https://github.com/NCAR/ParallelIO). netCDF requires CICE compilation with a netCDF library built externally. PIO requires CICE compilation with a PIO and netCDF library built externally. Both netCDF and PIO can be built with many options which may require additional libraries -such as MPI, hdf5, or pnetCDF. +such as MPI, hdf5, or pnetCDF. There are CPPs that disable cdf2, +cdf5, and hdf5 support should the netcdf library be built without those features. +Those CPPs are ``NO_CDF2``, ``NO_CDF5``, and ``NO_HDF5``. These can be added +to the Macros machine file explicitly when needed. .. _history: diff --git a/doc/source/user_guide/ug_running.rst b/doc/source/user_guide/ug_running.rst index 9337b3c47..021c5bcbe 100644 --- a/doc/source/user_guide/ug_running.rst +++ b/doc/source/user_guide/ug_running.rst @@ -49,6 +49,7 @@ The Consortium has tested the following compilers at some point, - Intel ifort 2021.6.0 - Intel ifort 2021.8.0 - Intel ifort 2021.9.0 +- Intel ifort 2021.10.0 - Intel ifort 2022.2.1 - PGI 16.10.0 - PGI 19.9-0 @@ -86,6 +87,7 @@ The Consortium has tested the following MPI implementations and versions, - MPICH 8.1.14 - MPICH 8.1.21 - MPICH 8.1.25 +- MPICH 8.1.26 - Intel MPI 18.0.1 - Intel MPI 18.0.4 - Intel MPI 2019 Update 6 @@ -843,12 +845,13 @@ A few notes about the conda configuration: mpirun -np ${ntasks} --oversubscribe ./cice >&! \$ICE_RUNLOG_FILE - It is not recommeded to run other test suites than ``quick_suite`` or ``travis_suite`` on a personal computer. -- The conda environment is automatically activated when compiling or running the model using the ``./cice.build`` and ``./cice.run`` scripts in the case directory. These scripts source the file ``env.conda_{linux.macos}``, which calls ``conda activate cice``. +- If needed, the conda environment is automatically activated when compiling or running the model using the ``./cice.build`` and ``./cice.run`` scripts in the case directory. These scripts source the file ``env.conda_{linux.macos}``, which calls ``conda activate cice``.
- To use the "cice" conda environment with the Python plotting (see :ref:`timeseries`) and quality control (QC) scripts (see :ref:`CodeValidation`), you must manually activate the environment: .. code-block:: bash cd ~/cice-dirs/cases/case1 + conda env create -f configuration/scripts/machines/environment.yml --force conda activate cice python timeseries.py ~/cice-dirs/cases/case1/logs conda deactivate # to deactivate the environment @@ -953,58 +956,49 @@ in shell startup files or otherwise at users discretion: .. _timeseries: -Timeseries Plotting +Plotting Tools ------------------- -The CICE scripts include two scripts that will generate timeseries figures from a -diagnostic output file, a Python version (``timeseries.py``) and a csh version -(``timeseries.csh``). Both scripts create the same set of plots, but the Python -script has more capabilities, and it's likely that the csh -script will be removed in the future. +CICE includes a couple of simple scripts to generate plots. The ``timeseries.py`` +scripts generates northern and southern hemisphere timeseries plots for several +fields from the CICE log file. The ``ciceplots2d.py`` script generates some +two-dimensional plots from CICE history files as global and polar projections. +The script ``ciceplots.csh`` is a general script that sets up the inputs for the +python plotting tools and calls them. Both python tools produce png files. -To use the ``timeseries.py`` script, the following requirements must be met: +To use the python scripts, the following python packages are required: -* Python v2.7 or later -* numpy Python package -* matplotlib Python package -* datetime Python package +* Python3 +* numpy +* matplotlib +* re +* datetime +* netcdf4 +* basemap, basemap-data, basemap-data-hires -See :ref:`CodeValidation` for additional information about how to setup the Python -environment, but we recommend using ``pip`` as follows: :: +The easist way to install the package is via the cice env file provided with CICE via conda: - pip install --user numpy - pip install --user matplotlib - pip install --user datetime - -When creating a case or test via ``cice.setup``, the ``timeseries.csh`` and -``timeseries.py`` scripts are automatically copied to the case directory. -Alternatively, the plotting scripts can be found in ``./configuration/scripts``, and can be -run from any directory. - -The Python script can be passed a directory, a specific log file, or no directory at all: + .. code-block:: bash - - If a directory is passed, the script will look either in that directory or in - directory/logs for a filename like cice.run*. As such, users can point the script - to either a case directory or the ``logs`` directory directly. The script will use - the file with the most recent creation time. - - If a specific file is passed the script parses that file, assuming that the file - matches the same form of cice.run* files. - - If nothing is passed, the script will look for log files or a ``logs`` directory in the - directory from where the script was run. + conda env create -f configuration/scripts/machines/environment.yml --force + conda activate cice -For example: +Then edit the ``ciceplots.csh`` script and run it. ``ciceplots.csh`` also demonstrates +how to call each python script separately. -Run the timeseries script on the desired case. :: +When creating a case or test via ``cice.setup``, these three plotting scripts +are automatically copied to the case directory. 
+Alternatively, the plotting scripts can be found in ``./configuration/scripts`` and can +be run as needed. -$ python timeseries.py /p/work1/turner/CICE_RUNS/conrad_intel_smoke_col_1x1_diag1_run1year.t00/ +Briefly, the ``timeseries.py`` script has a few options but can be called as follows: -or :: + .. code-block:: bash -$ python timeseries.py /p/work1/turner/CICE_RUNS/conrad_intel_smoke_col_1x1_diag1_run1year.t00/logs - -The output figures are placed in the directory where the ``timeseries.py`` script is run. + ./timeseries.py /p/work1/turner/CICE_RUNS/conrad_intel_smoke_col_1x1_diag1_run1year.t00 --grid --case CICE6.0.1 -The plotting script will plot the following variables by default, but you can also select +The timeseries script parses the log file, so the temporal resolution is based on the log output frequency. +The timeseries plotting script will plot the following variables by default, but you can also select specific plots to create via the optional command line arguments. - total ice area (:math:`km^2`) @@ -1013,30 +1007,14 @@ specific plots to create via the optional command line arguments. - total snow volume (:math:`m^3`) - RMS ice speed (:math:`m/s`) -For example, to plot only total ice volume and total snow volume :: +The ``ciceplots2d.py`` script is called as follows: -$ python timeseries.py /p/work1/turner/CICE_RUNS/conrad_intel_smoke_col_1x1_diag1_run1year.t00/ --volume --snw_vol - -To generate plots for all of the cases within a suite with a testid, create and run a script such as :: - - #!/bin/csh - foreach dir (`ls -1 | grep testid`) - echo $dir - python timeseries.py $dir - end - -Plots are only made for a single output file at a time. The ability to plot output from -a series of cice.run* files is not currently possible, but may be added in the future. -However, using the ``--bdir`` option will plot two datasets (from log files) on the -same figure. - -For the latest help information for the script, run :: - -$ python timeseries.py -h - -The ``timeseries.csh`` script works basically the same way as the Python version, however it -does not include all of the capabilities present in the Python version. + .. code-block:: bash -To use the C-Shell version of the script, :: + ./ciceplots2d.py aice /p/work1/turner/CICE_RUNS/conrad_intel_smoke_col_1x1_diag1_run1year.t00/history/iceh.2005-09.nc CICE6.0.1 "Sept 2005 Mean" 2005Sep -$ ./timeseries.csh /p/work1/turner/CICE_RUNS/conrad_intel_smoke_col_1x1_diag1_run1year.t00/ +In the example above, a global, northern hemisphere, and southern hemisphere plot would be created +for the aice field from iceh.2005-09.nc file. Titles on the plot would reference CICE6.0.1 and +"Sept 2005 Mean" and the png files would contain the string 2005Sep as well as the field name and region. +The two-dimensional plots are generated using the scatter feature from matplotlib, so they are fairly +primitive. diff --git a/icepack b/icepack index f6ff8f7c4..083d6e3cf 160000 --- a/icepack +++ b/icepack @@ -1 +1 @@ -Subproject commit f6ff8f7c4d4cb6feabe3651b13204cf43fc948e3 +Subproject commit 083d6e3cf42198bc7b4ffd1f02063c4c5b35b639