diff --git a/ci/cases/gfsv17/ocnanal.yaml b/ci/cases/gfsv17/ocnanal.yaml
index 9024afcb31..a2d7363c18 100644
--- a/ci/cases/gfsv17/ocnanal.yaml
+++ b/ci/cases/gfsv17/ocnanal.yaml
@@ -14,17 +14,14 @@ base:
   DO_VERFRAD: "YES"
   DO_VRFY_OCEANDA: "NO"
   FHMAX_GFS: 240
+  ACCOUNT: {{ 'HPC_ACCOUNT' | getenv }}

 ocnanal:
-  SOCA_INPUT_FIX_DIR: /scratch2/NCEPDEV/ocean/Guillaume.Vernieres/data/static/1440x1080x75/soca
-  CASE_ANL: 'C24'
+  SOCA_INPUT_FIX_DIR: {{ FIXgfs }}/gdas/soca/1440x1080x75/soca
   SOCA_OBS_LIST: {{ HOMEgfs }}/sorc/gdas.cd/parm/soca/obs/obs_list.yaml
   SOCA_NINNER: 100
-  SABER_BLOCKS_YAML: ''
-  NICAS_RESOL: 1
-  NICAS_GRID_SIZE: 15000

 prepoceanobs:
-  SOCA_OBS_LIST: {{ HOMEgfs }}/sorc/gdas.cd/parm/soca/obs/obs_list.yaml
+  SOCA_OBS_LIST: {{ HOMEgfs }}/sorc/gdas.cd/parm/soca/obs/obs_list.yaml
   OBSPREP_YAML: {{ HOMEgfs }}/sorc/gdas.cd/parm/soca/obsprep/obsprep_config.yaml
   DMPDIR: /scratch1/NCEPDEV/da/common/
diff --git a/env/AWSPW.env b/env/AWSPW.env
index 0598c01f25..867b9220ba 100755
--- a/env/AWSPW.env
+++ b/env/AWSPW.env
@@ -28,6 +28,9 @@ if [[ -n "${ntasks:-}" && -n "${max_tasks_per_node:-}" && -n "${tasks_per_node:-
    [[ ${NTHREADSmax} -gt ${max_threads_per_task} ]] && NTHREADSmax=${max_threads_per_task}
    [[ ${NTHREADS1} -gt ${max_threads_per_task} ]] && NTHREADS1=${max_threads_per_task}
    APRUN="${launcher} -n ${ntasks}"
+else
+    echo "ERROR config.resources must be sourced before sourcing AWSPW.env"
+    exit 2
 fi

 if [[ "${step}" = "fcst" ]] || [[ "${step}" = "efcs" ]]; then
diff --git a/env/GAEA.env b/env/GAEA.env
index 9a9d8b914f..6809a9b186 100755
--- a/env/GAEA.env
+++ b/env/GAEA.env
@@ -29,6 +29,9 @@ if [[ -n "${ntasks:-}" && -n "${max_tasks_per_node:-}" && -n "${tasks_per_node:-
    # This may be useful when Gaea is fully ported, so ignore SC warning
    # shellcheck disable=SC2034
    APRUN="${launcher} -n ${ntasks}"
+else
+    echo "ERROR config.resources must be sourced before sourcing GAEA.env"
+    exit 2
 fi

 if [[ "${step}" = "waveinit" ]]; then
diff --git a/env/HERA.env b/env/HERA.env
index 8b6550ec7b..3f0e7c9f36 100755
--- a/env/HERA.env
+++ b/env/HERA.env
@@ -40,6 +40,9 @@ if [[ -n "${ntasks:-}" && -n "${max_tasks_per_node:-}" && -n "${tasks_per_node:-
    [[ ${NTHREADSmax} -gt ${max_threads_per_task} ]] && NTHREADSmax=${max_threads_per_task}
    [[ ${NTHREADS1} -gt ${max_threads_per_task} ]] && NTHREADS1=${max_threads_per_task}
    APRUN="${launcher} -n ${ntasks}"
+else
+    echo "ERROR config.resources must be sourced before sourcing HERA.env"
+    exit 2
 fi

 if [[ "${step}" = "prep" ]] || [[ "${step}" = "prepbufr" ]]; then
@@ -103,11 +106,11 @@ elif [[ "${step}" = "snowanl" ]]; then

    export APRUN_APPLY_INCR="${launcher} -n 6"

-elif [[ "${step}" = "ocnanalbmat" ]]; then
+elif [[ "${step}" = "marinebmat" ]]; then

    export APRUNCFP="${launcher} -n \$ncmd --multi-prog"
+    export APRUN_MARINEBMAT="${APRUN}"

-    export APRUN_OCNANAL="${APRUN}"
 elif [[ "${step}" = "ocnanalrun" ]]; then
@@ -184,11 +191,6 @@ elif [[ "${step}" = "eupd" ]]; then

 elif [[ "${step}" = "fcst" ]] || [[ "${step}" = "efcs" ]]; then

-    ppn="tasks_per_node"
-    [[ -z "${!ppn+0}" ]] && ppn="tasks_per_node"
-    nprocs="ntasks"
-    [[ -z ${!nprocs+0} ]] && nprocs="ntasks"
-
    (( nnodes = (ntasks+tasks_per_node-1)/tasks_per_node ))
    (( ufs_ntasks = nnodes*tasks_per_node ))
    # With ESMF threading, the model wants to use the full node
diff --git a/env/HERCULES.env b/env/HERCULES.env
index 6afdc4c5d5..83fa1aadd1 100755
--- a/env/HERCULES.env
+++ b/env/HERCULES.env
@@ -37,6 +37,9 @@ if [[ -n "${ntasks:-}" && -n "${max_tasks_per_node:-}" && -n "${tasks_per_node:-
    [[ ${NTHREADSmax} -gt ${max_threads_per_task} ]] && NTHREADSmax=${max_threads_per_task}
    [[ ${NTHREADS1} -gt ${max_threads_per_task} ]] && NTHREADS1=${max_threads_per_task}
    APRUN="${launcher} -n ${ntasks}"
+else
+    echo "ERROR config.resources must be sourced before sourcing HERCULES.env"
+    exit 2
 fi

 case ${step} in
@@ -102,19 +105,15 @@ case ${step} in

     export APRUN_APPLY_INCR="${launcher} -n 6"
   ;;
-  "ocnanalbmat")
+  "marinebmat")

     export APRUNCFP="${launcher} -n \$ncmd ${mpmd_opt}"
-
-    export NTHREADS_OCNANAL=${NTHREADSmax}
-    export APRUN_OCNANAL="${APRUN} --cpus-per-task=${NTHREADS_OCNANAL}"
+    export APRUN_MARINEBMAT="${APRUN}"
  ;;
  "ocnanalrun")

     export APRUNCFP="${launcher} -n \$ncmd ${mpmd_opt}"
-
-    export NTHREADS_OCNANAL=${NTHREADSmax}
-    export APRUN_OCNANAL="${APRUN} --cpus-per-task=${NTHREADS_OCNANAL}"
+    export APRUN_OCNANAL="${APRUN}"
  ;;
  "ocnanalecen")
diff --git a/env/JET.env b/env/JET.env
index f2fb4ed976..810a8cd501 100755
--- a/env/JET.env
+++ b/env/JET.env
@@ -28,6 +28,9 @@ if [[ -n "${ntasks:-}" && -n "${max_tasks_per_node:-}" && -n "${tasks_per_node:-
    [[ ${NTHREADSmax} -gt ${max_threads_per_task} ]] && NTHREADSmax=${max_threads_per_task}
    [[ ${NTHREADS1} -gt ${max_threads_per_task} ]] && NTHREADS1=${max_threads_per_task}
    APRUN="${launcher} -n ${ntasks}"
+else
+    echo "ERROR config.resources must be sourced before sourcing JET.env"
+    exit 2
 fi

 if [[ "${step}" = "prep" ]] || [[ "${step}" = "prepbufr" ]]; then
@@ -91,18 +94,14 @@ elif [[ "${step}" = "atmanlfv3inc" ]]; then

    export NTHREADS_ATMANLFV3INC=${NTHREADSmax}
    export APRUN_ATMANLFV3INC="${APRUN}"

-elif [[ "${step}" = "ocnanalbmat" ]]; then
+elif [[ "${step}" = "marinebmat" ]]; then

    export APRUNCFP="${launcher} -n \$ncmd ${mpmd_opt}"
-
-    export NTHREADS_OCNANAL=${NTHREADSmax}
-    export APRUN_OCNANAL="${APRUN}"
+    export APRUN_MARINEBMAT="${APRUN}"

 elif [[ "${step}" = "ocnanalrun" ]]; then

    export APRUNCFP="${launcher} -n \$ncmd ${mpmd_opt}"
-
-    export NTHREADS_OCNANAL=${NTHREADSmax}
    export APRUN_OCNANAL="${APRUN}"

 elif [[ "${step}" = "anal" ]] || [[ "${step}" = "analcalc" ]]; then
diff --git a/env/ORION.env b/env/ORION.env
index 5d6889c9cf..bbbfb59182 100755
--- a/env/ORION.env
+++ b/env/ORION.env
@@ -35,6 +35,9 @@ if [[ -n "${ntasks:-}" && -n "${max_tasks_per_node:-}" && -n "${tasks_per_node:-
    [[ ${NTHREADSmax} -gt ${max_threads_per_task} ]] && NTHREADSmax=${max_threads_per_task}
    [[ ${NTHREADS1} -gt ${max_threads_per_task} ]] && NTHREADS1=${max_threads_per_task}
    APRUN="${launcher} -n ${ntasks}"
+else
+    echo "ERROR config.resources must be sourced before sourcing ORION.env"
+    exit 2
 fi

 if [[ "${step}" = "prep" ]] || [[ "${step}" = "prepbufr" ]]; then
@@ -99,19 +102,18 @@ elif [[ "${step}" = "atmanlfv3inc" ]]; then

    export NTHREADS_ATMANLFV3INC=${NTHREADSmax}
    export APRUN_ATMANLFV3INC="${APRUN} --cpus-per-task=${NTHREADS_ATMANLFV3INC}"

-elif [[ "${step}" = "ocnanalbmat" ]]; then
+elif [[ "${step}" = "marinebmat" ]]; then

    export APRUNCFP="${launcher} -n \$ncmd ${mpmd_opt}"

-    export NTHREADS_OCNANAL=${NTHREADSmax}
-    export APRUN_OCNANAL="${APRUN} --cpus-per-task=${NTHREADS_OCNANAL}"
+    export NTHREADS_MARINEBMAT=${NTHREADSmax}
+    export APRUN_MARINEBMAT="${APRUN}"

 elif [[ "${step}" = "ocnanalrun" ]]; then

    export APRUNCFP="${launcher} -n \$ncmd ${mpmd_opt}"

-    export NTHREADS_OCNANAL=${NTHREADSmax}
-    export APRUN_OCNANAL="${APRUN} --cpus-per-task=${NTHREADS_OCNANAL}"
+    export APRUN_OCNANAL="${APRUN}"

 elif [[ "${step}" = "ocnanalchkpt" ]]; then
diff --git a/env/S4.env b/env/S4.env
index 5b1644a242..840ca65898 100755
--- a/env/S4.env
+++ b/env/S4.env
@@ -28,6 +28,9 @@ if [[ -n "${ntasks:-}" && -n "${max_tasks_per_node:-}" && -n "${tasks_per_node:-
    [[ ${NTHREADSmax} -gt ${max_threads_per_task} ]] && NTHREADSmax=${max_threads_per_task}
    [[ ${NTHREADS1} -gt ${max_threads_per_task} ]] && NTHREADS1=${max_threads_per_task}
    APRUN="${launcher} -n ${ntasks}"
+else
+    echo "ERROR config.resources must be sourced before sourcing S4.env"
+    exit 2
 fi

 if [[ "${step}" = "prep" ]] || [[ "${step}" = "prepbufr" ]]; then
@@ -91,10 +94,10 @@ elif [[ "${step}" = "atmanlfv3inc" ]]; then

    export NTHREADS_ATMANLFV3INC=${NTHREADSmax}
    export APRUN_ATMANLFV3INC="${APRUN}"

-elif [[ "${step}" = "ocnanalbmat" ]]; then
+elif [[ "${step}" = "marinebmat" ]]; then
    echo "WARNING: ${step} is not enabled on S4!"

-elif [[ "${step}" = "ocnanalrun" ]]; then
+elif [[ "${step}" = "marinerun" ]]; then
    echo "WARNING: ${step} is not enabled on S4!"

 elif [[ "${step}" = "anal" ]] || [[ "${step}" = "analcalc" ]]; then
diff --git a/env/WCOSS2.env b/env/WCOSS2.env
index 27e5c667bf..18caf1bc03 100755
--- a/env/WCOSS2.env
+++ b/env/WCOSS2.env
@@ -22,6 +22,9 @@ if [[ -n "${ntasks:-}" && -n "${max_tasks_per_node:-}" && -n "${tasks_per_node:-
    [[ ${NTHREADSmax} -gt ${max_threads_per_task} ]] && NTHREADSmax=${max_threads_per_task}
    [[ ${NTHREADS1} -gt ${max_threads_per_task} ]] && NTHREADS1=${max_threads_per_task}
    APRUN="${launcher} -n ${ntasks}"
+else
+    echo "ERROR config.resources must be sourced before sourcing WCOSS2.env"
+    exit 2
 fi

 if [[ "${step}" = "prep" ]] || [[ "${step}" = "prepbufr" ]]; then
@@ -153,11 +156,6 @@ elif [[ "${step}" = "eupd" ]]; then

 elif [[ "${step}" = "fcst" ]] || [[ "${step}" = "efcs" ]]; then

-    ppn="tasks_per_node"
-    [[ -z "${!ppn+0}" ]] && ppn="tasks_per_node"
-    nprocs="ntasks"
-    [[ -z ${!nprocs+0} ]] && nprocs="ntasks"
-
    (( nnodes = (ntasks+tasks_per_node-1)/tasks_per_node ))
    (( ufs_ntasks = nnodes*tasks_per_node ))
    # With ESMF threading, the model wants to use the full node
diff --git a/jobs/JGDAS_GLOBAL_OCEAN_ANALYSIS_BMAT b/jobs/JGDAS_GLOBAL_OCEAN_ANALYSIS_BMAT
deleted file mode 100755
index 90902ba3c3..0000000000
--- a/jobs/JGDAS_GLOBAL_OCEAN_ANALYSIS_BMAT
+++ /dev/null
@@ -1,48 +0,0 @@
-#!/bin/bash
-source "${HOMEgfs}/ush/preamble.sh"
-export WIPE_DATA="NO"
-
-export DATA="${DATAROOT}/${RUN}ocnanal_${cyc}"
-source "${HOMEgfs}/ush/jjob_header.sh" -e "ocnanalbmat" -c "base ocnanal ocnanalbmat"
-
-
-##############################################
-# Set variables used in the script
-##############################################
-
-
-##############################################
-# Begin JOB SPECIFIC work
-##############################################
-
-YMD=${PDY} HH=${cyc} declare_from_tmpl -rx COM_OCEAN_ANALYSIS
-
-mkdir -p "${COM_OCEAN_ANALYSIS}"
-
-export COMOUT=${COM_OCEAN_ANALYSIS}
-
-###############################################################
-# Run relevant script
-
-EXSCRIPT=${GDASOCNBMATSH:-${HOMEgfs}/sorc/gdas.cd/scripts/exgdas_global_marine_analysis_bmat.sh}
-${EXSCRIPT}
-status=$?
-[[ ${status} -ne 0 ]] && exit "${status}"
-
-##############################################
-# End JOB SPECIFIC work
-##############################################
-
-##############################################
-# Final processing
-##############################################
-if [[ -e "${pgmout}" ]] ; then
-  cat "${pgmout}"
-fi
-
-##########################################
-# Do not remove the Temporary working directory (do this in POST)
-##########################################
-cd "${DATAROOT}" || exit 1
-
-exit 0
diff --git a/jobs/JGDAS_GLOBAL_OCEAN_ANALYSIS_BMAT_VRFY b/jobs/JGDAS_GLOBAL_OCEAN_ANALYSIS_BMAT_VRFY
deleted file mode 100755
index 3727ba9853..0000000000
--- a/jobs/JGDAS_GLOBAL_OCEAN_ANALYSIS_BMAT_VRFY
+++ /dev/null
@@ -1,47 +0,0 @@
-#!/bin/bash
-source "${HOMEgfs}/ush/preamble.sh"
-export WIPE_DATA="NO"
-export DATA="${DATAROOT}/${RUN}ocnanal_${cyc}"
-source "${HOMEgfs}/ush/jjob_header.sh" -e "ocnanalrun" -c "base ocnanal ocnanalrun"
-
-
-##############################################
-# Set variables used in the script
-##############################################
-
-
-##############################################
-# Begin JOB SPECIFIC work
-##############################################
-
-YMD=${PDY} HH=${cyc} declare_from_tmpl -rx COM_OCEAN_ANALYSIS
-
-mkdir -p "${COM_OCEAN_ANALYSIS}"
-
-export COMOUT=${COM_OCEAN_ANALYSIS}
-
-###############################################################
-# Run relevant script
-
-EXSCRIPT=${GDASOCNMBATVRFYSH:-${HOMEgfs}/sorc/gdas.cd/scripts/exgdas_global_marine_analysis_bmat_vrfy.sh}
-${EXSCRIPT}
-status=$?
-[[ ${status} -ne 0 ]] && exit "${status}"
-
-##############################################
-# End JOB SPECIFIC work
-##############################################
-
-##############################################
-# Final processing
-##############################################
-if [[ -e "${pgmout}" ]] ; then
-  cat "${pgmout}"
-fi
-
-##########################################
-# Do not remove the Temporary working directory (do this in POST)
-##########################################
-cd "${DATAROOT}" || exit 1
-
-exit 0
diff --git a/jobs/JGDAS_GLOBAL_OCEAN_ANALYSIS_PREP b/jobs/JGDAS_GLOBAL_OCEAN_ANALYSIS_PREP
index 28e30ebd72..664df3aad6 100755
--- a/jobs/JGDAS_GLOBAL_OCEAN_ANALYSIS_PREP
+++ b/jobs/JGDAS_GLOBAL_OCEAN_ANALYSIS_PREP
@@ -27,6 +27,10 @@ RUN=${GDUMP} YMD=${gPDY} HH=${gcyc} declare_from_tmpl -rx \
    COM_ICE_HISTORY_PREV:COM_ICE_HISTORY_TMPL \
    COM_ICE_RESTART_PREV:COM_ICE_RESTART_TMPL

+YMD=${PDY} HH=${cyc} declare_from_tmpl -rx \
+    COMIN_OCEAN_BMATRIX:COM_OCEAN_BMATRIX_TMPL \
+    COMIN_ICE_BMATRIX:COM_ICE_BMATRIX_TMPL
+
 ##############################################
 # Begin JOB SPECIFIC work
 ##############################################
diff --git a/jobs/JGFS_ATMOS_GEMPAK_NCDC_UPAPGIF b/jobs/JGFS_ATMOS_GEMPAK_NCDC_UPAPGIF
index cd8c76eadd..d62c3320a1 100755
--- a/jobs/JGFS_ATMOS_GEMPAK_NCDC_UPAPGIF
+++ b/jobs/JGFS_ATMOS_GEMPAK_NCDC_UPAPGIF
@@ -4,7 +4,7 @@
 # GFS GEMPAK NCDC PRODUCT GENERATION
 ############################################
 source "${HOMEgfs}/ush/preamble.sh"
-source "${HOMEgfs}/ush/jjob_header.sh" -e "gempak_gif" -c "base"
+source "${HOMEgfs}/ush/jjob_header.sh" -e "gempak_gif" -c "base gempak"

 export MP_PULSE=0
 export MP_TIMEOUT=2000
diff --git a/jobs/JGFS_ATMOS_PGRB2_SPEC_NPOESS b/jobs/JGFS_ATMOS_PGRB2_SPEC_NPOESS
index 1b2cfd9f0c..72dba0679d 100755
--- a/jobs/JGFS_ATMOS_PGRB2_SPEC_NPOESS
+++ b/jobs/JGFS_ATMOS_PGRB2_SPEC_NPOESS
@@ -6,7 +6,7 @@
 ############################################
 # GFS PGRB2_SPECIAL_POST PRODUCT GENERATION
 ############################################
 source "${HOMEgfs}/ush/preamble.sh"
-source "${HOMEgfs}/ush/jjob_header.sh" -e "npoess" -c "base"
+source "${HOMEgfs}/ush/jjob_header.sh" -e "npoess" -c "base npoess"

 export OMP_NUM_THREADS=${OMP_NUM_THREADS:-1}
diff --git a/jobs/JGLOBAL_ARCHIVE b/jobs/JGLOBAL_ARCHIVE
index c909cd8801..401feba35f 100755
--- a/jobs/JGLOBAL_ARCHIVE
+++ b/jobs/JGLOBAL_ARCHIVE
@@ -33,6 +33,8 @@ YMD=${PDY} HH=${cyc} declare_from_tmpl -rx \
    COMIN_OCEAN_GRIB:COM_OCEAN_GRIB_TMPL \
    COMIN_OCEAN_NETCDF:COM_OCEAN_NETCDF_TMPL \
    COMIN_OCEAN_ANALYSIS:COM_OCEAN_ANALYSIS_TMPL \
+    COMIN_OCEAN_BMATRIX:COM_OCEAN_BMATRIX_TMPL \
+    COMIN_ICE_BMATRIX:COM_ICE_BMATRIX_TMPL \
    COMIN_WAVE_GRID:COM_WAVE_GRID_TMPL \
    COMIN_WAVE_HISTORY:COM_WAVE_HISTORY_TMPL \
    COMIN_WAVE_STATION:COM_WAVE_STATION_TMPL \
diff --git a/jobs/JGLOBAL_MARINE_BMAT b/jobs/JGLOBAL_MARINE_BMAT
new file mode 100755
index 0000000000..3dacec9278
--- /dev/null
+++ b/jobs/JGLOBAL_MARINE_BMAT
@@ -0,0 +1,66 @@
+#!/bin/bash
+
+source "${HOMEgfs}/ush/preamble.sh"
+
+if (( 10#${ENSMEM:-0} > 0 )); then
+    export DATAjob="${DATAROOT}/${RUN}marinebmat.${PDY:-}${cyc}"
+    export DATA="${DATAjob}/${jobid}"
+    # Create the directory to hold ensemble perturbations
+    export DATAenspert="${DATAjob}/enspert"
+    if [[ ! -d "${DATAenspert}" ]]; then mkdir -p "${DATAenspert}"; fi
+fi
+
+# source config.base, config.ocnanal and config.marinebmat
+# and pass marinebmat to ${machine}.env
+source "${HOMEgfs}/ush/jjob_header.sh" -e "marinebmat" -c "base ocnanal marinebmat"
+
+##############################################
+# Set variables used in the script
+##############################################
+# shellcheck disable=SC2153
+GDATE=$(date --utc +%Y%m%d%H -d "${PDY} ${cyc} - ${assim_freq} hours")
+gPDY=${GDATE:0:8}
+gcyc=${GDATE:8:2}
+export GDUMP="gdas"
+export GDUMP_ENS="enkf${GDUMP}"
+
+##############################################
+# Begin JOB SPECIFIC work
+##############################################
+
+# Generate COM variables from templates
+RUN=${GDUMP} YMD=${gPDY} HH=${gcyc} declare_from_tmpl -rx \
+    COMIN_OCEAN_HISTORY_PREV:COM_OCEAN_HISTORY_TMPL \
+    COMIN_ICE_HISTORY_PREV:COM_ICE_HISTORY_TMPL
+
+RUN=${GDUMP_ENS} YMD=${gPDY} HH=${gcyc} declare_from_tmpl -rx \
+    COMIN_OCEAN_HISTORY_ENS_PREV:COM_OCEAN_HISTORY_TMPL \
+    COMIN_ICE_HISTORY_ENS_PREV:COM_ICE_HISTORY_TMPL
+
+YMD=${PDY} HH=${cyc} declare_from_tmpl -rx \
+    COMOUT_OCEAN_BMATRIX:COM_OCEAN_BMATRIX_TMPL \
+    COMOUT_ICE_BMATRIX:COM_ICE_BMATRIX_TMPL
+
+mkdir -p "${COMOUT_OCEAN_BMATRIX}"
+mkdir -p "${COMOUT_ICE_BMATRIX}"
+
+###############################################################
+# Run relevant script
+
+EXSCRIPT=${GDASMARINEBMATRUNPY:-${SCRgfs}/exglobal_marinebmat.py}
+${EXSCRIPT}
+status=$?
+[[ ${status} -ne 0 ]] && exit "${status}"
+
+##############################################
+# End JOB SPECIFIC work
+##############################################
+
+##############################################
+# Final processing
+##############################################
+if [[ -e "${pgmout}" ]] ; then
+    cat "${pgmout}"
+fi
+
+exit 0
diff --git a/jobs/rocoto/awips_20km_1p0deg.sh b/jobs/rocoto/awips_20km_1p0deg.sh
index b2a291e37e..af08b46111 100755
--- a/jobs/rocoto/awips_20km_1p0deg.sh
+++ b/jobs/rocoto/awips_20km_1p0deg.sh
@@ -8,7 +8,7 @@ source "${HOMEgfs}/ush/preamble.sh"
 ## HOMEgfs : /full/path/to/workflow
 ## EXPDIR : /full/path/to/config/files
 ## CDATE  : current analysis date (YYYYMMDDHH)
-## CDUMP  : cycle name (gdas / gfs)
+## RUN    : cycle name (gdas / gfs)
 ## PDY    : current date (YYYYMMDD)
 ## cyc    : current cycle (HH)
 ###############################################################
diff --git a/jobs/rocoto/ocnanalbmat.sh b/jobs/rocoto/marinebmat.sh
similarity index 79%
rename from jobs/rocoto/ocnanalbmat.sh
rename to jobs/rocoto/marinebmat.sh
index e62db9115a..9b72e5e12c 100755
--- a/jobs/rocoto/ocnanalbmat.sh
+++ b/jobs/rocoto/marinebmat.sh
@@ -8,12 +8,11 @@ source "${HOMEgfs}/ush/preamble.sh"
 status=$?
 [[ "${status}" -ne 0 ]] && exit "${status}"

-export job="ocnanalbmat"
+export job="marinebmat"
 export jobid="${job}.$$"

 ###############################################################
 # Execute the JJOB
-"${HOMEgfs}"/jobs/JGDAS_GLOBAL_OCEAN_ANALYSIS_BMAT
-echo "BMAT gets run here"
+"${HOMEgfs}"/jobs/JGLOBAL_MARINE_BMAT
 status=$?
 exit "${status}"
diff --git a/jobs/rocoto/prep.sh b/jobs/rocoto/prep.sh
index a885bd1c7f..bbde68377d 100755
--- a/jobs/rocoto/prep.sh
+++ b/jobs/rocoto/prep.sh
@@ -13,7 +13,8 @@ export job="prep"
 export jobid="${job}.$$"
 source "${HOMEgfs}/ush/jjob_header.sh" -e "prep" -c "base prep"

-CDUMP="${RUN/enkf}"
+# Strip 'enkf' from RUN for pulling data
+RUN_local="${RUN/enkf}"

 ###############################################################
 # Set script and dependency variables
@@ -25,9 +26,9 @@ gPDY=${GDATE:0:8}
 gcyc=${GDATE:8:2}
 GDUMP="gdas"

-export OPREFIX="${CDUMP}.t${cyc}z."
+export OPREFIX="${RUN_local}.t${cyc}z."

-YMD=${PDY} HH=${cyc} DUMP=${CDUMP} declare_from_tmpl -rx COM_OBS COM_OBSDMP
+YMD=${PDY} HH=${cyc} DUMP=${RUN_local} declare_from_tmpl -rx COM_OBS COM_OBSDMP

 RUN=${GDUMP} DUMP=${GDUMP} YMD=${gPDY} HH=${gcyc} declare_from_tmpl -rx \
    COM_OBS_PREV:COM_OBS_TMPL \
@@ -39,7 +40,7 @@ if [[ ! -d "${COM_OBS}" ]]; then mkdir -p "${COM_OBS}"; fi
 ###############################################################
 # If ROTDIR_DUMP=YES, copy dump files to rotdir
 if [[ ${ROTDIR_DUMP} = "YES" ]]; then
-    "${HOMEgfs}/ush/getdump.sh" "${PDY}${cyc}" "${CDUMP}" "${COM_OBSDMP}" "${COM_OBS}"
+    "${HOMEgfs}/ush/getdump.sh" "${PDY}${cyc}" "${RUN_local}" "${COM_OBSDMP}" "${COM_OBS}"
    status=$?
    [[ ${status} -ne 0 ]] && exit ${status}

@@ -73,14 +74,14 @@ if [[ ${PROCESS_TROPCY} = "YES" ]]; then
        done
    fi

-    if [[ ${ROTDIR_DUMP} = "YES" ]]; then rm "${COM_OBS}/${CDUMP}.t${cyc}z.syndata.tcvitals.tm00"; fi
+    if [[ ${ROTDIR_DUMP} = "YES" ]]; then rm "${COM_OBS}/${RUN_local}.t${cyc}z.syndata.tcvitals.tm00"; fi

    "${HOMEgfs}/jobs/JGLOBAL_ATMOS_TROPCY_QC_RELOC"
    status=$?
    [[ ${status} -ne 0 ]] && exit ${status}

 else
-    if [[ ${ROTDIR_DUMP} = "NO" ]]; then cp "${COM_OBSDMP}/${CDUMP}.t${cyc}z.syndata.tcvitals.tm00" "${COM_OBS}/"; fi
+    if [[ ${ROTDIR_DUMP} = "NO" ]]; then cp "${COM_OBSDMP}/${RUN_local}.t${cyc}z.syndata.tcvitals.tm00" "${COM_OBS}/"; fi
 fi
@@ -93,17 +94,17 @@ if [[ ${MAKE_PREPBUFR} = "YES" ]]; then
        rm -f "${COM_OBS}/${OPREFIX}nsstbufr"
    fi

-    export job="j${CDUMP}_prep_${cyc}"
+    export job="j${RUN_local}_prep_${cyc}"
    export COMIN=${COM_OBS}
    export COMOUT=${COM_OBS}
    RUN="gdas" YMD=${PDY} HH=${cyc} declare_from_tmpl -rx COMINgdas:COM_ATMOS_HISTORY_TMPL
    RUN="gfs" YMD=${PDY} HH=${cyc} declare_from_tmpl -rx COMINgfs:COM_ATMOS_HISTORY_TMPL
    if [[ ${ROTDIR_DUMP} = "NO" ]]; then
-        export COMSP=${COMSP:-"${COM_OBSDMP}/${CDUMP}.t${cyc}z."}
+        export COMSP=${COMSP:-"${COM_OBSDMP}/${RUN_local}.t${cyc}z."}
    else
-        export COMSP=${COMSP:-"${COM_OBS}/${CDUMP}.t${cyc}z."}
+        export COMSP=${COMSP:-"${COM_OBS}/${RUN_local}.t${cyc}z."}
    fi
-    export COMSP=${COMSP:-${COMIN_OBS}/${CDUMP}.t${cyc}z.}
+    export COMSP=${COMSP:-${COMIN_OBS}/${RUN_local}.t${cyc}z.}

    # Disable creating NSSTBUFR if desired, copy from DMPDIR instead
    if [[ ${MAKE_NSSTBUFR:-"NO"} = "NO" ]]; then
diff --git a/parm/archive/gdas.yaml.j2 b/parm/archive/gdas.yaml.j2
index 7dd87f122b..ce5054a82f 100644
--- a/parm/archive/gdas.yaml.j2
+++ b/parm/archive/gdas.yaml.j2
@@ -22,7 +22,7 @@ gdas:
        {% if DO_JEDIOCNVAR %}
        - "logs/{{ cycle_YMDH }}/{{ RUN }}prepoceanobs.log"
        - "logs/{{ cycle_YMDH }}/{{ RUN }}ocnanalprep.log"
-        - "logs/{{ cycle_YMDH }}/{{ RUN }}ocnanalbmat.log"
+        - "logs/{{ cycle_YMDH }}/{{ RUN }}marinebmat.log"
        - "logs/{{ cycle_YMDH }}/{{ RUN }}ocnanalrun.log"
        - "logs/{{ cycle_YMDH }}/{{ RUN }}ocnanalpost.log"
        - "logs/{{ cycle_YMDH }}/{{ RUN }}ocnanalchkpt.log"
diff --git a/parm/archive/gdasocean_analysis.yaml.j2 b/parm/archive/gdasocean_analysis.yaml.j2
index d127ee0b75..b7c057eacf 100644
--- a/parm/archive/gdasocean_analysis.yaml.j2
+++ b/parm/archive/gdasocean_analysis.yaml.j2
@@ -3,23 +3,30 @@ gdasocean_analysis:
    name: "GDASOCEAN_ANALYSIS"
    target: "{{ ATARDIR }}/{{ cycle_YMDH }}/gdasocean_analysis.tar"
    required:
+      # analysis and analysis increments
      - '{{ COMIN_OCEAN_ANALYSIS | relpath(ROTDIR) }}/{{ head }}ocninc.nc'
-      {% set ocngrid_cycle = '%02d' % (((cycle_HH | int) - 3) % 24) %}
-      - '{{ COMIN_OCEAN_ANALYSIS | relpath(ROTDIR) }}/gdas.t{{ ocngrid_cycle }}z.ocngrid.nc'
      {% for domain in ["ocn", "ice"] %}
-      - '{{ COMIN_OCEAN_ANALYSIS | relpath(ROTDIR) }}/{{ head }}{{domain}}.bkgerr_stddev.nc'
      - '{{ COMIN_OCEAN_ANALYSIS | relpath(ROTDIR) }}/{{ head }}{{domain}}.incr.nc'
      - '{{ COMIN_OCEAN_ANALYSIS | relpath(ROTDIR) }}/{{ head }}{{domain}}ana.nc'
-      {% if NMEM_ENS > 2 %}
-      - '{{ COMIN_OCEAN_ANALYSIS | relpath(ROTDIR) }}/{{ head }}{{domain}}.recentering_error.nc'
-      {% endif %}
      {% endfor %}
+
+      # static background error
+      - '{{ COMIN_OCEAN_BMATRIX | relpath(ROTDIR) }}/{{ head }}ocean.bkgerr_stddev.nc'
+      - '{{ COMIN_ICE_BMATRIX | relpath(ROTDIR) }}/{{ head }}ice.bkgerr_stddev.nc'
+
+      # ensemble background error
      {% if NMEM_ENS > 2 %}
-      - '{{ COMIN_OCEAN_ANALYSIS | relpath(ROTDIR) }}/{{ head }}ocn.ssh_steric_stddev.nc'
-      - '{{ COMIN_OCEAN_ANALYSIS | relpath(ROTDIR) }}/{{ head }}ocn.ssh_unbal_stddev.nc'
-      - '{{ COMIN_OCEAN_ANALYSIS | relpath(ROTDIR) }}/{{ head }}ocn.ssh_total_stddev.nc'
-      - '{{ COMIN_OCEAN_ANALYSIS | relpath(ROTDIR) }}/{{ head }}ocn.steric_explained_variance.nc'
+      - '{{ COMIN_ICE_BMATRIX | relpath(ROTDIR) }}/{{ head }}ice.ens_weights.nc'
+      - '{{ COMIN_OCEAN_BMATRIX | relpath(ROTDIR) }}/{{ head }}ocean.ens_weights.nc'
+      - '{{ COMIN_OCEAN_BMATRIX | relpath(ROTDIR) }}/{{ head }}ocean.recentering_error.nc'
+      {% for diag_type in ["ssh_steric_stddev", "ssh_unbal_stddev", "ssh_total_stddev", "steric_explained_variance"] %}
+      - '{{ COMIN_OCEAN_BMATRIX | relpath(ROTDIR) }}/{{ head }}ocean.{{ diag_type }}.nc'
+      {% endfor %}
      {% endif %}
+
+      # obs space diags
      - '{{ COMIN_OCEAN_ANALYSIS | relpath(ROTDIR) }}/{{ head }}ocn.*.stats.csv'
      - '{{ COMIN_OCEAN_ANALYSIS | relpath(ROTDIR) }}/diags/*.nc4'
+
+      # runtime configs
      - '{{ COMIN_OCEAN_ANALYSIS | relpath(ROTDIR) }}/yaml/*.yaml'
diff --git a/parm/config/gefs/config.resources b/parm/config/gefs/config.resources
index 7510d38839..81d2a20635 100644
--- a/parm/config/gefs/config.resources
+++ b/parm/config/gefs/config.resources
@@ -80,120 +80,110 @@ case ${step} in
    "fcst" | "efcs")
        export is_exclusive=True

-        _RUN_LIST=${RUN:-"gefs gfs"}
-
-        # During workflow creation, we need resources for all RUNs and RUN is undefined
-        for _RUN in ${_RUN_LIST}; do
-            if [[ "${_RUN}" =~ "gfs" ]]; then
-                export layout_x=${layout_x_gfs}
-                export layout_y=${layout_y_gfs}
-                export WRITE_GROUP=${WRITE_GROUP_GFS}
-                export WRTTASK_PER_GROUP_PER_THREAD=${WRTTASK_PER_GROUP_PER_THREAD_GFS}
-                ntasks_fv3=${ntasks_fv3_gfs}
-                ntasks_quilt=${ntasks_quilt_gfs}
-                nthreads_fv3=${nthreads_fv3_gfs}
-                nthreads_ufs=${nthreads_ufs_gfs}
-            fi
-
-            # Determine if using ESMF-managed threading or traditional threading
-            # If using traditional threading, set them to 1
-            if [[ "${USE_ESMF_THREADING:-}" == "YES" ]]; then
-                export UFS_THREADS=1
-            else  # traditional threading
-                export UFS_THREADS=${nthreads_ufs:-1}
-                nthreads_fv3=1
-                nthreads_mediator=1
-                [[ "${DO_WAVE}" == "YES" ]] && nthreads_ww3=1
-                [[ "${DO_OCN}" == "YES" ]] && nthreads_mom6=1
-                [[ "${DO_ICE}" == "YES" ]] && nthreads_cice6=1
-            fi
-
-            # PETS for the atmosphere dycore
-            (( FV3PETS = ntasks_fv3 * nthreads_fv3 ))
-            echo "FV3 using (nthreads, PETS) = (${nthreads_fv3}, ${FV3PETS})"
-
-            # PETS for quilting
-            if [[ "${QUILTING:-}" == ".true." ]]; then
-                (( QUILTPETS = ntasks_quilt * nthreads_fv3 ))
-                (( WRTTASK_PER_GROUP = WRTTASK_PER_GROUP_PER_THREAD ))
-                export WRTTASK_PER_GROUP
-            else
-                QUILTPETS=0
-            fi
-            echo "QUILT using (nthreads, PETS) = (${nthreads_fv3}, ${QUILTPETS})"
-
-            # Total PETS for the atmosphere component
-            ATMTHREADS=${nthreads_fv3}
-            (( ATMPETS = FV3PETS + QUILTPETS ))
-            export ATMPETS ATMTHREADS
-            echo "FV3ATM using (nthreads, PETS) = (${ATMTHREADS}, ${ATMPETS})"
-
-            # Total PETS for the coupled model (starting w/ the atmosphere)
-            NTASKS_TOT=${ATMPETS}
-
-            # The mediator PETS can overlap with other components, usually it lands on the atmosphere tasks.
-            # However, it is suggested limiting mediator PETS to 300, as it may cause the slow performance.
-            # See https://docs.google.com/document/d/1bKpi-52t5jIfv2tuNHmQkYUe3hkKsiG_DG_s6Mnukog/edit
-            # TODO: Update reference when moved to ufs-weather-model RTD
-            MEDTHREADS=${nthreads_mediator:-1}
-            MEDPETS=${MEDPETS:-${FV3PETS}}
-            (( "${MEDPETS}" > 300 )) && MEDPETS=300
-            export MEDPETS MEDTHREADS
-            echo "MEDIATOR using (threads, PETS) = (${MEDTHREADS}, ${MEDPETS})"
-
-            CHMPETS=0; CHMTHREADS=0
-            if [[ "${DO_AERO}" == "YES" ]]; then
-                # GOCART shares the same grid and forecast tasks as FV3 (do not add write grid component tasks).
-                (( CHMTHREADS = ATMTHREADS ))
-                (( CHMPETS = FV3PETS ))
-                # Do not add to NTASKS_TOT
-                echo "GOCART using (threads, PETS) = (${CHMTHREADS}, ${CHMPETS})"
-            fi
-            export CHMPETS CHMTHREADS
-
-            WAVPETS=0; WAVTHREADS=0
-            if [[ "${DO_WAVE}" == "YES" ]]; then
-                (( WAVPETS = ntasks_ww3 * nthreads_ww3 ))
-                (( WAVTHREADS = nthreads_ww3 ))
-                echo "WW3 using (threads, PETS) = (${WAVTHREADS}, ${WAVPETS})"
-                (( NTASKS_TOT = NTASKS_TOT + WAVPETS ))
-            fi
-            export WAVPETS WAVTHREADS
-
-            OCNPETS=0; OCNTHREADS=0
-            if [[ "${DO_OCN}" == "YES" ]]; then
-                (( OCNPETS = ntasks_mom6 * nthreads_mom6 ))
-                (( OCNTHREADS = nthreads_mom6 ))
-                echo "MOM6 using (threads, PETS) = (${OCNTHREADS}, ${OCNPETS})"
-                (( NTASKS_TOT = NTASKS_TOT + OCNPETS ))
-            fi
-            export OCNPETS OCNTHREADS
-
-            ICEPETS=0; ICETHREADS=0
-            if [[ "${DO_ICE}" == "YES" ]]; then
-                (( ICEPETS = ntasks_cice6 * nthreads_cice6 ))
-                (( ICETHREADS = nthreads_cice6 ))
-                echo "CICE6 using (threads, PETS) = (${ICETHREADS}, ${ICEPETS})"
-                (( NTASKS_TOT = NTASKS_TOT + ICEPETS ))
-            fi
-            export ICEPETS ICETHREADS
-
-            echo "Total PETS for ${_RUN} = ${NTASKS_TOT}"
-
-            declare -x "ntasks_${_RUN}"="${NTASKS_TOT}"
-            declare -x "threads_per_task_${_RUN}"="${UFS_THREADS}"
-            declare -x "tasks_per_node_${_RUN}"="${max_tasks_per_node}"
-
-        done
+        export layout_x=${layout_x_gfs}
+        export layout_y=${layout_y_gfs}
+        export WRITE_GROUP=${WRITE_GROUP_GFS}
+        export WRTTASK_PER_GROUP_PER_THREAD=${WRTTASK_PER_GROUP_PER_THREAD_GFS}
+        ntasks_fv3=${ntasks_fv3_gfs}
+        ntasks_quilt=${ntasks_quilt_gfs}
+        nthreads_fv3=${nthreads_fv3_gfs}
+        nthreads_ufs=${nthreads_ufs_gfs}
+
+        # Determine if using ESMF-managed threading or traditional threading
+        # If using traditional threading, set them to 1
+        if [[ "${USE_ESMF_THREADING:-}" == "YES" ]]; then
+            export UFS_THREADS=1
+        else  # traditional threading
+            export UFS_THREADS=${nthreads_ufs:-1}
+            nthreads_fv3=1
+            nthreads_mediator=1
+            [[ "${DO_WAVE}" == "YES" ]] && nthreads_ww3=1
+            [[ "${DO_OCN}" == "YES" ]] && nthreads_mom6=1
+            [[ "${DO_ICE}" == "YES" ]] && nthreads_cice6=1
+        fi
+
+        # PETS for the atmosphere dycore
+        (( FV3PETS = ntasks_fv3 * nthreads_fv3 ))
+        echo "FV3 using (nthreads, PETS) = (${nthreads_fv3}, ${FV3PETS})"
+
+        # PETS for quilting
+        if [[ "${QUILTING:-}" == ".true." ]]; then
+            (( QUILTPETS = ntasks_quilt * nthreads_fv3 ))
+            (( WRTTASK_PER_GROUP = WRTTASK_PER_GROUP_PER_THREAD ))
+            export WRTTASK_PER_GROUP
+        else
+            QUILTPETS=0
+        fi
+        echo "QUILT using (nthreads, PETS) = (${nthreads_fv3}, ${QUILTPETS})"
+
+        # Total PETS for the atmosphere component
+        ATMTHREADS=${nthreads_fv3}
+        (( ATMPETS = FV3PETS + QUILTPETS ))
+        export ATMPETS ATMTHREADS
+        echo "FV3ATM using (nthreads, PETS) = (${ATMTHREADS}, ${ATMPETS})"
+
+        # Total PETS for the coupled model (starting w/ the atmosphere)
+        NTASKS_TOT=${ATMPETS}
+
+        # The mediator PETS can overlap with other components, usually it lands on the atmosphere tasks.
+        # However, it is suggested limiting mediator PETS to 300, as it may cause the slow performance.
+        # See https://docs.google.com/document/d/1bKpi-52t5jIfv2tuNHmQkYUe3hkKsiG_DG_s6Mnukog/edit
+        # TODO: Update reference when moved to ufs-weather-model RTD
+        MEDTHREADS=${nthreads_mediator:-1}
+        MEDPETS=${MEDPETS:-${FV3PETS}}
+        (( "${MEDPETS}" > 300 )) && MEDPETS=300
+        export MEDPETS MEDTHREADS
+        echo "MEDIATOR using (threads, PETS) = (${MEDTHREADS}, ${MEDPETS})"
+
+        CHMPETS=0; CHMTHREADS=0
+        if [[ "${DO_AERO}" == "YES" ]]; then
+            # GOCART shares the same grid and forecast tasks as FV3 (do not add write grid component tasks).
+            (( CHMTHREADS = ATMTHREADS ))
+            (( CHMPETS = FV3PETS ))
+            # Do not add to NTASKS_TOT
+            echo "GOCART using (threads, PETS) = (${CHMTHREADS}, ${CHMPETS})"
+        fi
+        export CHMPETS CHMTHREADS
+
+        WAVPETS=0; WAVTHREADS=0
+        if [[ "${DO_WAVE}" == "YES" ]]; then
+            (( WAVPETS = ntasks_ww3 * nthreads_ww3 ))
+            (( WAVTHREADS = nthreads_ww3 ))
+            echo "WW3 using (threads, PETS) = (${WAVTHREADS}, ${WAVPETS})"
+            (( NTASKS_TOT = NTASKS_TOT + WAVPETS ))
+        fi
+        export WAVPETS WAVTHREADS
+
+        OCNPETS=0; OCNTHREADS=0
+        if [[ "${DO_OCN}" == "YES" ]]; then
+            (( OCNPETS = ntasks_mom6 * nthreads_mom6 ))
+            (( OCNTHREADS = nthreads_mom6 ))
+            echo "MOM6 using (threads, PETS) = (${OCNTHREADS}, ${OCNPETS})"
+            (( NTASKS_TOT = NTASKS_TOT + OCNPETS ))
+        fi
+        export OCNPETS OCNTHREADS
+
+        ICEPETS=0; ICETHREADS=0
+        if [[ "${DO_ICE}" == "YES" ]]; then
+            (( ICEPETS = ntasks_cice6 * nthreads_cice6 ))
+            (( ICETHREADS = nthreads_cice6 ))
+            echo "CICE6 using (threads, PETS) = (${ICETHREADS}, ${ICEPETS})"
+            (( NTASKS_TOT = NTASKS_TOT + ICEPETS ))
+        fi
+        export ICEPETS ICETHREADS
+
+        echo "Total PETS = ${NTASKS_TOT}"
+
+        declare -x "ntasks"="${NTASKS_TOT}"
+        declare -x "threads_per_task"="${UFS_THREADS}"
+        declare -x "tasks_per_node"="${max_tasks_per_node}"

        case "${CASE}" in
            "C48" | "C96" | "C192")
-                declare -x "walltime_gefs"="03:00:00"
-                declare -x "walltime_gfs"="03:00:00"
+                declare -x "walltime"="03:00:00"
                ;;
            "C384" | "C768" | "C1152")
-                declare -x "walltime_gefs"="06:00:00"
-                declare -x "walltime_gfs"="06:00:00"
+                declare -x "walltime"="06:00:00"
                ;;
            *)
                echo "FATAL ERROR: Resources not defined for job ${step} at resolution ${CASE}"
@@ -201,7 +191,6 @@
            ;;
        esac

-        unset _RUN _RUN_LIST
        unset NTASKS_TOT
        ;;
diff --git a/parm/config/gfs/config.anal b/parm/config/gfs/config.anal
index d285210197..27ff8742e4 100644
--- a/parm/config/gfs/config.anal
+++ b/parm/config/gfs/config.anal
@@ -12,7 +12,7 @@ if [[ ${DONST} = "YES" ]]; then
    . ${EXPDIR}/config.nsst
 fi

-if [[ "${RUN}" = "gfs" ]] ; then
+if [[ "${RUN}" == "gfs" ]] ; then
    export USE_RADSTAT="NO" # This can be only used when bias correction is not-zero.
    export GENDIAG="NO"
    export SETUP='diag_rad=.false.,diag_pcp=.false.,diag_conv=.false.,diag_ozone=.false.,write_diag(3)=.false.,niter(2)=100,'
diff --git a/parm/config/gfs/config.base b/parm/config/gfs/config.base
index 4ce3c8f45c..d45d91961d 100644
--- a/parm/config/gfs/config.base
+++ b/parm/config/gfs/config.base
@@ -449,6 +449,8 @@ export binary_diag=".false."
 # Verification options
 export DO_METP="@DO_METP@"  # Run METPLUS jobs - set METPLUS settings in config.metp
+# TODO Reenable METplus jobs when issue #2790 is resolved
+export DO_METP="NO"
 export DO_FIT2OBS="YES"  # Run fit to observations package
 export DO_VRFY_OCEANDA="@DO_VRFY_OCEANDA@"  # Run SOCA Ocean and Seaice DA verification tasks
diff --git a/parm/config/gfs/config.com b/parm/config/gfs/config.com
index ec867e64ba..222ffdae95 100644
--- a/parm/config/gfs/config.com
+++ b/parm/config/gfs/config.com
@@ -82,11 +82,13 @@ declare -rx COM_OCEAN_HISTORY_TMPL=${COM_BASE}'/model_data/ocean/history'
 declare -rx COM_OCEAN_RESTART_TMPL=${COM_BASE}'/model_data/ocean/restart'
 declare -rx COM_OCEAN_INPUT_TMPL=${COM_BASE}'/model_data/ocean/input'
 declare -rx COM_OCEAN_ANALYSIS_TMPL=${COM_BASE}'/analysis/ocean'
+declare -rx COM_OCEAN_BMATRIX_TMPL=${COM_BASE}'/bmatrix/ocean'
 declare -rx COM_OCEAN_NETCDF_TMPL=${COM_BASE}'/products/ocean/netcdf'
 declare -rx COM_OCEAN_GRIB_TMPL=${COM_BASE}'/products/ocean/grib2'
 declare -rx COM_OCEAN_GRIB_GRID_TMPL=${COM_OCEAN_GRIB_TMPL}'/${GRID}'

 declare -rx COM_ICE_ANALYSIS_TMPL=${COM_BASE}'/analysis/ice'
+declare -rx COM_ICE_BMATRIX_TMPL=${COM_BASE}'/bmatrix/ice'
 declare -rx COM_ICE_INPUT_TMPL=${COM_BASE}'/model_data/ice/input'
 declare -rx COM_ICE_HISTORY_TMPL=${COM_BASE}'/model_data/ice/history'
 declare -rx COM_ICE_RESTART_TMPL=${COM_BASE}'/model_data/ice/restart'
diff --git a/parm/config/gfs/config.eobs b/parm/config/gfs/config.eobs
index 2f61b3bd42..7b7823e764 100644
--- a/parm/config/gfs/config.eobs
+++ b/parm/config/gfs/config.eobs
@@ -15,7 +15,7 @@ export RERUN_EOMGGRP="YES"
 # GSI namelist options related to observer for EnKF
 export OBSINPUT_INVOBS="dmesh(1)=225.0,dmesh(2)=225.0,dmesh(3)=225.0,dmesh(4)=100.0"
 export OBSQC_INVOBS="tcp_width=60.0,tcp_ermin=2.0,tcp_ermax=12.0"
-if [ ${LEVS} = "128" ]; then
+if (( LEVS == 128 )); then
    export GRIDOPTS_INVOBS="nlayers(63)=1,nlayers(64)=1,"
    export SETUP_INVOBS="gpstop=55,nsig_ext=56,"
 fi
diff --git a/parm/config/gfs/config.marinebmat b/parm/config/gfs/config.marinebmat
new file mode 100644
index 0000000000..d88739dced
--- /dev/null
+++ b/parm/config/gfs/config.marinebmat
@@ -0,0 +1,11 @@
+#!/bin/bash
+
+########## config.marinebmat ##########
+# configuration for the marine B-matrix
+
+echo "BEGIN: config.marinebmat"
+
+# Get task specific resources
+. "${EXPDIR}/config.resources" marinebmat
+
+echo "END: config.marinebmat"
diff --git a/parm/config/gfs/config.ocnanal b/parm/config/gfs/config.ocnanal
index 367e570ec8..4d58f2dedf 100644
--- a/parm/config/gfs/config.ocnanal
+++ b/parm/config/gfs/config.ocnanal
@@ -6,21 +6,15 @@ echo "BEGIN: config.ocnanal"

 export OBS_YAML_DIR="${HOMEgfs}/sorc/gdas.cd/parm/soca/obs/config"
-export OBS_LIST=@SOCA_OBS_LIST@
-export OBS_YAML="${OBS_LIST}"
-export FV3JEDI_STAGE_YAML="${HOMEgfs}/sorc/gdas.cd/test/soca/testinput/dumy.yaml"
+export OBS_LIST=@SOCA_OBS_LIST@  # TODO(GA): doesn't look necessary as is to have
+export OBS_YAML="${OBS_LIST}"    # OBS_LIST and OBS_YAML pick one or add logic
 export SOCA_INPUT_FIX_DIR=@SOCA_INPUT_FIX_DIR@
-export SOCA_VARS=tocn,socn,ssh
-export SABER_BLOCKS_YAML=@SABER_BLOCKS_YAML@
 export SOCA_NINNER=@SOCA_NINNER@
-export CASE_ANL=@CASE_ANL@
 export DOMAIN_STACK_SIZE=116640000 #TODO: Make the stack size resolution dependent
-export JEDI_BIN=${HOMEgfs}/sorc/gdas.cd/build/bin
-export SOCA_FIX_STAGE_YAML_TMPL="${PARMgfs}/gdas/soca/soca_fix_stage.yaml.j2"
 export SOCA_ENS_BKG_STAGE_YAML_TMPL="${PARMgfs}/gdas/soca/soca_ens_bkg_stage.yaml.j2"
+export SOCA_FIX_YAML_TMPL="${PARMgfs}/gdas/soca/soca_fix_stage_${OCNRES}.yaml.j2"

-# NICAS
-export NICAS_RESOL=@NICAS_RESOL@
-export NICAS_GRID_SIZE=@NICAS_GRID_SIZE@
+export JEDI_BIN=${HOMEgfs}/sorc/gdas.cd/build/bin  # TODO(GA): remove once analysis "run"
+                                                   #           and "checkpoint" are refactored

 echo "END: config.ocnanal"
diff --git a/parm/config/gfs/config.ocnanalbmat b/parm/config/gfs/config.ocnanalbmat
deleted file mode 100644
index 024da5f51b..0000000000
--- a/parm/config/gfs/config.ocnanalbmat
+++ /dev/null
@@ -1,11 +0,0 @@
-#!/bin/bash
-
-########## config.ocnanalbmat ##########
-# Ocn Analysis specific
-
-echo "BEGIN: config.ocnanalbmat"
-
-# Get task specific resources
"${EXPDIR}/config.resources" ocnanalbmat - -echo "END: config.ocnanalbmat" diff --git a/parm/config/gfs/config.resources b/parm/config/gfs/config.resources index 81677d1502..ebdfd5d713 100644 --- a/parm/config/gfs/config.resources +++ b/parm/config/gfs/config.resources @@ -26,7 +26,7 @@ if (( $# != 1 )); then echo "waveinit waveprep wavepostsbs wavepostbndpnt wavepostbndpntbll wavepostpnt" echo "wavegempak waveawipsbulls waveawipsgridded" echo "postsnd awips gempak npoess" - echo "ocnanalprep prepoceanobs ocnanalbmat ocnanalrun ocnanalecen marineanalletkf ocnanalchkpt ocnanalpost ocnanalvrfy" + echo "ocnanalprep prepoceanobs marinebmat ocnanalrun ocnanalecen marineanalletkf ocnanalchkpt ocnanalpost ocnanalvrfy" exit 1 fi @@ -439,7 +439,8 @@ case ${step} in memory="48GB" ;; - "ocnanalbmat") + "marinebmat") + npes=16 ntasks=16 case ${OCNRES} in "025") ntasks=480;; @@ -635,134 +636,128 @@ case ${step} in "fcst" | "efcs") export is_exclusive=True - if [[ "${step}" == "fcst" ]]; then - _RUN_LIST=${RUN:-"gdas gfs"} - elif [[ "${step}" == "efcs" ]]; then - _RUN_LIST=${RUN:-"enkfgdas enkfgfs"} - fi - - # During workflow creation, we need resources for all RUNs and RUN is undefined - for _RUN in ${_RUN_LIST}; do - if [[ "${_RUN}" =~ "gfs" ]]; then - export layout_x=${layout_x_gfs} - export layout_y=${layout_y_gfs} - export WRITE_GROUP=${WRITE_GROUP_GFS} - export WRTTASK_PER_GROUP_PER_THREAD=${WRTTASK_PER_GROUP_PER_THREAD_GFS} - ntasks_fv3=${ntasks_fv3_gfs} - ntasks_quilt=${ntasks_quilt_gfs} - nthreads_fv3=${nthreads_fv3_gfs} - nthreads_ufs=${nthreads_ufs_gfs} - # Will not be set if we are skipping the mediator - nthreads_mediator=${nthreads_mediator_gfs:-} - elif [[ "${_RUN}" =~ "gdas" ]]; then - export layout_x=${layout_x_gdas} - export layout_y=${layout_y_gdas} - export WRITE_GROUP=${WRITE_GROUP_GDAS} - export WRTTASK_PER_GROUP_PER_THREAD=${WRTTASK_PER_GROUP_PER_THREAD_GDAS} - ntasks_fv3=${ntasks_fv3_gdas} - ntasks_quilt=${ntasks_quilt_gdas} - nthreads_fv3=${nthreads_fv3_gdas} - nthreads_ufs=${nthreads_ufs_gdas} - nthreads_mediator=${nthreads_mediator_gdas:-} + _RUN=${RUN:-"gfs"} + _RUN=${RUN/enkf/} + + # Declare variables from config.ufs based on _RUN + # Export layout and write task variables, but not ntasks/threads + # Capitalize _RUN for write tasks + for var in layout_x layout_y ntasks_fv3 ntasks_quilt nthreads_fv3 nthreads_ufs \ + WRITE_GROUP WRTTASK_PER_GROUP_PER_THREAD; do + if [[ ${var} =~ "layout" ]]; then + ufs_var_name="${var}_${_RUN}" + declare -x "${var}"="${!ufs_var_name}" + elif [[ ${var} =~ "WR" ]]; then + ufs_var_name="${var}_${_RUN^^}" + declare -x "${var}"="${!ufs_var_name}" + else + ufs_var_name="${var}_${_RUN}" + declare "${var}"="${!ufs_var_name}" fi + done - # Determine if using ESMF-managed threading or traditional threading - # If using traditional threading, set them to 1 - if [[ "${USE_ESMF_THREADING:-}" == "YES" ]]; then - export UFS_THREADS=1 - else # traditional threading - export UFS_THREADS=${nthreads_ufs:-1} - nthreads_fv3=1 - nthreads_mediator=1 - [[ "${DO_WAVE}" == "YES" ]] && nthreads_ww3=1 - [[ "${DO_OCN}" == "YES" ]] && nthreads_mom6=1 - [[ "${DO_ICE}" == "YES" ]] && nthreads_cice6=1 - fi + # Will not set mediator threads if we are skipping the mediator + if [[ ${_RUN} == "gfs" ]]; then + nthreads_mediator=${nthreads_mediator_gfs:-} + elif [[ ${_RUN} == "gdas" ]]; then + nthreads_mediator=${nthreads_mediator_gdas:-} + fi - if (( ntiles > 6 )); then - export layout_x_nest=${layout_x_nest:-10} - export layout_y_nest=${layout_y_nest:-10} - export 
-                export npx_nest=${npx_nest:-1441}
-                export npy_nest=${npy_nest:-961}
-            fi
+        # Determine if using ESMF-managed threading or traditional threading
+        # If using traditional threading, set them to 1
+        if [[ "${USE_ESMF_THREADING:-}" == "YES" ]]; then
+            export UFS_THREADS=1
+        else  # traditional threading
+            export UFS_THREADS=${nthreads_ufs:-1}
+            nthreads_fv3=1
+            nthreads_mediator=1
+            [[ "${DO_WAVE}" == "YES" ]] && nthreads_ww3=1
+            [[ "${DO_OCN}" == "YES" ]] && nthreads_mom6=1
+            [[ "${DO_ICE}" == "YES" ]] && nthreads_cice6=1
+        fi

-            # PETS for the atmosphere dycore
-            (( FV3PETS = ntasks_fv3 * nthreads_fv3 ))
-            echo "FV3 using (nthreads, PETS) = (${nthreads_fv3}, ${FV3PETS})"
+        if (( ntiles > 6 )); then
+            export layout_x_nest=${layout_x_nest:-10}
+            export layout_y_nest=${layout_y_nest:-10}
+            export npx_nest=${npx_nest:-1441}
+            export npy_nest=${npy_nest:-961}
+        fi

-            # PETS for quilting
-            if [[ "${QUILTING:-}" == ".true." ]]; then
-                (( QUILTPETS = ntasks_quilt * nthreads_fv3 ))
-                (( WRTTASK_PER_GROUP = WRTTASK_PER_GROUP_PER_THREAD ))
-                export WRTTASK_PER_GROUP
-            else
-                QUILTPETS=0
-            fi
-            echo "QUILT using (nthreads, PETS) = (${nthreads_fv3}, ${QUILTPETS})"
-
-            # Total PETS for the atmosphere component
-            ATMTHREADS=${nthreads_fv3}
-            (( ATMPETS = FV3PETS + QUILTPETS ))
-            export ATMPETS ATMTHREADS
-            echo "FV3ATM using (nthreads, PETS) = (${ATMTHREADS}, ${ATMPETS})"
-
-            # Total PETS for the coupled model (starting w/ the atmosphere)
-            NTASKS_TOT=${ATMPETS}
-
-            # The mediator PETS can overlap with other components, usually it lands on the atmosphere tasks.
-            # However, it is suggested limiting mediator PETS to 300, as it may cause the slow performance.
-            # See https://docs.google.com/document/d/1bKpi-52t5jIfv2tuNHmQkYUe3hkKsiG_DG_s6Mnukog/edit
-            # TODO: Update reference when moved to ufs-weather-model RTD
-            MEDTHREADS=${nthreads_mediator:-1}
-            MEDPETS=${MEDPETS:-${FV3PETS}}
-            (( "${MEDPETS}" > 300 )) && MEDPETS=300
-            export MEDPETS MEDTHREADS
-            echo "MEDIATOR using (threads, PETS) = (${MEDTHREADS}, ${MEDPETS})"
-
-            CHMPETS=0; CHMTHREADS=0
-            if [[ "${DO_AERO}" == "YES" ]]; then
-                # GOCART shares the same grid and forecast tasks as FV3 (do not add write grid component tasks).
-                (( CHMTHREADS = ATMTHREADS ))
-                (( CHMPETS = FV3PETS ))
-                # Do not add to NTASKS_TOT
-                echo "GOCART using (threads, PETS) = (${CHMTHREADS}, ${CHMPETS})"
-            fi
-            export CHMPETS CHMTHREADS
-
-            WAVPETS=0; WAVTHREADS=0
-            if [[ "${DO_WAVE}" == "YES" ]]; then
-                (( WAVPETS = ntasks_ww3 * nthreads_ww3 ))
-                (( WAVTHREADS = nthreads_ww3 ))
-                echo "WW3 using (threads, PETS) = (${WAVTHREADS}, ${WAVPETS})"
-                (( NTASKS_TOT = NTASKS_TOT + WAVPETS ))
-            fi
-            export WAVPETS WAVTHREADS
-
-            OCNPETS=0; OCNTHREADS=0
-            if [[ "${DO_OCN}" == "YES" ]]; then
-                (( OCNPETS = ntasks_mom6 * nthreads_mom6 ))
-                (( OCNTHREADS = nthreads_mom6 ))
-                echo "MOM6 using (threads, PETS) = (${OCNTHREADS}, ${OCNPETS})"
-                (( NTASKS_TOT = NTASKS_TOT + OCNPETS ))
-            fi
-            export OCNPETS OCNTHREADS
-
-            ICEPETS=0; ICETHREADS=0
-            if [[ "${DO_ICE}" == "YES" ]]; then
-                (( ICEPETS = ntasks_cice6 * nthreads_cice6 ))
-                (( ICETHREADS = nthreads_cice6 ))
-                echo "CICE6 using (threads, PETS) = (${ICETHREADS}, ${ICEPETS})"
-                (( NTASKS_TOT = NTASKS_TOT + ICEPETS ))
-            fi
-            export ICEPETS ICETHREADS
+        # PETS for the atmosphere dycore
+        (( FV3PETS = ntasks_fv3 * nthreads_fv3 ))
+        echo "FV3 using (nthreads, PETS) = (${nthreads_fv3}, ${FV3PETS})"

-            echo "Total PETS for ${_RUN} = ${NTASKS_TOT}"
+        # PETS for quilting
+        if [[ "${QUILTING:-}" == ".true." ]]; then
+            (( QUILTPETS = ntasks_quilt * nthreads_fv3 ))
+            (( WRTTASK_PER_GROUP = WRTTASK_PER_GROUP_PER_THREAD ))
+            export WRTTASK_PER_GROUP
+        else
+            QUILTPETS=0
+        fi
+        echo "QUILT using (nthreads, PETS) = (${nthreads_fv3}, ${QUILTPETS})"
+
+        # Total PETS for the atmosphere component
+        ATMTHREADS=${nthreads_fv3}
+        (( ATMPETS = FV3PETS + QUILTPETS ))
+        export ATMPETS ATMTHREADS
+        echo "FV3ATM using (nthreads, PETS) = (${ATMTHREADS}, ${ATMPETS})"
+
+        # Total PETS for the coupled model (starting w/ the atmosphere)
+        NTASKS_TOT=${ATMPETS}
+
+        # The mediator PETS can overlap with other components, usually it lands on the atmosphere tasks.
+        # However, it is suggested limiting mediator PETS to 300, as it may cause the slow performance.
+        # See https://docs.google.com/document/d/1bKpi-52t5jIfv2tuNHmQkYUe3hkKsiG_DG_s6Mnukog/edit
+        # TODO: Update reference when moved to ufs-weather-model RTD
+        MEDTHREADS=${nthreads_mediator:-1}
+        MEDPETS=${MEDPETS:-${FV3PETS}}
+        (( "${MEDPETS}" > 300 )) && MEDPETS=300
+        export MEDPETS MEDTHREADS
+        echo "MEDIATOR using (threads, PETS) = (${MEDTHREADS}, ${MEDPETS})"
+
+        CHMPETS=0; CHMTHREADS=0
+        if [[ "${DO_AERO}" == "YES" ]]; then
+            # GOCART shares the same grid and forecast tasks as FV3 (do not add write grid component tasks).
+            (( CHMTHREADS = ATMTHREADS ))
+            (( CHMPETS = FV3PETS ))
+            # Do not add to NTASKS_TOT
+            echo "GOCART using (threads, PETS) = (${CHMTHREADS}, ${CHMPETS})"
+        fi
+        export CHMPETS CHMTHREADS
+
+        WAVPETS=0; WAVTHREADS=0
+        if [[ "${DO_WAVE}" == "YES" ]]; then
+            (( WAVPETS = ntasks_ww3 * nthreads_ww3 ))
+            (( WAVTHREADS = nthreads_ww3 ))
+            echo "WW3 using (threads, PETS) = (${WAVTHREADS}, ${WAVPETS})"
+            (( NTASKS_TOT = NTASKS_TOT + WAVPETS ))
+        fi
+        export WAVPETS WAVTHREADS
+
+        OCNPETS=0; OCNTHREADS=0
+        if [[ "${DO_OCN}" == "YES" ]]; then
+            (( OCNPETS = ntasks_mom6 * nthreads_mom6 ))
+            (( OCNTHREADS = nthreads_mom6 ))
+            echo "MOM6 using (threads, PETS) = (${OCNTHREADS}, ${OCNPETS})"
+            (( NTASKS_TOT = NTASKS_TOT + OCNPETS ))
+        fi
+        export OCNPETS OCNTHREADS
+
+        ICEPETS=0; ICETHREADS=0
+        if [[ "${DO_ICE}" == "YES" ]]; then
+            (( ICEPETS = ntasks_cice6 * nthreads_cice6 ))
+            (( ICETHREADS = nthreads_cice6 ))
+            echo "CICE6 using (threads, PETS) = (${ICETHREADS}, ${ICEPETS})"
+            (( NTASKS_TOT = NTASKS_TOT + ICEPETS ))
+        fi
+        export ICEPETS ICETHREADS

-            declare -x "ntasks_${_RUN}"="${NTASKS_TOT}"
-            declare -x "threads_per_task_${_RUN}"="${UFS_THREADS}"
-            declare -x "tasks_per_node_${_RUN}"="${max_tasks_per_node}"
+        echo "Total PETS for ${RUN:-gfs} = ${NTASKS_TOT}"

-        done
+        declare -x "ntasks"="${NTASKS_TOT}"
+        declare -x "threads_per_task"="${UFS_THREADS}"
+        declare -x "tasks_per_node"="${max_tasks_per_node}"

        case "${CASE}" in
            "C48" | "C96" | "C192")
@@ -788,7 +783,7 @@
            ;;
        esac

-        unset _RUN _RUN_LIST
+        unset _RUN
        unset NTASKS_TOT
        ;;
diff --git a/parm/config/gfs/config.resources.HERA b/parm/config/gfs/config.resources.HERA
index a683bc79e7..36f50508c3 100644
--- a/parm/config/gfs/config.resources.HERA
+++ b/parm/config/gfs/config.resources.HERA
@@ -5,8 +5,7 @@ case ${step} in

    "anal")
        if [[ "${CASE}" == "C384" ]]; then
-            export ntasks_gdas=270
-            export ntasks_gfs=270
+            export ntasks=270
            export threads_per_task_anal=8
            export tasks_per_node=$(( max_tasks_per_node / threads_per_task ))
        fi
diff --git a/parm/config/gfs/config.resources.JET b/parm/config/gfs/config.resources.JET
index 9463e8357b..47b953c0f4 100644
--- a/parm/config/gfs/config.resources.JET
+++ b/parm/config/gfs/config.resources.JET
@@ -5,8 +5,7 @@ case ${step} in

    "anal")
"${CASE}" == "C384" ]]; then - export ntasks_gdas=270 - export ntasks_gfs=270 + export ntasks=270 export threads_per_task=8 export tasks_per_node=$(( max_tasks_per_node / threads_per_task )) fi diff --git a/parm/config/gfs/yaml/defaults.yaml b/parm/config/gfs/yaml/defaults.yaml index 2d662a9bcb..da4d587dff 100644 --- a/parm/config/gfs/yaml/defaults.yaml +++ b/parm/config/gfs/yaml/defaults.yaml @@ -22,8 +22,8 @@ base: FHMAX_ENKF_GFS: 12 atmanl: - JCB_ALGO_YAML: "${PARMgfs}/gdas/atm/jcb-prototype_3dvar.yaml.j2" - STATICB_TYPE: "gsibec" + JCB_ALGO_YAML: "${PARMgfs}/gdas/atm/jcb-prototype_3dvar.yaml.j2" + STATICB_TYPE: "gsibec" LAYOUT_X_ATMANL: 8 LAYOUT_Y_ATMANL: 8 IO_LAYOUT_X: 1 @@ -45,16 +45,12 @@ snowanl: IO_LAYOUT_Y: 1 ocnanal: - SOCA_INPUT_FIX_DIR: "/scratch2/NCEPDEV/ocean/Guillaume.Vernieres/data/static/72x35x25/soca" # TODO: These need to go to glopara fix space. - CASE_ANL: "C48" # TODO: Check in gdasapp if used anywhere for SOCA + SOCA_INPUT_FIX_DIR: "${FIXgfs}/gdas/soca/72x35x25/soca" SOCA_OBS_LIST: "${PARMgfs}/gdas/soca/obs/obs_list.yaml" # TODO: This is also repeated in oceanprepobs SOCA_NINNER: 100 - SABER_BLOCKS_YAML: "" - NICAS_RESOL: 1 - NICAS_GRID_SIZE: 15000 prepoceanobs: - SOCA_INPUT_FIX_DIR: "/scratch2/NCEPDEV/ocean/Guillaume.Vernieres/data/static/72x35x25/soca" # TODO: These need to go to glopara fix space. + SOCA_INPUT_FIX_DIR: "${FIXgfs}/gdas/soca/72x35x25/soca" SOCA_OBS_LIST: "${PARMgfs}/gdas/soca/obs/obs_list.yaml" # TODO: This is also repeated in ocnanal OBSPREP_YAML: "${PARMgfs}/gdas/soca/obsprep/obsprep_config.yaml" DMPDIR: "/scratch1/NCEPDEV/global/glopara/data/experimental_obs" diff --git a/scripts/exglobal_marinebmat.py b/scripts/exglobal_marinebmat.py new file mode 100755 index 0000000000..e285e646ac --- /dev/null +++ b/scripts/exglobal_marinebmat.py @@ -0,0 +1,24 @@ +#!/usr/bin/env python3 +# exglobal_marine_bmat_run.py +# This script creates an marineBmat object +# and runs the execute method +# which executes all the steps necessary to create the global marine B-matrix +import os + +from wxflow import Logger, cast_strdict_as_dtypedict +from pygfs.task.marine_bmat import MarineBMat + +# Initialize root logger +logger = Logger(level='DEBUG', colored_log=True) + + +if __name__ == '__main__': + + # Take configuration from environment and cast it as python dictionary + config = cast_strdict_as_dtypedict(os.environ) + + # Create an instance of the MarineBMat task + marineBMat = MarineBMat(config) + marineBMat.initialize() + marineBMat.execute() + marineBMat.finalize() diff --git a/scripts/exglobal_prep_snow_obs.py b/scripts/exglobal_prep_snow_obs.py index d4998a7d84..a6a9070151 100755 --- a/scripts/exglobal_prep_snow_obs.py +++ b/scripts/exglobal_prep_snow_obs.py @@ -21,5 +21,5 @@ # Instantiate the snow prepare task SnowAnl = SnowAnalysis(config) SnowAnl.prepare_GTS() - if f"{ SnowAnl.task_config.cyc }" == '18': + if SnowAnl.task_config.cyc == 0: SnowAnl.prepare_IMS() diff --git a/sorc/gdas.cd b/sorc/gdas.cd index 01a7c4f433..52f41a298b 160000 --- a/sorc/gdas.cd +++ b/sorc/gdas.cd @@ -1 +1 @@ -Subproject commit 01a7c4f433346581dee172044e0cd3bd0fe8bd71 +Subproject commit 52f41a298b4c6b7bbf6f203b6579516819fbbf36 diff --git a/sorc/link_workflow.sh b/sorc/link_workflow.sh index 8694f856b5..9722f5a2b8 100755 --- a/sorc/link_workflow.sh +++ b/sorc/link_workflow.sh @@ -71,8 +71,8 @@ ${LINK_OR_COPY} "${HOMEgfs}/versions/run.${machine}.ver" "${HOMEgfs}/versions/ru case "${machine}" in "wcoss2") FIX_DIR="/lfs/h2/emc/global/noscrub/emc.global/FIX/fix" ;; "hera") 
FIX_DIR="/scratch1/NCEPDEV/global/glopara/fix" ;; - "orion") FIX_DIR="/work/noaa/global/glopara/fix" ;; - "hercules") FIX_DIR="/work/noaa/global/glopara/fix" ;; + "orion") FIX_DIR="/work/noaa/global/kfriedma/glopara/fix" ;; + "hercules") FIX_DIR="/work/noaa/global/kfriedma/glopara/fix" ;; "jet") FIX_DIR="/lfs4/HFIP/hfv3gfs/glopara/git/fv3gfs/fix" ;; "s4") FIX_DIR="/data/prod/glopara/fix" ;; "gaea") FIX_DIR="/gpfs/f5/ufs-ard/world-shared/global/glopara/data/fix" ;; @@ -237,7 +237,7 @@ if [[ -d "${HOMEgfs}/sorc/gdas.cd" ]]; then cd "${HOMEgfs}/fix" || exit 1 [[ ! -d gdas ]] && mkdir -p gdas cd gdas || exit 1 - for gdas_sub in fv3jedi gsibec obs; do + for gdas_sub in fv3jedi gsibec obs soca; do if [[ -d "${gdas_sub}" ]]; then rm -rf "${gdas_sub}" fi @@ -368,6 +368,7 @@ if [[ -d "${HOMEgfs}/sorc/gdas.cd/build" ]]; then "gdas_soca_gridgen.x" \ "gdas_soca_error_covariance_toolbox.x" \ "gdas_soca_setcorscales.x" \ + "gdas_soca_diagb.x" \ "fv3jedi_plot_field.x" \ "fv3jedi_fv3inc.x" \ "gdas_ens_handler.x" \ diff --git a/ush/calcanl_gfs.py b/ush/calcanl_gfs.py index ea97cacf90..5d97d25dfd 100755 --- a/ush/calcanl_gfs.py +++ b/ush/calcanl_gfs.py @@ -19,7 +19,7 @@ def calcanl_gfs(DoIAU, l4DEnsVar, Write4Danl, ComOut, APrefix, ComIn_Ges, GPrefix, FixDir, atmges_ens_mean, RunDir, NThreads, NEMSGet, IAUHrs, - ExecCMD, ExecCMDMPI, ExecAnl, ExecChgresInc, Run, JEDI): + ExecCMD, ExecCMDMPI, ExecAnl, ExecChgresInc, run, JEDI): print('calcanl_gfs beginning at: ', datetime.datetime.utcnow()) IAUHH = IAUHrs diff --git a/ush/python/pygfs/__init__.py b/ush/python/pygfs/__init__.py index fa6b0b373e..c0b72bbc35 100644 --- a/ush/python/pygfs/__init__.py +++ b/ush/python/pygfs/__init__.py @@ -6,10 +6,12 @@ from .task.aero_analysis import AerosolAnalysis from .task.atm_analysis import AtmAnalysis from .task.atmens_analysis import AtmEnsAnalysis +from .task.marine_bmat import MarineBMat from .task.snow_analysis import SnowAnalysis from .task.upp import UPP from .task.oceanice_products import OceanIceProducts from .task.gfs_forecast import GFSForecast +from .utils import marine_da_utils __docformat__ = "restructuredtext" __version__ = "0.1.0" diff --git a/ush/python/pygfs/task/marine_bmat.py b/ush/python/pygfs/task/marine_bmat.py new file mode 100644 index 0000000000..9d64e621c9 --- /dev/null +++ b/ush/python/pygfs/task/marine_bmat.py @@ -0,0 +1,350 @@ +#!/usr/bin/env python3 + +import os +import glob +from logging import getLogger +import pygfs.utils.marine_da_utils as mdau + +from wxflow import (AttrDict, + FileHandler, + add_to_datetime, to_timedelta, + chdir, + parse_j2yaml, + logit, + Executable, + Task) + +logger = getLogger(__name__.split('.')[-1]) + + +class MarineBMat(Task): + """ + Class for global marine B-matrix tasks + """ + @logit(logger, name="MarineBMat") + def __init__(self, config): + super().__init__(config) + _home_gdas = os.path.join(self.task_config.HOMEgfs, 'sorc', 'gdas.cd') + _calc_scale_exec = os.path.join(self.task_config.HOMEgfs, 'ush', 'soca', 'calc_scales.py') + _window_begin = add_to_datetime(self.task_config.current_cycle, -to_timedelta(f"{self.task_config.assim_freq}H") / 2) + _window_end = add_to_datetime(self.task_config.current_cycle, to_timedelta(f"{self.task_config.assim_freq}H") / 2) + + # compute the relative path from self.task_config.DATA to self.task_config.DATAenspert + if self.task_config.NMEM_ENS > 0: + _enspert_relpath = os.path.relpath(self.task_config.DATAenspert, self.task_config.DATA) + else: + _enspert_relpath = None + + # Create a local dictionary that is repeatedly 
used across this class + local_dict = AttrDict( + { + 'HOMEgdas': _home_gdas, + 'MARINE_WINDOW_BEGIN': _window_begin, + 'MARINE_WINDOW_END': _window_end, + 'MARINE_WINDOW_MIDDLE': self.task_config.current_cycle, + 'BERROR_YAML_DIR': os.path.join(_home_gdas, 'parm', 'soca', 'berror'), + 'GRID_GEN_YAML': os.path.join(_home_gdas, 'parm', 'soca', 'gridgen', 'gridgen.yaml'), + 'MARINE_ENSDA_STAGE_BKG_YAML_TMPL': os.path.join(_home_gdas, 'parm', 'soca', 'ensda', 'stage_ens_mem.yaml.j2'), + 'MARINE_DET_STAGE_BKG_YAML_TMPL': os.path.join(_home_gdas, 'parm', 'soca', 'soca_det_bkg_stage.yaml.j2'), + 'ENSPERT_RELPATH': _enspert_relpath, + 'CALC_SCALE_EXEC': _calc_scale_exec, + 'APREFIX': f"{self.task_config.RUN}.t{self.task_config.cyc:02d}z.", + } + ) + + # Extend task_config with local_dict + self.task_config = AttrDict(**self.task_config, **local_dict) + + @logit(logger) + def initialize(self: Task) -> None: + """Initialize a global B-matrix + + This method will initialize a global B-Matrix. + This includes: + - staging the deterministic backgrounds (middle of window) + - staging SOCA fix files + - staging static ensemble members (optional) + - staging ensemble members (optional) + - generating the YAML files for the JEDI and GDASApp executables + - creating output directories + """ + super().initialize() + + # stage fix files + logger.info(f"Staging SOCA fix files from {self.task_config.SOCA_INPUT_FIX_DIR}") + soca_fix_list = parse_j2yaml(self.task_config.SOCA_FIX_YAML_TMPL, self.task_config) + FileHandler(soca_fix_list).sync() + + # prepare the MOM6 input.nml + mdau.prep_input_nml(self.task_config) + + # stage backgrounds + # TODO(G): Check ocean backgrounds dates for consistency + bkg_list = parse_j2yaml(self.task_config.MARINE_DET_STAGE_BKG_YAML_TMPL, self.task_config) + FileHandler(bkg_list).sync() + for cice_fname in ['./INPUT/cice.res.nc', './bkg/ice.bkg.f006.nc', './bkg/ice.bkg.f009.nc']: + mdau.cice_hist2fms(cice_fname, cice_fname) + + # stage the grid generation yaml + FileHandler({'copy': [[self.task_config.GRID_GEN_YAML, + os.path.join(self.task_config.DATA, 'gridgen.yaml')]]}).sync() + + # generate the variance partitioning YAML file + logger.debug("Generate variance partitioning YAML file") + diagb_config = parse_j2yaml(path=os.path.join(self.task_config.BERROR_YAML_DIR, 'soca_diagb.yaml.j2'), + data=self.task_config) + diagb_config.save(os.path.join(self.task_config.DATA, 'soca_diagb.yaml')) + + # generate the vertical decorrelation scale YAML file + logger.debug("Generate the vertical correlation scale YAML file") + vtscales_config = parse_j2yaml(path=os.path.join(self.task_config.BERROR_YAML_DIR, 'soca_vtscales.yaml.j2'), + data=self.task_config) + vtscales_config.save(os.path.join(self.task_config.DATA, 'soca_vtscales.yaml')) + + # generate vertical diffusion scale YAML file + logger.debug("Generate vertical diffusion YAML file") + diffvz_config = parse_j2yaml(path=os.path.join(self.task_config.BERROR_YAML_DIR, 'soca_parameters_diffusion_vt.yaml.j2'), + data=self.task_config) + diffvz_config.save(os.path.join(self.task_config.DATA, 'soca_parameters_diffusion_vt.yaml')) + + # generate the horizontal diffusion YAML files + if True: # TODO(G): skip this section once we have optimized the scales + # stage the correlation scale configuration + logger.debug("Generate correlation scale YAML file") + FileHandler({'copy': [[os.path.join(self.task_config.BERROR_YAML_DIR, 'soca_setcorscales.yaml'), + os.path.join(self.task_config.DATA, 'soca_setcorscales.yaml')]]}).sync() + + # generate 
+
+        # generate the horizontal diffusion YAML files
+        if True:  # TODO(G): skip this section once we have optimized the scales
+            # stage the correlation scale configuration
+            logger.debug("Stage the correlation scale YAML file")
+            FileHandler({'copy': [[os.path.join(self.task_config.BERROR_YAML_DIR, 'soca_setcorscales.yaml'),
+                                   os.path.join(self.task_config.DATA, 'soca_setcorscales.yaml')]]}).sync()
+
+            # generate the horizontal diffusion scale YAML file
+            logger.debug("Generate horizontal diffusion scale YAML file")
+            diffhz_config = parse_j2yaml(path=os.path.join(self.task_config.BERROR_YAML_DIR, 'soca_parameters_diffusion_hz.yaml.j2'),
+                                         data=self.task_config)
+            diffhz_config.save(os.path.join(self.task_config.DATA, 'soca_parameters_diffusion_hz.yaml'))
+
+        # hybrid EnVAR case
+        if self.task_config.DOHYBVAR == "YES" or self.task_config.NMEM_ENS > 2:
+            # stage the ensemble member files for use in the hybrid background error
+            logger.debug("Stage ensemble members for the hybrid background error")
+            mdau.stage_ens_mem(self.task_config)
+
+            # generate the ensemble recentering/rebalancing YAML file
+            logger.debug("Generate ensemble recentering YAML file")
+            ensrecenter_config = parse_j2yaml(path=os.path.join(self.task_config.BERROR_YAML_DIR, 'soca_ensb.yaml.j2'),
+                                              data=self.task_config)
+            ensrecenter_config.save(os.path.join(self.task_config.DATA, 'soca_ensb.yaml'))
+
+            # generate the ensemble weights YAML file
+            logger.debug("Generate ensemble weights YAML file")
+            hybridweights_config = parse_j2yaml(path=os.path.join(self.task_config.BERROR_YAML_DIR, 'soca_ensweights.yaml.j2'),
+                                                data=self.task_config)
+            hybridweights_config.save(os.path.join(self.task_config.DATA, 'soca_ensweights.yaml'))
+
+        # need an output dir for the ensemble perturbations and static B-matrix
+        logger.debug("Create empty diagb directory to receive output from the executables")
+        FileHandler({'mkdir': [os.path.join(self.task_config.DATA, 'diagb')]}).sync()
+
+    @logit(logger)
+    def gridgen(self: Task) -> None:
+        """Generate the SOCA grid (soca_gridspec.nc)
+        """
+        # link gdas_soca_gridgen.x
+        mdau.link_executable(self.task_config, 'gdas_soca_gridgen.x')
+        exec_cmd = Executable(self.task_config.APRUN_MARINEBMAT)
+        exec_name = os.path.join(self.task_config.DATA, 'gdas_soca_gridgen.x')
+        exec_cmd.add_default_arg(exec_name)
+        exec_cmd.add_default_arg('gridgen.yaml')
+
+        mdau.run(exec_cmd)
+
+    @logit(logger)
+    def variance_partitioning(self: Task) -> None:
+        """Compute the variance partitioning of the background error (diagb)
+        """
+        # link the variance partitioning executable, gdas_soca_diagb.x
+        mdau.link_executable(self.task_config, 'gdas_soca_diagb.x')
+        exec_cmd = Executable(self.task_config.APRUN_MARINEBMAT)
+        exec_name = os.path.join(self.task_config.DATA, 'gdas_soca_diagb.x')
+        exec_cmd.add_default_arg(exec_name)
+        exec_cmd.add_default_arg('soca_diagb.yaml')
+
+        mdau.run(exec_cmd)
+
+    @logit(logger)
+    def horizontal_diffusion(self: Task) -> None:
+        """Generate the horizontal diffusion coefficients
+        """
+        # link the executable that computes the correlation scales, gdas_soca_setcorscales.x,
+        # and prepare the command to run it
+        mdau.link_executable(self.task_config, 'gdas_soca_setcorscales.x')
+        exec_cmd = Executable(self.task_config.APRUN_MARINEBMAT)
+        exec_name = os.path.join(self.task_config.DATA, 'gdas_soca_setcorscales.x')
+        exec_cmd.add_default_arg(exec_name)
+        exec_cmd.add_default_arg('soca_setcorscales.yaml')
+
+        # create the files containing the correlation scales
+        mdau.run(exec_cmd)
+
+        # link the executable that computes the diffusion coefficients, gdas_soca_error_covariance_toolbox.x,
+        # and prepare the command to run it
+        mdau.link_executable(self.task_config, 'gdas_soca_error_covariance_toolbox.x')
+        exec_cmd = Executable(self.task_config.APRUN_MARINEBMAT)
+        exec_name = os.path.join(self.task_config.DATA, 'gdas_soca_error_covariance_toolbox.x')
+        exec_cmd.add_default_arg(exec_name)
+        exec_cmd.add_default_arg('soca_parameters_diffusion_hz.yaml')
+
+        # compute the coefficients of the diffusion operator
+        mdau.run(exec_cmd)
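+
+    # The horizontal case above runs in two steps: gdas_soca_setcorscales.x first
+    # writes the correlation scale fields, which gdas_soca_error_covariance_toolbox.x
+    # then reads to compute the diffusion operator coefficients (hz_ocean.nc and
+    # hz_ice.nc; the vertical analog below produces vt_ocean.nc).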
+
+    @logit(logger)
+    def vertical_diffusion(self: Task) -> None:
+        """Generate the vertical diffusion coefficients
+        """
+        # compute the vertical correlation scales based on the MLD
+        FileHandler({'copy': [[self.task_config.CALC_SCALE_EXEC,
+                               os.path.join(self.task_config.DATA, 'calc_scales.x')]]}).sync()
+        exec_cmd = Executable("python")
+        exec_name = os.path.join(self.task_config.DATA, 'calc_scales.x')
+        exec_cmd.add_default_arg(exec_name)
+        exec_cmd.add_default_arg('soca_vtscales.yaml')
+        mdau.run(exec_cmd)
+
+        # link the executable that computes the diffusion coefficients, gdas_soca_error_covariance_toolbox.x,
+        # and prepare the command to run it
+        mdau.link_executable(self.task_config, 'gdas_soca_error_covariance_toolbox.x')
+        exec_cmd = Executable(self.task_config.APRUN_MARINEBMAT)
+        exec_name = os.path.join(self.task_config.DATA, 'gdas_soca_error_covariance_toolbox.x')
+        exec_cmd.add_default_arg(exec_name)
+        exec_cmd.add_default_arg('soca_parameters_diffusion_vt.yaml')
+
+        # compute the coefficients of the diffusion operator
+        mdau.run(exec_cmd)
+
+    @logit(logger)
+    def ensemble_perturbations(self: Task) -> None:
+        """Generate the 3D ensemble of perturbations for the 3DEnVAR
+
+        This method will generate ensemble perturbations re-balanced w.r.t. the
+        deterministic background.
+        This includes:
+        - computing and storing the unbalanced ensemble perturbations' statistics
+        - recentering the ensemble members around the deterministic background and
+          accounting for the nonlinear steric recentering
+        - saving the recentered ensemble statistics
+        """
+        mdau.link_executable(self.task_config, 'gdas_ens_handler.x')
+        exec_cmd = Executable(self.task_config.APRUN_MARINEBMAT)
+        exec_name = os.path.join(self.task_config.DATA, 'gdas_ens_handler.x')
+        exec_cmd.add_default_arg(exec_name)
+        exec_cmd.add_default_arg('soca_ensb.yaml')
+
+        # generate the ensemble perturbations
+        mdau.run(exec_cmd)
+
+    @logit(logger)
+    def hybrid_weight(self: Task) -> None:
+        """Generate the hybrid weights for the 3DEnVAR
+
+        This method will generate the 3D hybrid weight fields for the 3DEnVAR,
+        one for each variable.
+        TODO(G): Currently implemented for the specific case of the static ensemble members only
+        """
+        mdau.link_executable(self.task_config, 'gdas_socahybridweights.x')
+        exec_cmd = Executable(self.task_config.APRUN_MARINEBMAT)
+        exec_name = os.path.join(self.task_config.DATA, 'gdas_socahybridweights.x')
+        exec_cmd.add_default_arg(exec_name)
+        exec_cmd.add_default_arg('soca_ensweights.yaml')
+
+        # compute the ensemble weights
+        mdau.run(exec_cmd)
+
+    @logit(logger)
+    def execute(self: Task) -> None:
+        """Generate the full B-matrix
+
+        This method will generate the full B-matrix according to the configuration.
+        """
+        chdir(self.task_config.DATA)
+        self.gridgen()  # TODO: This should be optional in case the geometry file was staged
+        self.variance_partitioning()
+        self.horizontal_diffusion()  # TODO: Make this optional once we've converged on an acceptable set of scales
+        self.vertical_diffusion()
+        # hybrid EnVAR case
+        if self.task_config.DOHYBVAR == "YES" or self.task_config.NMEM_ENS > 2:
+            self.ensemble_perturbations()  # TODO: refactor this from the old scripts
+            self.hybrid_weight()  # TODO: refactor this from the old scripts
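+
+    # After execute(), the run directory holds the products that finalize() below
+    # publishes to the ROTDIR: soca_gridspec.nc (gridgen), the diffusion coefficients
+    # hz_ocean.nc, hz_ice.nc and vt_ocean.nc, the diagb background error standard
+    # deviations under diagb/, and the hybrid weights when the hybrid case is active.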
+
+    @logit(logger)
+    def finalize(self: Task) -> None:
+        """Finalize the global B-matrix job
+
+        This method will finalize the global B-matrix job.
+        This includes:
+        - copying the generated static, but cycle-dependent, background error files to the ROTDIR
+        - copying the generated YAML files from initialize to the ROTDIR
+        - keeping the re-balanced ensemble perturbation files in DATAenspert
+        - ...
+        """
+        # Copy the soca grid if it was created
+        grid_file = os.path.join(self.task_config.DATA, 'soca_gridspec.nc')
+        if os.path.exists(grid_file):
+            logger.info("Copying the soca grid file to the ROTDIR")
+            FileHandler({'copy': [[grid_file,
+                                   os.path.join(self.task_config.COMOUT_OCEAN_BMATRIX, 'soca_gridspec.nc')]]}).sync()
+
+        # Copy the diffusion coefficient files to the ROTDIR
+        logger.info("Copying the diffusion coefficient files to the ROTDIR")
+        diffusion_coeff_list = []
+        for diff_type in ['hz', 'vt']:
+            src = os.path.join(self.task_config.DATA, f"{diff_type}_ocean.nc")
+            dest = os.path.join(self.task_config.COMOUT_OCEAN_BMATRIX,
+                                f"{self.task_config.APREFIX}{diff_type}_ocean.nc")
+            diffusion_coeff_list.append([src, dest])
+
+        src = os.path.join(self.task_config.DATA, "hz_ice.nc")
+        dest = os.path.join(self.task_config.COMOUT_ICE_BMATRIX,
+                            f"{self.task_config.APREFIX}hz_ice.nc")
+        diffusion_coeff_list.append([src, dest])
+
+        FileHandler({'copy': diffusion_coeff_list}).sync()
+
+        # Copy the diag B files to the ROTDIR
+        logger.info("Copying diag B files to the ROTDIR")
+        diagb_list = []
+        window_end_iso = self.task_config.MARINE_WINDOW_END.strftime('%Y-%m-%dT%H:%M:%SZ')
+
+        # ocean diag B
+        src = os.path.join(self.task_config.DATA, 'diagb', f"ocn.bkgerr_stddev.incr.{window_end_iso}.nc")
+        dst = os.path.join(self.task_config.COMOUT_OCEAN_BMATRIX,
+                           f"{self.task_config.APREFIX}ocean.bkgerr_stddev.nc")
+        diagb_list.append([src, dst])
+
+        # ice diag B
+        src = os.path.join(self.task_config.DATA, 'diagb', f"ice.bkgerr_stddev.incr.{window_end_iso}.nc")
+        dst = os.path.join(self.task_config.COMOUT_ICE_BMATRIX,
+                           f"{self.task_config.APREFIX}ice.bkgerr_stddev.nc")
+        diagb_list.append([src, dst])
+
+        FileHandler({'copy': diagb_list}).sync()
+
+        # Copy the ensemble perturbation diagnostics to the ROTDIR
+        if self.task_config.DOHYBVAR == "YES" or self.task_config.NMEM_ENS > 2:
+            window_middle_iso = self.task_config.MARINE_WINDOW_MIDDLE.strftime('%Y-%m-%dT%H:%M:%SZ')
+            weight_list = []
+            src = os.path.join(self.task_config.DATA, f"ocn.ens_weights.incr.{window_middle_iso}.nc")
+            dst = os.path.join(self.task_config.COMOUT_OCEAN_BMATRIX,
+                               f"{self.task_config.APREFIX}ocean.ens_weights.nc")
+            weight_list.append([src, dst])
+
+            src = os.path.join(self.task_config.DATA, f"ice.ens_weights.incr.{window_middle_iso}.nc")
+            dst = os.path.join(self.task_config.COMOUT_ICE_BMATRIX,
+                               f"{self.task_config.APREFIX}ice.ens_weights.nc")
+            weight_list.append([src, dst])
+
+            # TODO(G): missing ssh_steric_stddev, ssh_unbal_stddev, ssh_total_stddev and steric_explained_variance
+
+            FileHandler({'copy': weight_list}).sync()
+
+        # Copy the YAML files to the OCEAN ROTDIR
+        yamls = glob.glob(os.path.join(self.task_config.DATA, '*.yaml'))
+        yaml_list = []
+        for yaml_file in yamls:
+            dest = os.path.join(self.task_config.COMOUT_OCEAN_BMATRIX,
+                                f"{self.task_config.APREFIX}{os.path.basename(yaml_file)}")
+            yaml_list.append([yaml_file, dest])
+        FileHandler({'copy': yaml_list}).sync()
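+
+# A minimal driver sketch (illustrative only; the entry-point script body is an
+# assumption, while jobs/rocoto/marinebmat.sh is the task command registered in
+# workflow/rocoto/gfs_tasks.py):
+#
+#   import os
+#   from wxflow import cast_strdict_as_dtypedict
+#   from pygfs.task.marine_bmat import MarineBMat
+#
+#   config = cast_strdict_as_dtypedict(os.environ)
+#   marineBMat = MarineBMat(config)
+#   marineBMat.initialize()
+#   marineBMat.execute()
+#   marineBMat.finalize()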
diff --git a/ush/python/pygfs/task/marine_letkf.py b/ush/python/pygfs/task/marine_letkf.py
index 0fdd3d9aba..36c26d594b 100644
--- a/ush/python/pygfs/task/marine_letkf.py
+++ b/ush/python/pygfs/task/marine_letkf.py
@@ -72,7 +72,7 @@ def initialize(self):
         ensbkgconf.RUN = 'enkfgdas'
         soca_ens_bkg_stage_list = parse_j2yaml(self.task_config.SOCA_ENS_BKG_STAGE_YAML_TMPL, ensbkgconf)
         FileHandler(soca_ens_bkg_stage_list).sync()
-        soca_fix_stage_list = parse_j2yaml(self.task_config.SOCA_FIX_STAGE_YAML_TMPL, self.task_config)
+        soca_fix_stage_list = parse_j2yaml(self.task_config.SOCA_FIX_YAML_TMPL, self.task_config)
         FileHandler(soca_fix_stage_list).sync()
         letkf_stage_list = parse_j2yaml(self.task_config.MARINE_LETKF_STAGE_YAML_TMPL, self.task_config)
         FileHandler(letkf_stage_list).sync()
diff --git a/ush/python/pygfs/utils/__init__.py b/ush/python/pygfs/utils/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/ush/python/pygfs/utils/marine_da_utils.py b/ush/python/pygfs/utils/marine_da_utils.py
new file mode 100644
index 0000000000..016551878b
--- /dev/null
+++ b/ush/python/pygfs/utils/marine_da_utils.py
@@ -0,0 +1,99 @@
+import f90nml
+import os
+from logging import getLogger
+import xarray as xr
+
+from wxflow import (FileHandler,
+                    logit,
+                    WorkflowException,
+                    AttrDict,
+                    parse_j2yaml,
+                    Executable,
+                    jinja)
+
+logger = getLogger(__name__.split('.')[-1])
+
+
+@logit(logger)
+def run(exec_cmd: Executable) -> None:
+    """Run the executable command
+    """
+    logger.info(f"Executing {exec_cmd}")
+    try:
+        logger.debug(f"Executing {exec_cmd}")
+        exec_cmd()
+    except OSError:
+        raise OSError(f"Failed to execute {exec_cmd}")
+    except Exception:
+        raise WorkflowException(f"An error occurred during execution of {exec_cmd}")
+
+
+@logit(logger)
+def link_executable(task_config: AttrDict, exe_name: str) -> None:
+    """Link the executable to the DATA directory
+    """
+    logger.info(f"Link executable {exe_name}")
+    logger.warning("Linking is not permitted per EE2.")
+    exe_src = os.path.join(task_config.EXECgfs, exe_name)
+    exe_dest = os.path.join(task_config.DATA, exe_name)
+    if os.path.exists(exe_dest):
+        os.remove(exe_dest)
+    os.symlink(exe_src, exe_dest)
+
+
+@logit(logger)
+def prep_input_nml(task_config: AttrDict) -> None:
+    """Prepare the input.nml file
+    TODO: Use jinja2 instead of f90nml
+    """
+    # stage input.nml
+    mom_input_nml_tmpl_src = os.path.join(task_config.HOMEgdas, 'parm', 'soca', 'fms', 'input.nml')
+    mom_input_nml_tmpl = os.path.join(task_config.DATA, 'mom_input.nml.tmpl')
+    FileHandler({'copy': [[mom_input_nml_tmpl_src, mom_input_nml_tmpl]]}).sync()
+
+    # swap date and stacksize
+    domain_stack_size = task_config.DOMAIN_STACK_SIZE
+    ymdhms = [int(s) for s in task_config.MARINE_WINDOW_END.strftime('%Y,%m,%d,%H,%M,%S').split(',')]
+    with open(mom_input_nml_tmpl, 'r') as nml_file:
+        nml = f90nml.read(nml_file)
+        nml['ocean_solo_nml']['date_init'] = ymdhms
+        nml['fms_nml']['domains_stack_size'] = int(domain_stack_size)
+        nml.write('mom_input.nml')
+
+
+@logit(logger)
+def cice_hist2fms(input_filename: str, output_filename: str) -> None:
+    """ Reformat the CICE history file so it can be read by SOCA/FMS
+    Simple reformatting utility to allow soca/fms to read the CICE history files
+    """
+
+    # open the CICE history file
+    ds = xr.open_dataset(input_filename)
+
+    if 'aicen' in ds.variables and 'hicen' in ds.variables and 'hsnon' in ds.variables:
+        logger.info("*** Already reformatted, skipping.")
+        return
+
+    # rename the dimensions to xaxis_1 and yaxis_1
+    ds = ds.rename({'ni': 'xaxis_1', 'nj': 'yaxis_1'})
+
+    # rename the variables
+    ds = ds.rename({'aice_h': 'aicen', 'hi_h': 'hicen', 'hs_h': 'hsnon'})
+
+    # Save the new netCDF file
+    ds.to_netcdf(output_filename, mode='w')
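+
+# Usage note: marine_bmat.initialize() calls cice_hist2fms() in place on the staged
+# ice backgrounds, e.g. cice_hist2fms('./bkg/ice.bkg.f006.nc', './bkg/ice.bkg.f006.nc');
+# the early return above makes that conversion idempotent across retries.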
+
+
+@logit(logger)
+def stage_ens_mem(task_config: AttrDict) -> None:
+    """ Copy the ensemble members to the DATA directory
+    Stage the ensemble member backgrounds in DATA and reformat the CICE history files
+    """
+    logger.info("---------------- Stage ensemble members")
+    ensbkgconf = AttrDict(task_config)
+    ensbkgconf.RUN = task_config.GDUMP_ENS
+    logger.debug(f"{jinja.Jinja(task_config.MARINE_ENSDA_STAGE_BKG_YAML_TMPL, ensbkgconf).render}")
+    ens_stage_list = parse_j2yaml(task_config.MARINE_ENSDA_STAGE_BKG_YAML_TMPL, ensbkgconf)
+    logger.info(f"{ens_stage_list}")
+    FileHandler(ens_stage_list).sync()
diff --git a/versions/fix.ver b/versions/fix.ver
index 1d54572c0b..6e35e651cf 100644
--- a/versions/fix.ver
+++ b/versions/fix.ver
@@ -9,6 +9,7 @@ export cpl_ver=20230526
 export datm_ver=20220805
 export gdas_crtm_ver=20220805
 export gdas_fv3jedi_ver=20220805
+export gdas_soca_ver=20240624
 export gdas_gsibec_ver=20240416
 export gdas_obs_ver=20240213
 export glwu_ver=20220805
diff --git a/workflow/applications/gfs_cycled.py b/workflow/applications/gfs_cycled.py
index b5da50515c..e049a7d422 100644
--- a/workflow/applications/gfs_cycled.py
+++ b/workflow/applications/gfs_cycled.py
@@ -43,7 +43,7 @@ def _get_app_configs(self):
             configs += ['anal', 'analdiag']
 
         if self.do_jediocnvar:
-            configs += ['prepoceanobs', 'ocnanalprep', 'ocnanalbmat', 'ocnanalrun']
+            configs += ['prepoceanobs', 'ocnanalprep', 'marinebmat', 'ocnanalrun']
             if self.do_hybvar:
                 configs += ['ocnanalecen']
             configs += ['ocnanalchkpt', 'ocnanalpost']
@@ -143,7 +143,7 @@ def get_task_names(self):
             gdas_gfs_common_tasks_before_fcst += ['anal']
 
         if self.do_jediocnvar:
-            gdas_gfs_common_tasks_before_fcst += ['prepoceanobs', 'ocnanalprep', 'ocnanalbmat', 'ocnanalrun']
+            gdas_gfs_common_tasks_before_fcst += ['prepoceanobs', 'ocnanalprep', 'marinebmat', 'ocnanalrun']
             if self.do_hybvar:
                 gdas_gfs_common_tasks_before_fcst += ['ocnanalecen']
             gdas_gfs_common_tasks_before_fcst += ['ocnanalchkpt', 'ocnanalpost']
diff --git a/workflow/rocoto/gfs_tasks.py b/workflow/rocoto/gfs_tasks.py
index 60a08d7093..65c9da160f 100644
--- a/workflow/rocoto/gfs_tasks.py
+++ b/workflow/rocoto/gfs_tasks.py
@@ -654,23 +654,24 @@ def prepoceanobs(self):
         return task
 
-    def ocnanalprep(self):
+    def marinebmat(self):
+
+        ocean_hist_path = self._template_to_rocoto_cycstring(self._base["COM_OCEAN_HISTORY_TMPL"], {'RUN': 'gdas'})
 
         deps = []
-        dep_dict = {'type': 'task', 'name': f'{self.run}prepoceanobs'}
-        deps.append(rocoto.add_dependency(dep_dict))
-        dep_dict = {'type': 'task', 'name': 'gdasfcst', 'offset': f"-{timedelta_to_HMS(self._base['cycle_interval'])}"}
+        data = f'{ocean_hist_path}/gdas.ocean.t@Hz.inst.f009.nc'
+        dep_dict = {'type': 'data', 'data': data, 'offset': f"-{timedelta_to_HMS(self._base['cycle_interval'])}"}
         deps.append(rocoto.add_dependency(dep_dict))
-        dependencies = rocoto.create_dependency(dep_condition='and', dep=deps)
+        dependencies = rocoto.create_dependency(dep=deps)
 
-        resources = self.get_resource('ocnanalprep')
-        task_name = f'{self.run}ocnanalprep'
+        resources = self.get_resource('marinebmat')
+        task_name = f'{self.run}marinebmat'
         task_dict = {'task_name': task_name,
                      'resources': resources,
                      'dependency': dependencies,
                      'envars': self.envars,
                      'cycledef': self.run.replace('enkf', ''),
-                     'command': f'{self.HOMEgfs}/jobs/rocoto/ocnanalprep.sh',
+                     'command': f'{self.HOMEgfs}/jobs/rocoto/marinebmat.sh',
                      'job_name': f'{self.pslot}_{task_name}_@H',
                      'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
                      'maxtries': '&MAXTRIES;'
@@ -680,21 +681,25 @@ def ocnanalbmat(self):
+    def ocnanalprep(self):
 
         deps = []
-        dep_dict = {'type': 'task', 'name': f'{self.run}ocnanalprep'}
+        dep_dict = {'type': 'task', 'name': f'{self.run}prepoceanobs'}
         deps.append(rocoto.add_dependency(dep_dict))
-        dependencies = rocoto.create_dependency(dep=deps)
+        dep_dict = {'type': 'task', 'name': f'{self.run}marinebmat'}
+        deps.append(rocoto.add_dependency(dep_dict))
+        dep_dict = {'type': 'task', 'name': 'gdasfcst', 'offset': f"-{timedelta_to_HMS(self._base['cycle_interval'])}"}
+        deps.append(rocoto.add_dependency(dep_dict))
+        dependencies = rocoto.create_dependency(dep_condition='and', dep=deps)
 
-        resources = self.get_resource('ocnanalbmat')
-        task_name = f'{self.run}ocnanalbmat'
+        resources = self.get_resource('ocnanalprep')
+        task_name = f'{self.run}ocnanalprep'
         task_dict = {'task_name': task_name,
                      'resources': resources,
                      'dependency': dependencies,
                      'envars': self.envars,
                      'cycledef': self.run.replace('enkf', ''),
-                     'command': f'{self.HOMEgfs}/jobs/rocoto/ocnanalbmat.sh',
+                     'command': f'{self.HOMEgfs}/jobs/rocoto/ocnanalprep.sh',
                      'job_name': f'{self.pslot}_{task_name}_@H',
                      'log': f'{self.rotdir}/logs/@Y@m@d@H/{task_name}.log',
                      'maxtries': '&MAXTRIES;'
@@ -707,7 +712,7 @@ def ocnanalrun(self):
         deps = []
-        dep_dict = {'type': 'task', 'name': f'{self.run}ocnanalbmat'}
+        dep_dict = {'type': 'task', 'name': f'{self.run}ocnanalprep'}
         deps.append(rocoto.add_dependency(dep_dict))
         dependencies = rocoto.create_dependency(dep=deps)
diff --git a/workflow/rocoto/tasks.py b/workflow/rocoto/tasks.py
index bf50f02b41..91a354c119 100644
--- a/workflow/rocoto/tasks.py
+++ b/workflow/rocoto/tasks.py
@@ -16,7 +16,7 @@ class Tasks:
     'prep', 'anal', 'sfcanl', 'analcalc', 'analdiag', 'arch', "cleanup",
     'prepatmiodaobs', 'atmanlinit', 'atmanlvar', 'atmanlfv3inc', 'atmanlfinal',
     'prepoceanobs',
-    'ocnanalprep', 'ocnanalbmat', 'ocnanalrun', 'ocnanalecen', 'ocnanalchkpt', 'ocnanalpost', 'ocnanalvrfy',
+    'ocnanalprep', 'marinebmat', 'ocnanalrun', 'ocnanalecen', 'ocnanalchkpt', 'ocnanalpost', 'ocnanalvrfy',
     'earc', 'ecen', 'echgres', 'ediag', 'efcs', 'eobs', 'eomg', 'epos', 'esfc', 'eupd',
     'atmensanlinit', 'atmensanlletkf', 'atmensanlfv3inc', 'atmensanlfinal',