diff --git a/.gitmodules b/.gitmodules
index ea1b5c06af..3514363414 100644
--- a/.gitmodules
+++ b/.gitmodules
@@ -1,6 +1,6 @@
[submodule "sorc/ufs_model.fd"]
path = sorc/ufs_model.fd
- url = https://github.com/ufs-community/ufs-weather-model
+ url = https://github.com/ufs-community/ufs-weather-model.git
ignore = dirty
[submodule "sorc/wxflow"]
path = sorc/wxflow
diff --git a/env/AWSPW.env b/env/AWSPW.env
index 7d81000f5c..ac949710db 100755
--- a/env/AWSPW.env
+++ b/env/AWSPW.env
@@ -14,8 +14,8 @@ fi
step=$1
-export launcher="mpiexec.hydra"
-export mpmd_opt=""
+export launcher="srun --mpi=pmi2 -l"
+export mpmd_opt="--distribution=block:block --hint=nomultithread --cpus-per-task=1"
# Configure MPI environment
export OMP_STACKSIZE=2048000
@@ -36,7 +36,7 @@ if [[ "${step}" = "fcst" ]] || [[ "${step}" = "efcs" ]]; then
(( nnodes = (${!nprocs}+${!ppn}-1)/${!ppn} ))
(( ntasks = nnodes*${!ppn} ))
# With ESMF threading, the model wants to use the full node
- export APRUN_UFS="${launcher} -n ${ntasks}"
+ export APRUN_UFS="${launcher} -n ${ntasks} ${mpmd_opt}"
unset nprocs ppn nnodes ntasks
elif [[ "${step}" = "post" ]]; then
diff --git a/env/AZUREPW.env b/env/AZUREPW.env
new file mode 100755
index 0000000000..185599682c
--- /dev/null
+++ b/env/AZUREPW.env
@@ -0,0 +1,298 @@
+#! /usr/bin/env bash
+
+if [[ $# -ne 1 ]]; then
+
+ echo "Must specify an input argument to set runtime environment variables!"
+ echo "argument can be any one of the following:"
+ echo "atmanlrun atmensanlrun aeroanlrun snowanl"
+ echo "anal sfcanl fcst post metp"
+ echo "eobs eupd ecen efcs epos"
+ echo "postsnd awips gempak"
+ exit 1
+
+fi
+
+step=$1
+
+export launcher="srun --mpi=pmi2 -l"
+export mpmd_opt="--distribution=block:block --hint=nomultithread --cpus-per-task=1"
+
+#export POSTAMBLE_CMD='report-mem'
+
+# Configure MPI environment
+#export I_MPI_ADJUST_ALLREDUCE=5
+#export MPI_BUFS_PER_PROC=2048
+#export MPI_BUFS_PER_HOST=2048
+#export MPI_GROUP_MAX=256
+#export MPI_MEMMAP_OFF=1
+#export MP_STDOUTMODE="ORDERED"
+export OMP_STACKSIZE=2048000
+export NTHSTACK=1024000000
+#export LD_BIND_NOW=1
+
+ulimit -s unlimited
+ulimit -a
+
+if [[ "${step}" = "prep" ]] || [[ "${step}" = "prepbufr" ]]; then
+
+ nth_max=$((npe_node_max / npe_node_prep))
+
+ export POE="NO"
+ export BACK="NO"
+ export sys_tp="HERA"
+ export launcher_PREP="srun"
+
+elif [[ "${step}" = "prepsnowobs" ]]; then
+
+ export APRUN_CALCFIMS="${launcher} -n 1"
+
+elif [[ "${step}" = "waveinit" ]] || [[ "${step}" = "waveprep" ]] || [[ "${step}" = "wavepostsbs" ]] || [[ "${step}" = "wavepostbndpnt" ]] || [[ "${step}" = "wavepostbndpntbll" ]] || [[ "${step}" = "wavepostpnt" ]]; then
+
+ export CFP_MP="YES"
+ if [[ "${step}" = "waveprep" ]]; then export MP_PULSE=0 ; fi
+ export wavempexec=${launcher}
+ export wave_mpmd=${mpmd_opt}
+
+elif [[ "${step}" = "atmanlrun" ]]; then
+
+ nth_max=$((npe_node_max / npe_node_atmanlrun))
+
+ export NTHREADS_ATMANL=${nth_atmanlrun:-${nth_max}}
+ [[ ${NTHREADS_ATMANL} -gt ${nth_max} ]] && export NTHREADS_ATMANL=${nth_max}
+ export APRUN_ATMANL="${launcher} -n ${npe_atmanlrun} --cpus-per-task=${NTHREADS_ATMANL}"
+
+elif [[ "${step}" = "atmensanlrun" ]]; then
+
+ nth_max=$((npe_node_max / npe_node_atmensanlrun))
+
+ export NTHREADS_ATMENSANL=${nth_atmensanlrun:-${nth_max}}
+ [[ ${NTHREADS_ATMENSANL} -gt ${nth_max} ]] && export NTHREADS_ATMENSANL=${nth_max}
+ export APRUN_ATMENSANL="${launcher} -n ${npe_atmensanlrun} --cpus-per-task=${NTHREADS_ATMENSANL}"
+
+elif [[ "${step}" = "aeroanlrun" ]]; then
+
+ export APRUNCFP="${launcher} -n \$ncmd ${mpmd_opt}"
+
+ nth_max=$((npe_node_max / npe_node_aeroanlrun))
+
+ export NTHREADS_AEROANL=${nth_aeroanlrun:-${nth_max}}
+ [[ ${NTHREADS_AEROANL} -gt ${nth_max} ]] && export NTHREADS_AEROANL=${nth_max}
+ export APRUN_AEROANL="${launcher} -n ${npe_aeroanlrun} --cpus-per-task=${NTHREADS_AEROANL}"
+
+elif [[ "${step}" = "snowanl" ]]; then
+
+ nth_max=$((npe_node_max / npe_node_snowanl))
+
+ export NTHREADS_SNOWANL=${nth_snowanl:-${nth_max}}
+ [[ ${NTHREADS_SNOWANL} -gt ${nth_max} ]] && export NTHREADS_SNOWANL=${nth_max}
+ export APRUN_SNOWANL="${launcher} -n ${npe_snowanl} --cpus-per-task=${NTHREADS_SNOWANL}"
+
+ export APRUN_APPLY_INCR="${launcher} -n 6"
+
+elif [[ "${step}" = "ocnanalbmat" ]]; then
+
+ export APRUNCFP="${launcher} -n \$ncmd --multi-prog"
+
+ export APRUN_OCNANAL="${launcher} -n ${npe_ocnanalbmat}"
+
+elif [[ "${step}" = "ocnanalrun" ]]; then
+
+ export APRUNCFP="${launcher} -n \$ncmd --multi-prog"
+
+ export APRUN_OCNANAL="${launcher} -n ${npe_ocnanalrun}"
+
+elif [[ "${step}" = "ocnanalchkpt" ]]; then
+
+ export APRUNCFP="${launcher} -n \$ncmd --multi-prog"
+
+ export APRUN_OCNANAL="${launcher} -n ${npe_ocnanalchkpt}"
+
+elif [[ "${step}" = "ocnanalecen" ]]; then
+
+ nth_max=$((npe_node_max / npe_node_ocnanalecen))
+
+ export NTHREADS_OCNANALECEN=${nth_ocnanalecen:-${nth_max}}
+ [[ ${NTHREADS_OCNANALECEN} -gt ${nth_max} ]] && export NTHREADS_OCNANALECEN=${nth_max}
+ export APRUN_OCNANALECEN="${launcher} -n ${npe_ocnanalecen} --cpus-per-task=${NTHREADS_OCNANALECEN}"
+
+elif [[ "${step}" = "anal" ]] || [[ "${step}" = "analcalc" ]]; then
+
+ export MKL_NUM_THREADS=4
+ export MKL_CBWR=AUTO
+
+ export CFP_MP=${CFP_MP:-"YES"}
+ export USE_CFP=${USE_CFP:-"YES"}
+ export APRUNCFP="${launcher} -n \$ncmd ${mpmd_opt}"
+
+ nth_max=$((npe_node_max / npe_node_anal))
+
+ export NTHREADS_GSI=${nth_anal:-${nth_max}}
+ [[ ${NTHREADS_GSI} -gt ${nth_max} ]] && export NTHREADS_GSI=${nth_max}
+ export APRUN_GSI="${launcher} -n ${npe_gsi:-${npe_anal}} --cpus-per-task=${NTHREADS_GSI}"
+
+ export NTHREADS_CALCINC=${nth_calcinc:-1}
+ [[ ${NTHREADS_CALCINC} -gt ${nth_max} ]] && export NTHREADS_CALCINC=${nth_max}
+ export APRUN_CALCINC="${launcher} \$ncmd --cpus-per-task=${NTHREADS_CALCINC}"
+
+ export NTHREADS_CYCLE=${nth_cycle:-12}
+ [[ ${NTHREADS_CYCLE} -gt ${npe_node_max} ]] && export NTHREADS_CYCLE=${npe_node_max}
+ npe_cycle=${ntiles:-6}
+ export APRUN_CYCLE="${launcher} -n ${npe_cycle} --cpus-per-task=${NTHREADS_CYCLE}"
+
+ export NTHREADS_GAUSFCANL=1
+ npe_gausfcanl=${npe_gausfcanl:-1}
+ export APRUN_GAUSFCANL="${launcher} -n ${npe_gausfcanl} --cpus-per-task=${NTHREADS_GAUSFCANL}"
+
+elif [[ "${step}" = "sfcanl" ]]; then
+
+ nth_max=$((npe_node_max / npe_node_sfcanl))
+
+ export NTHREADS_CYCLE=${nth_sfcanl:-14}
+ [[ ${NTHREADS_CYCLE} -gt ${npe_node_max} ]] && export NTHREADS_CYCLE=${npe_node_max}
+ npe_sfcanl=${ntiles:-6}
+ export APRUN_CYCLE="${launcher} -n ${npe_sfcanl} --cpus-per-task=${NTHREADS_CYCLE}"
+
+elif [[ "${step}" = "eobs" ]]; then
+
+ export MKL_NUM_THREADS=4
+ export MKL_CBWR=AUTO
+
+ nth_max=$((npe_node_max / npe_node_eobs))
+
+ export NTHREADS_GSI=${nth_eobs:-${nth_max}}
+ [[ ${NTHREADS_GSI} -gt ${nth_max} ]] && export NTHREADS_GSI=${nth_max}
+ export APRUN_GSI="${launcher} -n ${npe_gsi:-${npe_eobs}} --cpus-per-task=${NTHREADS_GSI}"
+
+ export CFP_MP=${CFP_MP:-"YES"}
+ export USE_CFP=${USE_CFP:-"YES"}
+ export APRUNCFP="${launcher} -n \$ncmd ${mpmd_opt}"
+
+elif [[ "${step}" = "eupd" ]]; then
+
+ nth_max=$((npe_node_max / npe_node_eupd))
+
+ export NTHREADS_ENKF=${nth_eupd:-${nth_max}}
+ [[ ${NTHREADS_ENKF} -gt ${nth_max} ]] && export NTHREADS_ENKF=${nth_max}
+ export APRUN_ENKF="${launcher} -n ${npe_enkf:-${npe_eupd}} --cpus-per-task=${NTHREADS_ENKF}"
+
+ export CFP_MP=${CFP_MP:-"YES"}
+ export USE_CFP=${USE_CFP:-"YES"}
+ export APRUNCFP="${launcher} -n \$ncmd ${mpmd_opt}"
+
+elif [[ "${step}" = "fcst" ]] || [[ "${step}" = "efcs" ]]; then
+
+ if [[ "${CDUMP}" =~ "gfs" ]]; then
+ nprocs="npe_${step}_gfs"
+    ppn="npe_node_${step}_gfs"
+    # an assignment always succeeds, so "|| ppn=..." could never fire;
+    # fall back only when the _gfs variant is not defined
+    if [[ -z "${!ppn+x}" ]]; then ppn="npe_node_${step}"; fi
+ else
+ nprocs="npe_${step}"
+ ppn="npe_node_${step}"
+ fi
+ (( nnodes = (${!nprocs}+${!ppn}-1)/${!ppn} ))
+ (( ntasks = nnodes*${!ppn} ))
+ # With ESMF threading, the model wants to use the full node
+ export APRUN_UFS="${launcher} -n ${ntasks}"
+ unset nprocs ppn nnodes ntasks
+
+elif [[ "${step}" = "upp" ]]; then
+
+ nth_max=$((npe_node_max / npe_node_upp))
+
+ export NTHREADS_UPP=${nth_upp:-1}
+ [[ ${NTHREADS_UPP} -gt ${nth_max} ]] && export NTHREADS_UPP=${nth_max}
+ export APRUN_UPP="${launcher} -n ${npe_upp} --cpus-per-task=${NTHREADS_UPP}"
+
+elif [[ "${step}" = "atmos_products" ]]; then
+
+  export USE_CFP="YES" # Use MPMD for downstream product generation
+
+elif [[ "${step}" = "oceanice_products" ]]; then
+
+ nth_max=$((npe_node_max / npe_node_oceanice_products))
+
+ export NTHREADS_OCNICEPOST=${nth_oceanice_products:-1}
+ export APRUN_OCNICEPOST="${launcher} -n 1 --cpus-per-task=${NTHREADS_OCNICEPOST}"
+
+elif [[ "${step}" = "ecen" ]]; then
+
+ nth_max=$((npe_node_max / npe_node_ecen))
+
+ export NTHREADS_ECEN=${nth_ecen:-${nth_max}}
+ [[ ${NTHREADS_ECEN} -gt ${nth_max} ]] && export NTHREADS_ECEN=${nth_max}
+ export APRUN_ECEN="${launcher} -n ${npe_ecen} --cpus-per-task=${NTHREADS_ECEN}"
+
+ export NTHREADS_CHGRES=${nth_chgres:-12}
+ [[ ${NTHREADS_CHGRES} -gt ${npe_node_max} ]] && export NTHREADS_CHGRES=${npe_node_max}
+ export APRUN_CHGRES="time"
+
+ export NTHREADS_CALCINC=${nth_calcinc:-1}
+ [[ ${NTHREADS_CALCINC} -gt ${nth_max} ]] && export NTHREADS_CALCINC=${nth_max}
+ export APRUN_CALCINC="${launcher} -n ${npe_ecen} --cpus-per-task=${NTHREADS_CALCINC}"
+
+elif [[ "${step}" = "esfc" ]]; then
+
+ nth_max=$((npe_node_max / npe_node_esfc))
+
+ export NTHREADS_ESFC=${nth_esfc:-${nth_max}}
+ [[ ${NTHREADS_ESFC} -gt ${nth_max} ]] && export NTHREADS_ESFC=${nth_max}
+ export APRUN_ESFC="${launcher} -n ${npe_esfc} --cpus-per-task=${NTHREADS_ESFC}"
+
+ export NTHREADS_CYCLE=${nth_cycle:-14}
+ [[ ${NTHREADS_CYCLE} -gt ${npe_node_max} ]] && export NTHREADS_CYCLE=${npe_node_max}
+ export APRUN_CYCLE="${launcher} -n ${npe_esfc} --cpus-per-task=${NTHREADS_CYCLE}"
+
+elif [[ "${step}" = "epos" ]]; then
+
+ nth_max=$((npe_node_max / npe_node_epos))
+
+ export NTHREADS_EPOS=${nth_epos:-${nth_max}}
+ [[ ${NTHREADS_EPOS} -gt ${nth_max} ]] && export NTHREADS_EPOS=${nth_max}
+ export APRUN_EPOS="${launcher} -n ${npe_epos} --cpus-per-task=${NTHREADS_EPOS}"
+
+elif [[ "${step}" = "postsnd" ]]; then
+
+ export CFP_MP="YES"
+
+ nth_max=$((npe_node_max / npe_node_postsnd))
+
+ export NTHREADS_POSTSND=${nth_postsnd:-1}
+ [[ ${NTHREADS_POSTSND} -gt ${nth_max} ]] && export NTHREADS_POSTSND=${nth_max}
+ export APRUN_POSTSND="${launcher} -n ${npe_postsnd} --cpus-per-task=${NTHREADS_POSTSND}"
+
+ export NTHREADS_POSTSNDCFP=${nth_postsndcfp:-1}
+ [[ ${NTHREADS_POSTSNDCFP} -gt ${nth_max} ]] && export NTHREADS_POSTSNDCFP=${nth_max}
+ export APRUN_POSTSNDCFP="${launcher} -n ${npe_postsndcfp} ${mpmd_opt}"
+
+elif [[ "${step}" = "awips" ]]; then
+
+ nth_max=$((npe_node_max / npe_node_awips))
+
+ export NTHREADS_AWIPS=${nth_awips:-2}
+ [[ ${NTHREADS_AWIPS} -gt ${nth_max} ]] && export NTHREADS_AWIPS=${nth_max}
+ export APRUN_AWIPSCFP="${launcher} -n ${npe_awips} ${mpmd_opt}"
+
+elif [[ "${step}" = "gempak" ]]; then
+
+ export CFP_MP="YES"
+
+ if [[ ${CDUMP} == "gfs" ]]; then
+ npe_gempak=${npe_gempak_gfs}
+ npe_node_gempak=${npe_node_gempak_gfs}
+ fi
+
+ nth_max=$((npe_node_max / npe_node_gempak))
+
+ export NTHREADS_GEMPAK=${nth_gempak:-1}
+ [[ ${NTHREADS_GEMPAK} -gt ${nth_max} ]] && export NTHREADS_GEMPAK=${nth_max}
+ export APRUN="${launcher} -n ${npe_gempak} ${mpmd_opt}"
+
+
+elif [[ "${step}" = "fit2obs" ]]; then
+
+ nth_max=$((npe_node_max / npe_node_fit2obs))
+
+ export NTHREADS_FIT2OBS=${nth_fit2obs:-1}
+ [[ ${NTHREADS_FIT2OBS} -gt ${nth_max} ]] && export NTHREADS_FIT2OBS=${nth_max}
+ export MPIRUN="${launcher} -n ${npe_fit2obs} --cpus-per-task=${NTHREADS_FIT2OBS}"
+
+fi
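Every per-step block in this file follows the same thread-capping pattern; a standalone sketch with made-up values (npe_node_max=24 matches the AZUREPW entry added to config.resources below):

```bash
#!/usr/bin/env bash
npe_node_max=24         # AZUREPW, per config.resources in this PR
npe_node_atmanlrun=8    # hypothetical tasks per node for this step
nth_atmanlrun=4         # hypothetical requested thread count

nth_max=$((npe_node_max / npe_node_atmanlrun))   # at most 3 threads per task
NTHREADS_ATMANL=${nth_atmanlrun:-${nth_max}}     # take the request if given
[[ ${NTHREADS_ATMANL} -gt ${nth_max} ]] && NTHREADS_ATMANL=${nth_max}   # cap it
echo "NTHREADS_ATMANL=${NTHREADS_ATMANL}"        # -> 3
```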
diff --git a/env/GOOGLEPW.env b/env/GOOGLEPW.env
new file mode 100755
index 0000000000..185599682c
--- /dev/null
+++ b/env/GOOGLEPW.env
@@ -0,0 +1,298 @@
+#! /usr/bin/env bash
+
+if [[ $# -ne 1 ]]; then
+
+ echo "Must specify an input argument to set runtime environment variables!"
+ echo "argument can be any one of the following:"
+ echo "atmanlrun atmensanlrun aeroanlrun snowanl"
+ echo "anal sfcanl fcst post metp"
+ echo "eobs eupd ecen efcs epos"
+ echo "postsnd awips gempak"
+ exit 1
+
+fi
+
+step=$1
+
+export launcher="srun --mpi=pmi2 -l"
+export mpmd_opt="--distribution=block:block --hint=nomultithread --cpus-per-task=1"
+
+#export POSTAMBLE_CMD='report-mem'
+
+# Configure MPI environment
+#export I_MPI_ADJUST_ALLREDUCE=5
+#export MPI_BUFS_PER_PROC=2048
+#export MPI_BUFS_PER_HOST=2048
+#export MPI_GROUP_MAX=256
+#export MPI_MEMMAP_OFF=1
+#export MP_STDOUTMODE="ORDERED"
+export OMP_STACKSIZE=2048000
+export NTHSTACK=1024000000
+#export LD_BIND_NOW=1
+
+ulimit -s unlimited
+ulimit -a
+
+if [[ "${step}" = "prep" ]] || [[ "${step}" = "prepbufr" ]]; then
+
+ nth_max=$((npe_node_max / npe_node_prep))
+
+ export POE="NO"
+ export BACK="NO"
+ export sys_tp="HERA"
+ export launcher_PREP="srun"
+
+elif [[ "${step}" = "prepsnowobs" ]]; then
+
+ export APRUN_CALCFIMS="${launcher} -n 1"
+
+elif [[ "${step}" = "waveinit" ]] || [[ "${step}" = "waveprep" ]] || [[ "${step}" = "wavepostsbs" ]] || [[ "${step}" = "wavepostbndpnt" ]] || [[ "${step}" = "wavepostbndpntbll" ]] || [[ "${step}" = "wavepostpnt" ]]; then
+
+ export CFP_MP="YES"
+ if [[ "${step}" = "waveprep" ]]; then export MP_PULSE=0 ; fi
+ export wavempexec=${launcher}
+ export wave_mpmd=${mpmd_opt}
+
+elif [[ "${step}" = "atmanlrun" ]]; then
+
+ nth_max=$((npe_node_max / npe_node_atmanlrun))
+
+ export NTHREADS_ATMANL=${nth_atmanlrun:-${nth_max}}
+ [[ ${NTHREADS_ATMANL} -gt ${nth_max} ]] && export NTHREADS_ATMANL=${nth_max}
+ export APRUN_ATMANL="${launcher} -n ${npe_atmanlrun} --cpus-per-task=${NTHREADS_ATMANL}"
+
+elif [[ "${step}" = "atmensanlrun" ]]; then
+
+ nth_max=$((npe_node_max / npe_node_atmensanlrun))
+
+ export NTHREADS_ATMENSANL=${nth_atmensanlrun:-${nth_max}}
+ [[ ${NTHREADS_ATMENSANL} -gt ${nth_max} ]] && export NTHREADS_ATMENSANL=${nth_max}
+ export APRUN_ATMENSANL="${launcher} -n ${npe_atmensanlrun} --cpus-per-task=${NTHREADS_ATMENSANL}"
+
+elif [[ "${step}" = "aeroanlrun" ]]; then
+
+ export APRUNCFP="${launcher} -n \$ncmd ${mpmd_opt}"
+
+ nth_max=$((npe_node_max / npe_node_aeroanlrun))
+
+ export NTHREADS_AEROANL=${nth_aeroanlrun:-${nth_max}}
+ [[ ${NTHREADS_AEROANL} -gt ${nth_max} ]] && export NTHREADS_AEROANL=${nth_max}
+ export APRUN_AEROANL="${launcher} -n ${npe_aeroanlrun} --cpus-per-task=${NTHREADS_AEROANL}"
+
+elif [[ "${step}" = "snowanl" ]]; then
+
+ nth_max=$((npe_node_max / npe_node_snowanl))
+
+ export NTHREADS_SNOWANL=${nth_snowanl:-${nth_max}}
+ [[ ${NTHREADS_SNOWANL} -gt ${nth_max} ]] && export NTHREADS_SNOWANL=${nth_max}
+ export APRUN_SNOWANL="${launcher} -n ${npe_snowanl} --cpus-per-task=${NTHREADS_SNOWANL}"
+
+ export APRUN_APPLY_INCR="${launcher} -n 6"
+
+elif [[ "${step}" = "ocnanalbmat" ]]; then
+
+ export APRUNCFP="${launcher} -n \$ncmd --multi-prog"
+
+ export APRUN_OCNANAL="${launcher} -n ${npe_ocnanalbmat}"
+
+elif [[ "${step}" = "ocnanalrun" ]]; then
+
+ export APRUNCFP="${launcher} -n \$ncmd --multi-prog"
+
+ export APRUN_OCNANAL="${launcher} -n ${npe_ocnanalrun}"
+
+elif [[ "${step}" = "ocnanalchkpt" ]]; then
+
+ export APRUNCFP="${launcher} -n \$ncmd --multi-prog"
+
+ export APRUN_OCNANAL="${launcher} -n ${npe_ocnanalchkpt}"
+
+elif [[ "${step}" = "ocnanalecen" ]]; then
+
+ nth_max=$((npe_node_max / npe_node_ocnanalecen))
+
+ export NTHREADS_OCNANALECEN=${nth_ocnanalecen:-${nth_max}}
+ [[ ${NTHREADS_OCNANALECEN} -gt ${nth_max} ]] && export NTHREADS_OCNANALECEN=${nth_max}
+ export APRUN_OCNANALECEN="${launcher} -n ${npe_ocnanalecen} --cpus-per-task=${NTHREADS_OCNANALECEN}"
+
+elif [[ "${step}" = "anal" ]] || [[ "${step}" = "analcalc" ]]; then
+
+ export MKL_NUM_THREADS=4
+ export MKL_CBWR=AUTO
+
+ export CFP_MP=${CFP_MP:-"YES"}
+ export USE_CFP=${USE_CFP:-"YES"}
+ export APRUNCFP="${launcher} -n \$ncmd ${mpmd_opt}"
+
+ nth_max=$((npe_node_max / npe_node_anal))
+
+ export NTHREADS_GSI=${nth_anal:-${nth_max}}
+ [[ ${NTHREADS_GSI} -gt ${nth_max} ]] && export NTHREADS_GSI=${nth_max}
+ export APRUN_GSI="${launcher} -n ${npe_gsi:-${npe_anal}} --cpus-per-task=${NTHREADS_GSI}"
+
+ export NTHREADS_CALCINC=${nth_calcinc:-1}
+ [[ ${NTHREADS_CALCINC} -gt ${nth_max} ]] && export NTHREADS_CALCINC=${nth_max}
+ export APRUN_CALCINC="${launcher} \$ncmd --cpus-per-task=${NTHREADS_CALCINC}"
+
+ export NTHREADS_CYCLE=${nth_cycle:-12}
+ [[ ${NTHREADS_CYCLE} -gt ${npe_node_max} ]] && export NTHREADS_CYCLE=${npe_node_max}
+ npe_cycle=${ntiles:-6}
+ export APRUN_CYCLE="${launcher} -n ${npe_cycle} --cpus-per-task=${NTHREADS_CYCLE}"
+
+ export NTHREADS_GAUSFCANL=1
+ npe_gausfcanl=${npe_gausfcanl:-1}
+ export APRUN_GAUSFCANL="${launcher} -n ${npe_gausfcanl} --cpus-per-task=${NTHREADS_GAUSFCANL}"
+
+elif [[ "${step}" = "sfcanl" ]]; then
+
+ nth_max=$((npe_node_max / npe_node_sfcanl))
+
+ export NTHREADS_CYCLE=${nth_sfcanl:-14}
+ [[ ${NTHREADS_CYCLE} -gt ${npe_node_max} ]] && export NTHREADS_CYCLE=${npe_node_max}
+ npe_sfcanl=${ntiles:-6}
+ export APRUN_CYCLE="${launcher} -n ${npe_sfcanl} --cpus-per-task=${NTHREADS_CYCLE}"
+
+elif [[ "${step}" = "eobs" ]]; then
+
+ export MKL_NUM_THREADS=4
+ export MKL_CBWR=AUTO
+
+ nth_max=$((npe_node_max / npe_node_eobs))
+
+ export NTHREADS_GSI=${nth_eobs:-${nth_max}}
+ [[ ${NTHREADS_GSI} -gt ${nth_max} ]] && export NTHREADS_GSI=${nth_max}
+ export APRUN_GSI="${launcher} -n ${npe_gsi:-${npe_eobs}} --cpus-per-task=${NTHREADS_GSI}"
+
+ export CFP_MP=${CFP_MP:-"YES"}
+ export USE_CFP=${USE_CFP:-"YES"}
+ export APRUNCFP="${launcher} -n \$ncmd ${mpmd_opt}"
+
+elif [[ "${step}" = "eupd" ]]; then
+
+ nth_max=$((npe_node_max / npe_node_eupd))
+
+ export NTHREADS_ENKF=${nth_eupd:-${nth_max}}
+ [[ ${NTHREADS_ENKF} -gt ${nth_max} ]] && export NTHREADS_ENKF=${nth_max}
+ export APRUN_ENKF="${launcher} -n ${npe_enkf:-${npe_eupd}} --cpus-per-task=${NTHREADS_ENKF}"
+
+ export CFP_MP=${CFP_MP:-"YES"}
+ export USE_CFP=${USE_CFP:-"YES"}
+ export APRUNCFP="${launcher} -n \$ncmd ${mpmd_opt}"
+
+elif [[ "${step}" = "fcst" ]] || [[ "${step}" = "efcs" ]]; then
+
+ if [[ "${CDUMP}" =~ "gfs" ]]; then
+ nprocs="npe_${step}_gfs"
+    ppn="npe_node_${step}_gfs"
+    # an assignment always succeeds, so "|| ppn=..." could never fire;
+    # fall back only when the _gfs variant is not defined
+    if [[ -z "${!ppn+x}" ]]; then ppn="npe_node_${step}"; fi
+ else
+ nprocs="npe_${step}"
+ ppn="npe_node_${step}"
+ fi
+ (( nnodes = (${!nprocs}+${!ppn}-1)/${!ppn} ))
+ (( ntasks = nnodes*${!ppn} ))
+ # With ESMF threading, the model wants to use the full node
+ export APRUN_UFS="${launcher} -n ${ntasks}"
+ unset nprocs ppn nnodes ntasks
+
+elif [[ "${step}" = "upp" ]]; then
+
+ nth_max=$((npe_node_max / npe_node_upp))
+
+ export NTHREADS_UPP=${nth_upp:-1}
+ [[ ${NTHREADS_UPP} -gt ${nth_max} ]] && export NTHREADS_UPP=${nth_max}
+ export APRUN_UPP="${launcher} -n ${npe_upp} --cpus-per-task=${NTHREADS_UPP}"
+
+elif [[ "${step}" = "atmos_products" ]]; then
+
+  export USE_CFP="YES" # Use MPMD for downstream product generation
+
+elif [[ "${step}" = "oceanice_products" ]]; then
+
+ nth_max=$((npe_node_max / npe_node_oceanice_products))
+
+ export NTHREADS_OCNICEPOST=${nth_oceanice_products:-1}
+ export APRUN_OCNICEPOST="${launcher} -n 1 --cpus-per-task=${NTHREADS_OCNICEPOST}"
+
+elif [[ "${step}" = "ecen" ]]; then
+
+ nth_max=$((npe_node_max / npe_node_ecen))
+
+ export NTHREADS_ECEN=${nth_ecen:-${nth_max}}
+ [[ ${NTHREADS_ECEN} -gt ${nth_max} ]] && export NTHREADS_ECEN=${nth_max}
+ export APRUN_ECEN="${launcher} -n ${npe_ecen} --cpus-per-task=${NTHREADS_ECEN}"
+
+ export NTHREADS_CHGRES=${nth_chgres:-12}
+ [[ ${NTHREADS_CHGRES} -gt ${npe_node_max} ]] && export NTHREADS_CHGRES=${npe_node_max}
+ export APRUN_CHGRES="time"
+
+ export NTHREADS_CALCINC=${nth_calcinc:-1}
+ [[ ${NTHREADS_CALCINC} -gt ${nth_max} ]] && export NTHREADS_CALCINC=${nth_max}
+ export APRUN_CALCINC="${launcher} -n ${npe_ecen} --cpus-per-task=${NTHREADS_CALCINC}"
+
+elif [[ "${step}" = "esfc" ]]; then
+
+ nth_max=$((npe_node_max / npe_node_esfc))
+
+ export NTHREADS_ESFC=${nth_esfc:-${nth_max}}
+ [[ ${NTHREADS_ESFC} -gt ${nth_max} ]] && export NTHREADS_ESFC=${nth_max}
+ export APRUN_ESFC="${launcher} -n ${npe_esfc} --cpus-per-task=${NTHREADS_ESFC}"
+
+ export NTHREADS_CYCLE=${nth_cycle:-14}
+ [[ ${NTHREADS_CYCLE} -gt ${npe_node_max} ]] && export NTHREADS_CYCLE=${npe_node_max}
+ export APRUN_CYCLE="${launcher} -n ${npe_esfc} --cpus-per-task=${NTHREADS_CYCLE}"
+
+elif [[ "${step}" = "epos" ]]; then
+
+ nth_max=$((npe_node_max / npe_node_epos))
+
+ export NTHREADS_EPOS=${nth_epos:-${nth_max}}
+ [[ ${NTHREADS_EPOS} -gt ${nth_max} ]] && export NTHREADS_EPOS=${nth_max}
+ export APRUN_EPOS="${launcher} -n ${npe_epos} --cpus-per-task=${NTHREADS_EPOS}"
+
+elif [[ "${step}" = "postsnd" ]]; then
+
+ export CFP_MP="YES"
+
+ nth_max=$((npe_node_max / npe_node_postsnd))
+
+ export NTHREADS_POSTSND=${nth_postsnd:-1}
+ [[ ${NTHREADS_POSTSND} -gt ${nth_max} ]] && export NTHREADS_POSTSND=${nth_max}
+ export APRUN_POSTSND="${launcher} -n ${npe_postsnd} --cpus-per-task=${NTHREADS_POSTSND}"
+
+ export NTHREADS_POSTSNDCFP=${nth_postsndcfp:-1}
+ [[ ${NTHREADS_POSTSNDCFP} -gt ${nth_max} ]] && export NTHREADS_POSTSNDCFP=${nth_max}
+ export APRUN_POSTSNDCFP="${launcher} -n ${npe_postsndcfp} ${mpmd_opt}"
+
+elif [[ "${step}" = "awips" ]]; then
+
+ nth_max=$((npe_node_max / npe_node_awips))
+
+ export NTHREADS_AWIPS=${nth_awips:-2}
+ [[ ${NTHREADS_AWIPS} -gt ${nth_max} ]] && export NTHREADS_AWIPS=${nth_max}
+ export APRUN_AWIPSCFP="${launcher} -n ${npe_awips} ${mpmd_opt}"
+
+elif [[ "${step}" = "gempak" ]]; then
+
+ export CFP_MP="YES"
+
+ if [[ ${CDUMP} == "gfs" ]]; then
+ npe_gempak=${npe_gempak_gfs}
+ npe_node_gempak=${npe_node_gempak_gfs}
+ fi
+
+ nth_max=$((npe_node_max / npe_node_gempak))
+
+ export NTHREADS_GEMPAK=${nth_gempak:-1}
+ [[ ${NTHREADS_GEMPAK} -gt ${nth_max} ]] && export NTHREADS_GEMPAK=${nth_max}
+ export APRUN="${launcher} -n ${npe_gempak} ${mpmd_opt}"
+
+
+elif [[ "${step}" = "fit2obs" ]]; then
+
+ nth_max=$((npe_node_max / npe_node_fit2obs))
+
+ export NTHREADS_FIT2OBS=${nth_fit2obs:-1}
+ [[ ${NTHREADS_FIT2OBS} -gt ${nth_max} ]] && export NTHREADS_FIT2OBS=${nth_max}
+ export MPIRUN="${launcher} -n ${npe_fit2obs} --cpus-per-task=${NTHREADS_FIT2OBS}"
+
+fi
diff --git a/modulefiles/module_base.noaacloud.lua b/modulefiles/module_base.noaacloud.lua
new file mode 100644
index 0000000000..fb5b283087
--- /dev/null
+++ b/modulefiles/module_base.noaacloud.lua
@@ -0,0 +1,51 @@
+help([[
+Load environment to run GFS on noaacloud
+]])
+
+local spack_mod_path=(os.getenv("spack_mod_path") or "None")
+prepend_path("MODULEPATH", spack_mod_path)
+
+load(pathJoin("stack-intel", (os.getenv("stack_intel_ver") or "None")))
+load(pathJoin("stack-intel-oneapi-mpi", (os.getenv("stack_impi_ver") or "None")))
+load(pathJoin("python", (os.getenv("python_ver") or "None")))
+
+--load(pathJoin("hpss", (os.getenv("hpss_ver") or "None")))
+load(pathJoin("gempak", (os.getenv("gempak_ver") or "None")))
+load(pathJoin("ncl", (os.getenv("ncl_ver") or "None")))
+load(pathJoin("jasper", (os.getenv("jasper_ver") or "None")))
+load(pathJoin("libpng", (os.getenv("libpng_ver") or "None")))
+load(pathJoin("cdo", (os.getenv("cdo_ver") or "None")))
+--load(pathJoin("R", (os.getenv("R_ver") or "None")))
+
+load(pathJoin("hdf5", (os.getenv("hdf5_ver") or "None")))
+load(pathJoin("netcdf-c", (os.getenv("netcdf_c_ver") or "None")))
+load(pathJoin("netcdf-fortran", (os.getenv("netcdf_fortran_ver") or "None")))
+
+load(pathJoin("nco", (os.getenv("nco_ver") or "None")))
+load(pathJoin("prod_util", (os.getenv("prod_util_ver") or "None")))
+load(pathJoin("grib-util", (os.getenv("grib_util_ver") or "None")))
+load(pathJoin("g2tmpl", (os.getenv("g2tmpl_ver") or "None")))
+load(pathJoin("gsi-ncdiag", (os.getenv("gsi_ncdiag_ver") or "None")))
+load(pathJoin("crtm", (os.getenv("crtm_ver") or "None")))
+load(pathJoin("bufr", (os.getenv("bufr_ver") or "None")))
+load(pathJoin("wgrib2", (os.getenv("wgrib2_ver") or "None")))
+load(pathJoin("py-netcdf4", (os.getenv("py_netcdf4_ver") or "None")))
+load(pathJoin("py-pyyaml", (os.getenv("py_pyyaml_ver") or "None")))
+load(pathJoin("py-jinja2", (os.getenv("py_jinja2_ver") or "None")))
+load(pathJoin("py-pandas", (os.getenv("py_pandas_ver") or "None")))
+load(pathJoin("py-python-dateutil", (os.getenv("py_python_dateutil_ver") or "None")))
+--load(pathJoin("met", (os.getenv("met_ver") or "None")))
+--load(pathJoin("metplus", (os.getenv("metplus_ver") or "None")))
+load(pathJoin("py-xarray", (os.getenv("py_xarray_ver") or "None")))
+
+setenv("WGRIB2","wgrib2")
+setenv("UTILROOT",(os.getenv("prod_util_ROOT") or "None"))
+
+--prepend_path("MODULEPATH", pathJoin("/scratch1/NCEPDEV/global/glopara/git/prepobs/v" .. (os.getenv("prepobs_run_ver") or "None"), "modulefiles"))
+--prepend_path("MODULEPATH", pathJoin("/scratch1/NCEPDEV/global/glopara/git/prepobs/feature-GFSv17_com_reorg_log_update/modulefiles"))
+--load(pathJoin("prepobs", (os.getenv("prepobs_run_ver") or "None")))
+
+--prepend_path("MODULEPATH", pathJoin("/scratch1/NCEPDEV/global/glopara/git/Fit2Obs/v" .. (os.getenv("fit2obs_ver") or "None"), "modulefiles"))
+--load(pathJoin("fit2obs", (os.getenv("fit2obs_ver") or "None")))
+
+whatis("Description: GFS run environment")
diff --git a/modulefiles/module_gwci.noaacloud.lua b/modulefiles/module_gwci.noaacloud.lua
new file mode 100644
index 0000000000..c3142cd60d
--- /dev/null
+++ b/modulefiles/module_gwci.noaacloud.lua
@@ -0,0 +1,15 @@
+help([[
+Load environment to run GFS workflow setup scripts on noaacloud
+]])
+
+prepend_path("MODULEPATH", "/contrib/spack-stack/spack-stack-1.6.0/envs/unified-env/install/modulefiles/Core")
+
+load(pathJoin("stack-intel", os.getenv("2021.3.0")))
+load(pathJoin("stack-intel-oneapi-mpi", os.getenv("2021.3.0")))
+
+load(pathJoin("netcdf-c", os.getenv("4.9.2")))
+load(pathJoin("netcdf-fortran", os.getenv("4.6.1")))
+load(pathJoin("nccmp","1.9.0.1"))
+load(pathJoin("wgrib2", "2.0.8"))
+
+whatis("Description: GFS run setup CI environment")
diff --git a/modulefiles/module_gwsetup.noaacloud.lua b/modulefiles/module_gwsetup.noaacloud.lua
new file mode 100644
index 0000000000..f3845e8d72
--- /dev/null
+++ b/modulefiles/module_gwsetup.noaacloud.lua
@@ -0,0 +1,20 @@
+help([[
+Load environment to run GFS workflow setup scripts on noaacloud
+]])
+
+load(pathJoin("rocoto"))
+
+prepend_path("MODULEPATH", "/contrib/spack-stack/spack-stack-1.6.0/envs/unified-env/install/modulefiles/Core")
+
+local stack_intel_ver=os.getenv("stack_intel_ver") or "2021.3.0"
+local python_ver=os.getenv("python_ver") or "3.10.3"
+
+load(pathJoin("stack-intel", stack_intel_ver))
+load(pathJoin("python", python_ver))
+load("py-jinja2")
+load("py-pyyaml")
+load("py-numpy")
+local git_ver=os.getenv("git_ver") or "1.8.3.1"
+load(pathJoin("git", git_ver))
+
+whatis("Description: GFS run setup environment")
diff --git a/parm/config/gfs/config.base b/parm/config/gfs/config.base
index c53ac908f2..85e5dfb168 100644
--- a/parm/config/gfs/config.base
+++ b/parm/config/gfs/config.base
@@ -72,9 +72,12 @@ export DO_BUFRSND="@DO_BUFRSND@" # BUFR sounding products
export DO_GEMPAK="@DO_GEMPAK@" # GEMPAK products
export DO_AWIPS="@DO_AWIPS@" # AWIPS products
export DO_NPOESS="@DO_NPOESS@" # NPOESS products
-export DO_TRACKER="@DO_TRACKER@" # Hurricane track verification
-export DO_GENESIS="@DO_GENESIS@" # Cyclone genesis verification
-export DO_GENESIS_FSU="@DO_GENESIS_FSU@" # Cyclone genesis verification (FSU)
+#export DO_TRACKER="@DO_TRACKER@" # Hurricane track verification
+#export DO_GENESIS="@DO_GENESIS@" # Cyclone genesis verification
+#export DO_GENESIS_FSU="@DO_GENESIS_FSU@" # Cyclone genesis verification (FSU)
+export DO_TRACKER="NO" # Hurricane track verification
+export DO_GENESIS="NO" # Cyclone genesis verification
+export DO_GENESIS_FSU="NO" # Cyclone genesis verification (FSU)
export DO_VERFOZN="YES" # Ozone data assimilation monitoring
export DO_VERFRAD="YES" # Radiance data assimilation monitoring
export DO_VMINMON="YES" # GSI minimization monitoring
diff --git a/parm/config/gfs/config.resources b/parm/config/gfs/config.resources
index 0972f74f9c..ea0365bfb3 100644
--- a/parm/config/gfs/config.resources
+++ b/parm/config/gfs/config.resources
@@ -59,7 +59,15 @@ case ${machine} in
;;
"AWSPW")
export PARTITION_BATCH="compute"
- npe_node_max=40
+ npe_node_max=36
+ ;;
+ "AZUREPW")
+ export PARTITION_BATCH="compute"
+ npe_node_max=24
+ ;;
+ "GOOGLEPW")
+ export PARTITION_BATCH="compute"
+ npe_node_max=30
;;
"CONTAINER")
npe_node_max=1
@@ -804,7 +812,7 @@ case ${step} in
;;
"atmos_products")
- export wtime_atmos_products="00:15:00"
+ export wtime_atmos_products="01:15:00"
export npe_atmos_products=24
export nth_atmos_products=1
export npe_node_atmos_products="${npe_atmos_products}"
@@ -816,7 +824,7 @@ case ${step} in
;;
"verfozn")
- export wtime_verfozn="00:05:00"
+ export wtime_verfozn="00:15:00"
export npe_verfozn=1
export nth_verfozn=1
export npe_node_verfozn=1
diff --git a/parm/config/gfs/config.stage_ic b/parm/config/gfs/config.stage_ic
index 9956e8af6a..ccfb575156 100644
--- a/parm/config/gfs/config.stage_ic
+++ b/parm/config/gfs/config.stage_ic
@@ -9,10 +9,19 @@ source "${EXPDIR}/config.resources" stage_ic
case "${CASE}" in
"C48" | "C96" | "C192")
- export CPL_ATMIC="workflow_${CASE}_refactored"
- export CPL_ICEIC="workflow_${CASE}_refactored"
- export CPL_OCNIC="workflow_${CASE}_refactored"
- export CPL_WAVIC="workflow_${CASE}_refactored"
+
+  if [[ -z "${PW_CSP+x}" ]]; then
+ export CPL_ATMIC="workflow_${CASE}_refactored"
+ export CPL_ICEIC="workflow_${CASE}_refactored"
+ export CPL_OCNIC="workflow_${CASE}_refactored"
+ export CPL_WAVIC="workflow_${CASE}_refactored"
+ else
+    echo "PW_CSP: ${PW_CSP}"
+ export CPL_ATMIC="${CASE}"
+ export CPL_ICEIC="${CASE}"
+ export CPL_OCNIC="${CASE}"
+ export CPL_WAVIC="${CASE}"
+ fi
;;
"C384")
export CPL_ATMIC=GEFS-NoahMP-aerosols-p8c_refactored
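The `${PW_CSP+x}` test above distinguishes "unset" from "set but empty": on the Parallel Works clusters the image exports PW_CSP, so the plain `${CASE}` IC names are used there. A minimal demo of the two branches, with an illustrative CASE:

```bash
#!/usr/bin/env bash
CASE=C96
pick_ics() {
  if [[ -z "${PW_CSP+x}" ]]; then
    echo "on-prem: CPL_ATMIC=workflow_${CASE}_refactored"
  else
    echo "cloud (PW_CSP=${PW_CSP}): CPL_ATMIC=${CASE}"
  fi
}
pick_ics               # PW_CSP unset -> on-prem naming
PW_CSP=aws pick_ics    # PW clusters export PW_CSP -> plain ${CASE}
```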
diff --git a/scripts/exglobal_stage_ic.sh b/scripts/exglobal_stage_ic.sh
index d941fa10b4..099437ee54 100755
--- a/scripts/exglobal_stage_ic.sh
+++ b/scripts/exglobal_stage_ic.sh
@@ -36,21 +36,30 @@ for MEMDIR in "${MEMDIR_ARRAY[@]}"; do
for ftype in coupler.res fv_core.res.nc; do
src="${BASE_CPLIC}/${CPL_ATMIC:-}/${PDY}${cyc}/${MEMDIR}/atmos/${PDY}.${cyc}0000.${ftype}"
tgt="${COM_ATMOS_RESTART_PREV}/${PDY}.${cyc}0000.${ftype}"
- ${NCP} "${src}" "${tgt}"
- rc=$?
+    if [[ ! -f "${tgt}" ]]; then
+ ${NCP} "${src}" "${tgt}"
+ rc=$?
+ else
+ rc=0
+ fi
((rc != 0)) && error_message "${src}" "${tgt}" "${rc}"
err=$((err + rc))
done
for ftype in ca_data fv_core.res fv_srf_wnd.res fv_tracer.res phy_data sfc_data; do
for ((tt = 1; tt <= ntiles; tt++)); do
src="${BASE_CPLIC}/${CPL_ATMIC:-}/${PDY}${cyc}/${MEMDIR}/atmos/${PDY}.${cyc}0000.${ftype}.tile${tt}.nc"
if (( tt > 6 )) ; then
tgt="${COM_ATMOS_RESTART_PREV}/${PDY}.${cyc}0000.${ftype}.nest0$((tt-5)).tile${tt}.nc"
else
tgt="${COM_ATMOS_RESTART_PREV}/${PDY}.${cyc}0000.${ftype}.tile${tt}.nc"
fi
- ${NCP} "${src}" "${tgt}"
- rc=$?
+      if [[ ! -f "${tgt}" ]]; then
+ ${NCP} "${src}" "${tgt}"
+ rc=$?
+ else
+ rc=0
+ fi
((rc != 0)) && error_message "${src}" "${tgt}" "${rc}"
err=$((err + rc))
done
@@ -59,18 +68,35 @@ for MEMDIR in "${MEMDIR_ARRAY[@]}"; do
# Stage the FV3 cold-start initial conditions to ROTDIR
YMD=${PDY} HH=${cyc} declare_from_tmpl COM_ATMOS_INPUT
[[ ! -d "${COM_ATMOS_INPUT}" ]] && mkdir -p "${COM_ATMOS_INPUT}"
src="${BASE_CPLIC}/${CPL_ATMIC:-}/${PDY}${cyc}/${MEMDIR}/atmos/gfs_ctrl.nc"
+  if [[ ! -f "${src}" ]]; then
+ src="${BASE_CPLIC}/${CPL_ATMIC:-}/${RUN}.${PDY}/${cyc}/model_data/atmos/input/gfs_ctrl.nc"
+ fi
tgt="${COM_ATMOS_INPUT}/gfs_ctrl.nc"
- ${NCP} "${src}" "${tgt}"
- rc=$?
+  if [[ ! -f "${tgt}" ]]; then
+ ${NCP} "${src}" "${tgt}"
+ rc=$?
+ else
+ rc=0
+ fi
((rc != 0)) && error_message "${src}" "${tgt}" "${rc}"
err=$((err + rc))
for ftype in gfs_data sfc_data; do
for ((tt = 1; tt <= ntiles; tt++)); do
src="${BASE_CPLIC}/${CPL_ATMIC:-}/${PDY}${cyc}/${MEMDIR}/atmos/${ftype}.tile${tt}.nc"
+      if [[ ! -f "${src}" ]]; then
+ src="${BASE_CPLIC}/${CPL_ATMIC:-}/${RUN}.${PDY}/${cyc}/model_data/atmos/input/${ftype}.tile${tt}.nc"
+ fi
tgt="${COM_ATMOS_INPUT}/${ftype}.tile${tt}.nc"
- ${NCP} "${src}" "${tgt}"
- rc=$?
+      if [[ ! -f "${tgt}" ]]; then
+ ${NCP} "${src}" "${tgt}"
+ rc=$?
+ else
+ rc=0
+ fi
((rc != 0)) && error_message "${src}" "${tgt}" "${rc}"
err=$((err + rc))
done
@@ -86,8 +112,12 @@ for MEMDIR in "${MEMDIR_ARRAY[@]}"; do
[[ ! -d "${COM_OCEAN_RESTART_PREV}" ]] && mkdir -p "${COM_OCEAN_RESTART_PREV}"
src="${BASE_CPLIC}/${CPL_OCNIC:-}/${PDY}${cyc}/${MEMDIR}/ocean/${PDY}.${cyc}0000.MOM.res.nc"
tgt="${COM_OCEAN_RESTART_PREV}/${PDY}.${cyc}0000.MOM.res.nc"
- ${NCP} "${src}" "${tgt}"
- rc=$?
+  if [[ ! -f "${tgt}" ]]; then
+ ${NCP} "${src}" "${tgt}"
+ rc=$?
+ else
+ rc=0
+ fi
((rc != 0)) && error_message "${src}" "${tgt}" "${rc}"
err=$((err + rc))
case "${OCNRES}" in
@@ -98,8 +128,12 @@ for MEMDIR in "${MEMDIR_ARRAY[@]}"; do
for nn in $(seq 1 3); do
src="${BASE_CPLIC}/${CPL_OCNIC:-}/${PDY}${cyc}/${MEMDIR}/ocean/${PDY}.${cyc}0000.MOM.res_${nn}.nc"
tgt="${COM_OCEAN_RESTART_PREV}/${PDY}.${cyc}0000.MOM.res_${nn}.nc"
- ${NCP} "${src}" "${tgt}"
- rc=$?
+      if [[ ! -f "${tgt}" ]]; then
+ ${NCP} "${src}" "${tgt}"
+ rc=$?
+ else
+ rc=0
+ fi
((rc != 0)) && error_message "${src}" "${tgt}" "${rc}"
err=$((err + rc))
done
@@ -116,8 +150,12 @@ for MEMDIR in "${MEMDIR_ARRAY[@]}"; do
if (( 0${MEMDIR:3} > 0 )) && [[ "${USE_OCN_PERTURB_FILES:-false}" == "true" ]]; then
src="${BASE_CPLIC}/${CPL_OCNIC:-}/${PDY}${cyc}/${MEMDIR}/ocean/${PDY}.${cyc}0000.mom6_increment.nc"
tgt="${COM_OCEAN_RESTART_PREV}/${PDY}.${cyc}0000.mom6_increment.nc"
- ${NCP} "${src}" "${tgt}"
- rc=${?}
+    if [[ ! -f "${tgt}" ]]; then
+ ${NCP} "${src}" "${tgt}"
+ rc=$?
+ else
+ rc=0
+ fi
((rc != 0)) && error_message "${src}" "${tgt}" "${rc}"
err=$((err + rc))
fi
@@ -131,8 +169,12 @@ for MEMDIR in "${MEMDIR_ARRAY[@]}"; do
src="${BASE_CPLIC}/${CPL_MEDIC:-}/${PDY}${cyc}/${MEMDIR}/med/${PDY}.${cyc}0000.ufs.cpld.cpl.r.nc"
tgt="${COM_MED_RESTART_PREV}/${PDY}.${cyc}0000.ufs.cpld.cpl.r.nc"
if [[ -f "${src}" ]]; then
- ${NCP} "${src}" "${tgt}"
- rc=$?
+      if [[ ! -f "${tgt}" ]]; then
+ ${NCP} "${src}" "${tgt}"
+ rc=$?
+ else
+ rc=0
+ fi
((rc != 0)) && error_message "${src}" "${tgt}" "${rc}"
err=$((err + rc))
else
@@ -148,8 +190,12 @@ for MEMDIR in "${MEMDIR_ARRAY[@]}"; do
[[ ! -d "${COM_ICE_RESTART_PREV}" ]] && mkdir -p "${COM_ICE_RESTART_PREV}"
src="${BASE_CPLIC}/${CPL_ICEIC:-}/${PDY}${cyc}/${MEMDIR}/ice/${PDY}.${cyc}0000.cice_model.res.nc"
tgt="${COM_ICE_RESTART_PREV}/${PDY}.${cyc}0000.cice_model.res.nc"
- ${NCP} "${src}" "${tgt}"
- rc=$?
+    if [[ ! -f "${tgt}" ]]; then
+ ${NCP} "${src}" "${tgt}"
+ rc=$?
+ else
+ rc=0
+ fi
((rc != 0)) && error_message "${src}" "${tgt}" "${rc}"
err=$((err + rc))
fi
@@ -161,8 +207,12 @@ for MEMDIR in "${MEMDIR_ARRAY[@]}"; do
for grdID in ${waveGRD}; do # TODO: check if this is a bash array; if so adjust
src="${BASE_CPLIC}/${CPL_WAVIC:-}/${PDY}${cyc}/${MEMDIR}/wave/${PDY}.${cyc}0000.restart.${grdID}"
tgt="${COM_WAVE_RESTART}/${PDY}.${cyc}0000.restart.${grdID}"
- ${NCP} "${src}" "${tgt}"
- rc=$?
+      if [[ ! -f "${tgt}" ]]; then
+ ${NCP} "${src}" "${tgt}"
+ rc=$?
+ else
+ rc=0
+ fi
((rc != 0)) && error_message "${src}" "${tgt}" "${rc}"
err=$((err + rc))
done
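The copy-if-target-missing block now repeats for every staged file in this script; if it spreads further, a small helper could keep the rc/err bookkeeping in one place. A hypothetical refactor (not part of this PR), assuming NCP, error_message, and err are defined as in the script:

```bash
# Copy src -> tgt unless tgt already exists, preserving the error accounting.
stage_file() {
  local src="$1" tgt="$2" rc=0
  if [[ ! -f "${tgt}" ]]; then
    ${NCP} "${src}" "${tgt}"
    rc=$?
  fi
  ((rc != 0)) && error_message "${src}" "${tgt}" "${rc}"
  err=$((err + rc))
}
```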
diff --git a/sorc/build_all.sh b/sorc/build_all.sh
index 28f52fd306..b6c4e6cc1c 100755
--- a/sorc/build_all.sh
+++ b/sorc/build_all.sh
@@ -145,7 +145,7 @@ build_opts["ww3prepost"]="${_wave_opt} ${_verbose_opt} ${_build_ufs_opt} ${_buil
# Optional DA builds
if [[ "${_build_ufsda}" == "YES" ]]; then
- if [[ "${MACHINE_ID}" != "orion" && "${MACHINE_ID}" != "hera" && "${MACHINE_ID}" != "hercules" && "${MACHINE_ID}" != "wcoss2" ]]; then
+ if [[ "${MACHINE_ID}" != "orion" && "${MACHINE_ID}" != "hera" && "${MACHINE_ID}" != "hercules" && "${MACHINE_ID}" != "wcoss2" && "${MACHINE_ID}" != "noaacloud" ]]; then
echo "NOTE: The GDAS App is not supported on ${MACHINE_ID}. Disabling build."
else
build_jobs["gdas"]=8
diff --git a/sorc/build_ufs.sh b/sorc/build_ufs.sh
index 7e84eaebc2..44c8c7a2ad 100755
--- a/sorc/build_ufs.sh
+++ b/sorc/build_ufs.sh
@@ -41,30 +41,9 @@ COMPILE_NR=0
CLEAN_BEFORE=YES
CLEAN_AFTER=NO
-if [[ "${MACHINE_ID}" != "noaacloud" ]]; then
- BUILD_JOBS=${BUILD_JOBS:-8} ./tests/compile.sh "${MACHINE_ID}" "${MAKE_OPT}" "${COMPILE_NR}" "intel" "${CLEAN_BEFORE}" "${CLEAN_AFTER}"
- mv "./tests/fv3_${COMPILE_NR}.exe" ./tests/ufs_model.x
- mv "./tests/modules.fv3_${COMPILE_NR}.lua" ./tests/modules.ufs_model.lua
- cp "./modulefiles/ufs_common.lua" ./tests/ufs_common.lua
-else
-
- if [[ "${PW_CSP:-}" == "aws" ]]; then
- set +x
- # TODO: This will need to be addressed further when the EPIC stacks are available/supported.
- module use /contrib/spack-stack/envs/ufswm/install/modulefiles/Core
- module load stack-intel
- module load stack-intel-oneapi-mpi
- module load ufs-weather-model-env/1.0.0
- # TODO: It is still uncertain why this is the only module that is
- # missing; check the spack build as this needed to be added manually.
- module load w3emc/2.9.2 # TODO: This has similar issues for the EPIC stack.
- module list
- set -x
- fi
-
- export CMAKE_FLAGS="${MAKE_OPT}"
- BUILD_JOBS=${BUILD_JOBS:-8} ./build.sh
- mv "${cwd}/ufs_model.fd/build/ufs_model" "${cwd}/ufs_model.fd/tests/ufs_model.x"
-fi
+BUILD_JOBS=${BUILD_JOBS:-8} ./tests/compile.sh "${MACHINE_ID}" "${MAKE_OPT}" "${COMPILE_NR}" "intel" "${CLEAN_BEFORE}" "${CLEAN_AFTER}"
+mv "./tests/fv3_${COMPILE_NR}.exe" ./tests/ufs_model.x
+mv "./tests/modules.fv3_${COMPILE_NR}.lua" ./tests/modules.ufs_model.lua
+cp "./modulefiles/ufs_common.lua" ./tests/ufs_common.lua
exit 0
diff --git a/sorc/com.slurm b/sorc/com.slurm
new file mode 100644
index 0000000000..5d0ded6832
--- /dev/null
+++ b/sorc/com.slurm
@@ -0,0 +1,19 @@
+#!/bin/bash
+#SBATCH --job-name=test-lustre
+#SBATCH --account=$USER
+#SBATCH --qos=batch
+#SBATCH --partition=compute
+#SBATCH -t 04:15:00
+#SBATCH --nodes=1
+##SBATCH --tasks-per-node=36
+##SBATCH --cpus-per-task=1
+#SBATCH -o gfsfcst.%J.log
+##SBATCH --export=NONE
+#SBATCH --exclusive
+
+set -x
+
+cd /contrib/Wei.Huang/src/global-workflow-cloud/sorc
+
+./build_all.sh -guw
+
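One caveat: Slurm does not expand shell variables inside `#SBATCH` directives, so `--account=$USER` above is passed literally. Supplying the account at submission time sidesteps that, since command-line options override the header:

```bash
cd /contrib/Wei.Huang/src/global-workflow-cloud/sorc
sbatch --account="${USER}" com.slurm
```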
diff --git a/sorc/gsi_enkf.fd b/sorc/gsi_enkf.fd
index 8e279f9c73..59d7578b31 160000
--- a/sorc/gsi_enkf.fd
+++ b/sorc/gsi_enkf.fd
@@ -1 +1 @@
-Subproject commit 8e279f9c734097f673b07e80f385b2623d13ba4a
+Subproject commit 59d7578b31454140cb38bf65b27e3cffb02c7e3e
diff --git a/sorc/gsi_monitor.fd b/sorc/gsi_monitor.fd
index f9d6f5f744..37fbc100ae 160000
--- a/sorc/gsi_monitor.fd
+++ b/sorc/gsi_monitor.fd
@@ -1 +1 @@
-Subproject commit f9d6f5f744462a449e70abed8c5860b1c4564ad8
+Subproject commit 37fbc100aeb298ae6434910f897e43d1dd31f94c
diff --git a/sorc/gsi_utils.fd b/sorc/gsi_utils.fd
index d940406161..68bc14d30b 160000
--- a/sorc/gsi_utils.fd
+++ b/sorc/gsi_utils.fd
@@ -1 +1 @@
-Subproject commit d9404061611553459394173c3ff33116db306326
+Subproject commit 68bc14d30b3ca8f890f2761c8bdd0a3cea635cf1
diff --git a/sorc/link_workflow.sh b/sorc/link_workflow.sh
index f338f2bad3..580bc0ce39 100755
--- a/sorc/link_workflow.sh
+++ b/sorc/link_workflow.sh
@@ -76,6 +76,7 @@ case "${machine}" in
"jet") FIX_DIR="/lfs4/HFIP/hfv3gfs/glopara/git/fv3gfs/fix" ;;
"s4") FIX_DIR="/data/prod/glopara/fix" ;;
"gaea") FIX_DIR="/gpfs/f5/epic/proj-shared/global/glopara/data/fix" ;;
+ "noaacloud") FIX_DIR="/contrib/Wei.Huang/data/hack-orion/fix" ;;
*)
echo "FATAL: Unknown target machine ${machine}, couldn't set FIX_DIR"
exit 1
diff --git a/sorc/setup-case.sh b/sorc/setup-case.sh
new file mode 100755
index 0000000000..530cd47ba9
--- /dev/null
+++ b/sorc/setup-case.sh
@@ -0,0 +1,52 @@
+#!/bin/bash
+#https://global-workflow.readthedocs.io/en/latest/setup.html
+
+#set -x
+
+ GLOBALWORKFLOWHOME=/contrib/Wei.Huang/src/global-workflow-cloud
+#ATMOSRES=48
+ ATMOSRES=96
+#ATMOSRES=192
+#ATMOSRES=384
+ PSLOT=c${ATMOSRES}atm
+ EXPNAME=gfs
+#PSLOT=C48C48mx500
+ IDATE=2024010100
+ EDATE=2024010100
+#IDATE=2021032306
+#EDATE=2021032306
+
+ GLOBALWORKFLOWTEMP=/lustre/Wei.Huang/run
+ COMROOT=${GLOBALWORKFLOWTEMP}/comroot
+
+ EXPDIR=${GLOBALWORKFLOWTEMP}/expdir
+
+ CONFIGDIR=${GLOBALWORKFLOWHOME}/parm/config
+ export BASE_CPLIC=/contrib/Wei.Huang/data/ICs
+ export CPL_ATMIC=C${ATMOSRES}
+#export BASE_CPLIC=/contrib/Wei.Huang/data/ICs
+#export CPL_ATMIC=C48C48mx500
+#export IC_PREFIX=gdas
+#export IC_TYPE=restart
+
+#workflow/hosts/awspw.yaml
+ export STMP=/lustre/${USER}/stmp
+ export PTMP=/lustre/${USER}/ptmp
+ export NOSCRUB=/s3bucket/${USER}/archive
+
+ source ${GLOBALWORKFLOWHOME}/workflow/gw_setup.sh
+
+ ${GLOBALWORKFLOWHOME}/workflow/setup_expt.py ${EXPNAME} forecast-only \
+ --app ATM \
+ --idate ${IDATE} \
+ --edate ${EDATE} \
+ --pslot ${PSLOT} \
+ --configdir ${CONFIGDIR}/gfs \
+ --resdetatmos ${ATMOSRES} \
+ --comroot ${COMROOT} \
+ --expdir ${EXPDIR}
+
+ ${GLOBALWORKFLOWHOME}/workflow/setup_xml.py ${EXPDIR}/${PSLOT}
+
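Rough shape of a run, assuming the paths hard-coded above exist on the cluster:

```bash
cd /contrib/Wei.Huang/src/global-workflow-cloud/sorc
./setup-case.sh
# setup_expt.py writes the experiment to /lustre/Wei.Huang/run/expdir/c96atm
# and the COM tree under /lustre/Wei.Huang/run/comroot; setup_xml.py then
# generates c96atm.xml in the experiment directory for rocoto.
```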
diff --git a/ush/load_fv3gfs_modules.sh b/ush/load_fv3gfs_modules.sh
index ae0e381db4..2cafc4fd81 100755
--- a/ush/load_fv3gfs_modules.sh
+++ b/ush/load_fv3gfs_modules.sh
@@ -20,7 +20,7 @@ source "${HOMEgfs}/versions/run.ver"
module use "${HOMEgfs}/modulefiles"
case "${MACHINE_ID}" in
- "wcoss2" | "hera" | "orion" | "hercules" | "gaea" | "jet" | "s4")
+ "wcoss2" | "hera" | "orion" | "hercules" | "gaea" | "jet" | "s4" | "noaacloud")
module load "module_base.${MACHINE_ID}"
;;
*)
diff --git a/ush/module-setup.sh b/ush/module-setup.sh
index b4ec3edafa..398562652d 100755
--- a/ush/module-setup.sh
+++ b/ush/module-setup.sh
@@ -92,10 +92,8 @@ elif [[ ${MACHINE_ID} = discover* ]]; then
# TODO: This can likely be made more general once other cloud
# platforms come online.
elif [[ ${MACHINE_ID} = "noaacloud" ]]; then
-
- export SPACK_ROOT=/contrib/global-workflow/spack-stack/spack
- export PATH=${PATH}:${SPACK_ROOT}/bin
- . "${SPACK_ROOT}"/share/spack/setup-env.sh
+ # We are on NOAA Cloud
+ module purge
else
echo WARNING: UNKNOWN PLATFORM 1>&2
diff --git a/versions/build.noaacloud.ver b/versions/build.noaacloud.ver
new file mode 100644
index 0000000000..e08c4a3fec
--- /dev/null
+++ b/versions/build.noaacloud.ver
@@ -0,0 +1,5 @@
+export stack_intel_ver=2021.3.0
+export stack_impi_ver=2021.3.0
+export spack_env=gsi-addon-env
+source "${HOMEgfs:-}/versions/build.spack.ver"
+export spack_mod_path="/contrib/spack-stack/spack-stack-${spack_stack_ver}/envs/${spack_env}/install/modulefiles/Core"
diff --git a/versions/run.noaacloud.ver b/versions/run.noaacloud.ver
new file mode 100644
index 0000000000..251c750923
--- /dev/null
+++ b/versions/run.noaacloud.ver
@@ -0,0 +1,14 @@
+export stack_intel_ver=2021.3.0
+export stack_impi_ver=2021.3.0
+export spack_env=gsi-addon-env
+#export spack_env=unified-env
+
+export ncl_ver=6.6.2
+export gempak_ver=7.4.2
+
+source "${HOMEgfs:-}/versions/run.spack.ver"
+export spack_mod_path="/work/noaa/epic/role-epic/spack-stack/orion/spack-stack-${spack_stack_ver}/envs/${spack_env}/install/modulefiles/Core"
+export spack_mod_path="/contrib/spack-stack/spack-stack-${spack_stack_ver}/envs/gsi-addon-env/install/modulefiles/Core"
+
+export cdo_ver=2.2.0
+#export R_ver=4.0.2
diff --git a/workflow/hosts.py b/workflow/hosts.py
index 2334a3ac35..3ae95c38bb 100644
--- a/workflow/hosts.py
+++ b/workflow/hosts.py
@@ -15,7 +15,8 @@ class Host:
"""
SUPPORTED_HOSTS = ['HERA', 'ORION', 'JET', 'HERCULES',
- 'WCOSS2', 'S4', 'CONTAINER', 'AWSPW', 'GAEA']
+ 'WCOSS2', 'S4', 'CONTAINER', 'GAEA',
+ 'AWSPW', 'AZUREPW', 'GOOGLEPW']
def __init__(self, host=None):
@@ -54,7 +55,7 @@ def detect(cls):
elif container is not None:
machine = 'CONTAINER'
elif pw_csp is not None:
- if pw_csp.lower() not in ['azure', 'aws', 'gcp']:
+ if pw_csp.lower() not in ['azure', 'aws', 'google']:
raise ValueError(
f'NOAA cloud service provider "{pw_csp}" is not supported.')
machine = f"{pw_csp.upper()}PW"
diff --git a/workflow/hosts/awspw.yaml b/workflow/hosts/awspw.yaml
index d2223e799e..90546e6093 100644
--- a/workflow/hosts/awspw.yaml
+++ b/workflow/hosts/awspw.yaml
@@ -3,12 +3,18 @@ DMPDIR: '/scratch1/NCEPDEV/global/glopara/dump' # TODO: This does not yet exist.
PACKAGEROOT: '/scratch1/NCEPDEV/global/glopara/nwpara' #TODO: This does not yet exist.
COMINsyn: '/scratch1/NCEPDEV/global/glopara/com/gfs/prod/syndat' #TODO: This does not yet exist.
HOMEDIR: '/contrib/${USER}'
-STMP: '/lustre/${USER}/stmp2/'
-PTMP: '/lustre/${USER}/stmp4/'
+STMP: '/lustre/${USER}/stmp/'
+PTMP: '/lustre/${USER}/ptmp/'
-NOSCRUB: ${HOMEDIR}
+NOSCRUB: '/contrib/${USER}/scrub'
+BASE_CPLIC: '/contrib/Wei.Huang/data/ICs'
+CPL_ATMIC: 'C96'
+IC_PREFIX: 'gfs'
+IC_TYPE: 'input'
-ACCOUNT: hwufscpldcld
ACCOUNT_SERVICE: hwufscpldcld
SCHEDULER: slurm
+ACCOUNT: $USER
QUEUE: batch
QUEUE_SERVICE: batch
PARTITION_BATCH: compute
@@ -16,7 +22,7 @@ PARTITION_SERVICE: compute
RESERVATION: ''
CHGRP_RSTPROD: 'YES'
CHGRP_CMD: 'chgrp rstprod' # TODO: This is not yet supported.
-HPSSARCH: 'YES'
+HPSSARCH: 'NO'
HPSS_PROJECT: emc-global #TODO: See `ATARDIR` below.
LOCALARCH: 'NO'
ATARDIR: '/NCEPDEV/${HPSS_PROJECT}/1year/${USER}/${machine}/scratch/${PSLOT}' # TODO: This will not yet work from AWS.
diff --git a/workflow/hosts/azurepw.yaml b/workflow/hosts/azurepw.yaml
new file mode 100644
index 0000000000..640a8d97c5
--- /dev/null
+++ b/workflow/hosts/azurepw.yaml
@@ -0,0 +1,24 @@
+BASE_GIT: '/work/noaa/global/glopara/git'
+DMPDIR: '/work/noaa/rstprod/dump'
+BASE_CPLIC: '/contrib/Wei.Huang/data/ICs'
+PACKAGEROOT: '/work/noaa/global/glopara/nwpara'
+COMINsyn: '/work/noaa/global/glopara/com/gfs/prod/syndat'
+HOMEDIR: '/contrib/${USER}/src/global-workflow-cloud'
+STMP: '/lustre/${USER}/stmp'
+PTMP: '/lustre/${USER}/ptmp'
+NOSCRUB: '/lustre/${USER}/scrub'
+SCHEDULER: slurm
+ACCOUNT: $USER
+QUEUE: batch
+QUEUE_SERVICE: batch
+PARTITION_BATCH: compute
+PARTITION_SERVICE: compute
+CHGRP_RSTPROD: 'YES'
+CHGRP_CMD: 'chgrp rstprod' # TODO: This is not yet supported.
+HPSSARCH: 'NO'
+HPSS_PROJECT: emc-global #TODO: See `ATARDIR` below.
+LOCALARCH: 'NO'
+ATARDIR: '/NCEPDEV/${HPSS_PROJECT}/1year/${USER}/${machine}/scratch/${PSLOT}' # TODO: This will not yet work from Azure.
+MAKE_NSSTBUFR: 'NO'
+MAKE_ACFTBUFR: 'NO'
+SUPPORTED_RESOLUTIONS: ['C48', 'C96'] # TODO: Test and support all cubed-sphere resolutions.
diff --git a/workflow/hosts/googlepw.yaml b/workflow/hosts/googlepw.yaml
new file mode 100644
index 0000000000..640a8d97c5
--- /dev/null
+++ b/workflow/hosts/googlepw.yaml
@@ -0,0 +1,24 @@
+BASE_GIT: '/work/noaa/global/glopara/git'
+DMPDIR: '/work/noaa/rstprod/dump'
+BASE_CPLIC: '/contrib/Wei.Huang/data/ICs'
+PACKAGEROOT: '/work/noaa/global/glopara/nwpara'
+COMINsyn: '/work/noaa/global/glopara/com/gfs/prod/syndat'
+HOMEDIR: '/contrib/${USER}/src/global-workflow-cloud'
+STMP: '/lustre/${USER}/stmp'
+PTMP: '/lustre/${USER}/ptmp'
+NOSCRUB: '/lustre/${USER}/scrub'
+SCHEDULER: slurm
+ACCOUNT: $USER
+QUEUE: batch
+QUEUE_SERVICE: batch
+PARTITION_BATCH: compute
+PARTITION_SERVICE: compute
+CHGRP_RSTPROD: 'YES'
+CHGRP_CMD: 'chgrp rstprod' # TODO: This is not yet supported.
+HPSSARCH: 'NO'
+HPSS_PROJECT: emc-global #TODO: See `ATARDIR` below.
+LOCALARCH: 'NO'
+ATARDIR: '/NCEPDEV/${HPSS_PROJECT}/1year/${USER}/${machine}/scratch/${PSLOT}' # TODO: This will not yet work from Google.
+MAKE_NSSTBUFR: 'NO'
+MAKE_ACFTBUFR: 'NO'
+SUPPORTED_RESOLUTIONS: ['C48', 'C96'] # TODO: Test and support all cubed-sphere resolutions.
diff --git a/workflow/rocoto/gfs_tasks.py b/workflow/rocoto/gfs_tasks.py
index fa218c6713..7f1eb9a509 100644
--- a/workflow/rocoto/gfs_tasks.py
+++ b/workflow/rocoto/gfs_tasks.py
@@ -3,6 +3,7 @@
from wxflow import timedelta_to_HMS
import rocoto.rocoto as rocoto
import numpy as np
+import os
class GFSTasks(Tasks):
@@ -24,7 +25,33 @@ def stage_ic(self):
# Atm ICs
if self.app_config.do_atm:
- prefix = f"{cpl_ic['BASE_CPLIC']}/{cpl_ic['CPL_ATMIC']}/@Y@m@d@H/atmos"
+            pslot = self._base['PSLOT']
+
+            if 'BASE_CPLIC' in cpl_ic:
+                base_cplic = cpl_ic['BASE_CPLIC']
+            else:
+                base_cplic = os.environ.get('BASE_CPLIC')
+            if 'CPL_ATMIC' in cpl_ic:
+                cpl_atmic = cpl_ic['CPL_ATMIC']
+            else:
+                cpl_atmic = os.environ.get('CPL_ATMIC')
+
+            prefix = f"{base_cplic}/{cpl_atmic}/@Y@m@d@H/atmos"
+
+            pw_csp = os.environ.get('PW_CSP')
+            # os.environ.get returns a string, so test it explicitly rather
+            # than relying on truthiness (any non-empty string is truthy)
+            use_ufs_utils_format = os.environ.get('USE_UFS_UTILS_FORMAT', 'false').lower() in ('true', 'yes', '1')
+            if pw_csp in ['aws', 'azure', 'google'] or use_ufs_utils_format:
+                icdir = f"{base_cplic}/{cpl_atmic}"
+
+                # IC_PREFIX/IC_TYPE may come from the host yaml; default to the
+                # gfs/input layout written by UFS_UTILS
+                cpl_ic_prefix = cpl_ic.get('IC_PREFIX', 'gfs')
+                cpl_ic_type = cpl_ic.get('IC_TYPE', 'input')
+                prefix = f"{icdir}/{cpl_ic_prefix}.@Y@m@d/@H/model_data/atmos/{cpl_ic_type}"
for file in ['gfs_ctrl.nc'] + \
[f'{datatype}_data.tile{tile}.nc'
for datatype in ['gfs', 'sfc']
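Spelled out, the two atmosphere IC layouts this block selects between, with illustrative BASE_CPLIC/CPL_ATMIC values matching the awspw.yaml change below:

```bash
#!/usr/bin/env bash
BASE_CPLIC=/contrib/Wei.Huang/data/ICs
CPL_ATMIC=C96
# on-prem layout (the default prefix):
echo "${BASE_CPLIC}/${CPL_ATMIC}/@Y@m@d@H/atmos/gfs_ctrl.nc"
# PW cloud / UFS_UTILS layout (IC_PREFIX=gfs, IC_TYPE=input):
echo "${BASE_CPLIC}/${CPL_ATMIC}/gfs.@Y@m@d/@H/model_data/atmos/input/gfs_ctrl.nc"
```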
diff --git a/workflow/rocoto/rocoto.py b/workflow/rocoto/rocoto.py
index 0abb56cafb..7920d64f01 100644
--- a/workflow/rocoto/rocoto.py
+++ b/workflow/rocoto/rocoto.py
@@ -1,5 +1,6 @@
#!/usr/bin/env python3
+import os
from typing import Union, List, Dict, Any
'''
@@ -144,8 +145,11 @@ def _create_innermost_task(task_dict: Dict[str, Any]) -> List[str]:
     strings.append(f'\t<partition>{partition}</partition>\n')
     strings.append(f'\t<walltime>{walltime}</walltime>\n')
     strings.append(f'\t<nodes>{nodes}:ppn={ppn}:tpp={threads}</nodes>\n')
-    if memory is not None:
-        strings.append(f'\t<memory>{memory}</memory>\n')
+
+    pw_csp = os.environ.get('PW_CSP')
+    # the PW cloud Slurm setups do not accept a memory request
+    if memory is not None and pw_csp not in ['aws', 'azure', 'google']:
+        strings.append(f'\t<memory>{memory}</memory>\n')
     if native is not None:
         strings.append(f'\t<native>{native}</native>\n')
strings.append('\n')
diff --git a/workflow/rocoto/tasks.py b/workflow/rocoto/tasks.py
index ad135be713..1f9a091fa5 100644
--- a/workflow/rocoto/tasks.py
+++ b/workflow/rocoto/tasks.py
@@ -1,5 +1,6 @@
#!/usr/bin/env python3
+import os
import numpy as np
from applications.applications import AppConfig
import rocoto.rocoto as rocoto
@@ -214,7 +215,11 @@ def get_resource(self, task_name):
else:
native += ':shared'
elif scheduler in ['slurm']:
- native = '--export=NONE'
+            pw_csp = os.environ.get('PW_CSP')
+            if pw_csp in ['aws', 'azure', 'google']:
+                native = '--export=ALL --exclusive'
+            else:
+                native = '--export=NONE'
if task_config['RESERVATION'] != "":
native += '' if task_name in Tasks.SERVICE_TASKS else ' --reservation=' + task_config['RESERVATION']
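Net effect of that branch on the Slurm native flags, as a quick check:

```bash
#!/usr/bin/env bash
for pw_csp in on-prem aws azure google; do
  if [[ "${pw_csp}" =~ ^(aws|azure|google)$ ]]; then
    echo "${pw_csp}: --export=ALL --exclusive"   # PW cloud: full environment, whole node
  else
    echo "${pw_csp}: --export=NONE"              # on-prem default
  fi
done
```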
diff --git a/workflow/rocoto/workflow_xml.py b/workflow/rocoto/workflow_xml.py
index 11b2cdfc45..c586fba0f5 100644
--- a/workflow/rocoto/workflow_xml.py
+++ b/workflow/rocoto/workflow_xml.py
@@ -156,11 +157,21 @@ def _write_crontab(self, crontab_file: str = None, cronint: int = 5) -> None:
replyto = ''
strings = ['',
- f'#################### {pslot} ####################',
- f'MAILTO="{replyto}"',
- f'{cronintstr} {rocotorunstr}',
- '#################################################################',
- '']
+                   f'#################### {pslot} ####################',
+                   f'MAILTO="{replyto}"'
+                   ]
+        pw_csp = os.environ.get('PW_CSP')
+        if pw_csp in ['aws', 'azure', 'google']:
+            # cron on the PW images needs an explicit shell and environment
+            strings.extend(['SHELL="/bin/bash"',
+                            'BASH_ENV="/etc/bashrc"'])
+        strings.extend([f'{cronintstr} {rocotorunstr}',
+                        '#################################################################',
+                        ''])
if crontab_file is None:
crontab_file = f"{expdir}/{pslot}.crontab"
diff --git a/workflow/setup_expt.py b/workflow/setup_expt.py
index 97d25dc15a..1da7289b00 100755
--- a/workflow/setup_expt.py
+++ b/workflow/setup_expt.py
@@ -331,6 +331,7 @@ def edit_baseconfig(host, inputs, yaml_dict):
"@OCNRES@": f"{int(100.*inputs.resdetocean):03d}",
"@EXPDIR@": inputs.expdir,
"@COMROOT@": inputs.comroot,
+ "@BASE_CPLIC@": inputs.base_cplic,
"@EXP_WARM_START@": is_warm_start,
"@MODE@": inputs.mode,
"@gfs_cyc@": inputs.gfs_cyc,
@@ -564,6 +565,10 @@ def get_ocean_resolution(resdetatmos):
def main(*argv):
user_inputs = input_args(*argv)
+
+    user_inputs.base_cplic = os.getenv('BASE_CPLIC', '/scratch2/NAGAPE/epic/Wei.Huang/data')
+
host = Host()
validate_user_request(host, user_inputs)