From 9280086bfc31ebab0ae3115792c8bde6682ab072 Mon Sep 17 00:00:00 2001
From: TerrenceMcGuinness-NOAA
Date: Tue, 13 Aug 2024 09:28:53 -0400
Subject: [PATCH] Jenkins Pipeline Updates (#2815)

Pipeline updates:

- Get the `gh` location on the remote machine and define the global bash
  environment variable `$GH` for the GitHub CLI
- Failed cases are now displayed as failed in the Jenkins dashboard
  (see NOTE below)
- Added the Build # to messaging for clarity when jobs are re-run
- Replaced the Matrix construct for concurrency with the parallel method,
  which can take dynamic case lists (see the sketch below)
- With the hard-coded list of cases removed, the list of cases is now
  obtained dynamically from the PR case directory
- See the new look of the dashboard below (it has more annotations and
  displays only the cases that are used)

NOTE: **failFast** (quitting all cases when one fails) still does not work,
because it does not quit the running remote shells. We can make this a
configurable capability in a feature request with some custom code. The
current behavior is that the remaining cases continue to run after a FAIL
label has been issued, and it is incumbent on the code manager to kill the
CI job in the controller before setting another Ready label.
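For illustration, a minimal sketch of the parallel-with-dynamic-cases
pattern that replaces the Matrix construct. This is not the actual
Jenkinsfile excerpt: the per-case sh command is a placeholder, and only
`cases` (populated from get_host_case_list.py, as shown in the diff) is
taken from the real pipeline:

    // Build one parallel branch per case discovered for this host.
    def caseMap = [:]
    cases.each { case_name ->
        caseMap[case_name] = {
            stage("Run ${case_name}") {
                // Placeholder: the real pipeline drives each case through
                // its own create/run/check steps.
                sh(script: "echo 'running ${case_name}'")
            }
        }
    }
    // failFast is a reserved key of the map passed to parallel();
    // see the NOTE above on its current limitation.
    caseMap.failFast = false
    parallel(caseMap)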
"${HOMEgfs}/ci/scripts/utils/publish_logs.py --file ${error_logs} --gist PR_BUILD_${env.CHANGE_ID}", returnStdout: true).trim() - sh(script: """${GH} pr comment ${env.CHANGE_ID} --repo ${repo_url} --body "Build **FAILED** on **${Machine}** with error logs:\n\\`\\`\\`\n${error_logs_message}\\`\\`\\`\n\nFollow link here to view the contents of the above file(s): [(link)](${gist_url})" """) + sh(script: """${GH} pr comment ${env.CHANGE_ID} --repo ${repo_url} --body "Build **FAILED** on **${Machine}** in Build# ${env.BUILD_NUMBER} with error logs:\n\\`\\`\\`\n${error_logs_message}\\`\\`\\`\n\nFollow link here to view the contents of the above file(s): [(link)](${gist_url})" """) } catch (Exception error_comment) { echo "Failed to comment on PR: ${error_comment.getMessage()}" } @@ -169,7 +171,7 @@ pipeline { } } if (system == 'gfs') { - caseList = sh(script: "${HOMEgfs}/ci/scripts/utils/get_host_case_list.py ${machine}", returnStdout: true).trim().split() + cases = sh(script: "${HOMEgfs}/ci/scripts/utils/get_host_case_list.py ${machine}", returnStdout: true).trim().split() } } } @@ -276,6 +278,7 @@ pipeline { } } } + stage( '5. FINALIZE' ) { agent { label NodeName[machine].toLowerCase() } @@ -291,7 +294,7 @@ pipeline { """, returnStatus: true) sh(script: """${GH} pr edit ${env.CHANGE_ID} --repo ${repo_url} --add-label "CI-${Machine}-${STATUS}" """, returnStatus: true) if (fileExists("${CUSTOM_WORKSPACE}/RUNTESTS/ci-run_check.log")) { - sh(script: """echo "**CI ${STATUS}** ${Machine} at
-                        sh(script: """echo "**CI ${STATUS}** ${Machine} at<br>Built and ran in directory \\`${CUSTOM_WORKSPACE}\\`\n\\`\\`\\`\n" | cat - ${CUSTOM_WORKSPACE}/RUNTESTS/ci-run_check.log > temp && mv temp ${CUSTOM_WORKSPACE}/RUNTESTS/ci-run_check.log""", returnStatus: true)
+                        sh(script: """echo "**CI ${STATUS}** on ${Machine} in Build# ${env.BUILD_NUMBER}<br>Built and ran in directory \\`${CUSTOM_WORKSPACE}\\`\n\\`\\`\\`\n" | cat - ${CUSTOM_WORKSPACE}/RUNTESTS/ci-run_check.log > temp && mv temp ${CUSTOM_WORKSPACE}/RUNTESTS/ci-run_check.log""", returnStatus: true)
                         sh(script: """${GH} pr comment ${env.CHANGE_ID} --repo ${repo_url} --body-file ${CUSTOM_WORKSPACE}/RUNTESTS/ci-run_check.log """, returnStatus: true)
                     }
                     if (STATUS == 'Passed') {
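
As a follow-on to the failFast NOTE in the commit message, one possible
shape for the configurable capability, assuming a hypothetical boolean
parameter TERMINATE_ON_FAIL (not an existing parameter); killing shells
already running on the remote nodes would still require custom code:

    // Hypothetical sketch: expose failFast as a build parameter.
    properties([parameters([
        booleanParam(name: 'TERMINATE_ON_FAIL', defaultValue: false,
                     description: 'Stop all cases when any case fails')
    ])])

    def caseMap = [:]
    // ... populate caseMap per case, as sketched in the commit message ...
    caseMap.failFast = params.TERMINATE_ON_FAIL
    // failFast aborts the Jenkins branches, but shells already running on
    // the remote nodes still need explicit kill/cleanup logic.
    parallel(caseMap)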